3de1f08676e8f53183257e136b279053045a5099
[xfstests-dev.git] / check
1 #!/bin/bash
2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
4 #
5 # Control script for QA
6 #
7 tmp=/tmp/$$
8 status=0
9 needwrap=true
10 needsum=true
11 n_try=0
12 try=""
13 n_bad=0
14 sum_bad=0
15 bad=""
16 n_notrun=0
17 notrun=""
18 interrupt=true
19 diff="diff -u"
20 showme=false
21 have_test_arg=false
22 randomize=false
23 export here=`pwd`
24 xfile=""
25 brief_test_summary=false
26 do_report=false
27 DUMP_OUTPUT=false
28
29 # This is a global variable used to pass test failure text to reporting gunk
30 _err_msg=""
31
32 # start the initialisation work now
33 iam=check
34
35 export MSGVERB="text:action"
36 export QA_CHECK_FS=${QA_CHECK_FS:=true}
37
38 # number of diff lines from a failed test, 0 for whole output
39 export DIFF_LENGTH=${DIFF_LENGTH:=10}
40
41 # by default don't output timestamps
42 timestamp=${TIMESTAMP:=false}
43
44 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
45
46 SRC_GROUPS="generic shared"
47 export SRC_DIR="tests"
48
# Print the command-line help text and exit 0.  The body after the first
# line is one single-quoted literal, so nothing in it is expanded.
usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -glusterfs                test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -overlay            test overlay
    -pvfs2          test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
for every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
            exit 0
}
112
# Print the tests in tests/$1 that belong to group $2, as a single
# space-separated line of <SRC_DIR>/<dir>/<name> paths.
# Returns non-zero when the directory has no usable group file.
get_sub_group_list()
{
        local dir=$1
        local group=$2
        local gfile="$SRC_DIR/$dir/group"

        test -s "$gfile" || return 1

        # Strip comments, then pad every line with a trailing space so the
        # group name can always be matched as " $group " regardless of its
        # position; print the test path for each matching entry.
        local matches
        matches=$(sed -n < "$gfile" \
                -e 's/#.*//' \
                -e 's/$/ /' \
                -e "s;^\($VALID_TEST_NAME\).* $group .*;$SRC_DIR/$dir/\1;p")
        # unquoted on purpose: collapses newlines into single spaces
        echo $matches
}
126
# Expand a group name into the list of matching test paths.  The argument
# is either a plain group (searched in every test dir) or qualified as
# <test dir>/<group> (e.g. xfs/quick) to restrict the search.
get_group_list()
{
        local group=$1
        local result=""
        local subdir
        local dir

        subdir=$(dirname $group)
        if [ -n "$subdir" ] && [ "$subdir" != "." ] && [ -d "$SRC_DIR/$subdir" ]; then
                # qualified form: search only the named test dir
                get_sub_group_list $subdir $(basename $group)
                return
        fi

        for dir in $SRC_GROUPS $FSTYP; do
                [ -d "$SRC_DIR/$dir" ] || continue
                result="$result $(get_sub_group_list $dir $group)"
        done
        echo $result
}
148
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
get_all_tests()
{
        touch $tmp.list
        for d in $SRC_GROUPS $FSTYP; do
                if ! test -d "$SRC_DIR/$d" ; then
                        continue
                fi
                # NOTE(review): parses ls output — safe here only because
                # test names match $VALID_TEST_NAME (no spaces/newlines).
                # grep -v "\..*" drops any path containing a dot, which
                # filters out sample output like 001.out.
                ls $SRC_DIR/$d/* | \
                        grep -v "\..*" | \
                        grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
                        grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
        done
}
164
# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
# Patterns are flushed through "grep -v -f" in batches of ~100 so a very
# long exclude list cannot blow past pattern-file limits.
trim_test_list()
{
        test_list="$*"

        rm -f $tmp.grep
        numsed=0
        for t in $test_list
        do
            if [ $numsed -gt 100 ]; then
                # flush the batch of anchored patterns collected so far
                grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
                mv $tmp.tmp $tmp.list
                numsed=0
                rm -f $tmp.grep
            fi
            # anchor both ends so "xfs/001" cannot strip "xfs/0011"
            echo "^$t\$" >>$tmp.grep
            numsed=$((numsed + 1))
        done
        # Robustness fix: with an empty argument list $tmp.grep was never
        # created, grep failed, and the mv replaced $tmp.list with an empty
        # file — silently discarding every remaining test.
        [ -s $tmp.grep ] || return 0
        grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
        mv $tmp.tmp $tmp.list
        rm -f $tmp.grep
}
188
189
# Current wall-clock time in whole seconds since the epoch; used for
# per-test and per-section timing arithmetic.
_wallclock()
{
    date +%s
}
194
# Emit " [HH:MM:SS]" with no trailing newline, for timestamped progress
# lines.  ($now stays global, matching the original behaviour.)
_timestamp()
{
    now=$(date "+%T")
    printf ' [%s]' "$now"
}
200
# Build the global $list of tests to run from three sources: explicit CLI
# tests ($tmp.arglist), included groups ($GROUP_LIST) and excluded groups
# ($XGROUP_LIST).  The result is sorted and de-duplicated, and shuffled
# when -r was given.  Exits the script if a named group matches nothing.
_prepare_test_list()
{
        unset list
        # Tests specified on the command line
        if [ -s $tmp.arglist ]; then
                cat $tmp.arglist > $tmp.list
        else
                touch $tmp.list
        fi

        # Specified groups to include
        # Note that the CLI processing adds a leading space to the first group
        # parameter, so we have to catch that here checking for "all"
        if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
                # no test numbers, do everything
                get_all_tests
        else
                for group in $GROUP_LIST; do
                        list=$(get_group_list $group)
                        if [ -z "$list" ]; then
                                echo "Group \"$group\" is empty or not defined?"
                                exit 1
                        fi

                        # append only tests not already selected
                        for t in $list; do
                                grep -s "^$t\$" $tmp.list >/dev/null || \
                                                        echo "$t" >>$tmp.list
                        done
                done
        fi

        # Specified groups to exclude
        for xgroup in $XGROUP_LIST; do
                list=$(get_group_list $xgroup)
                if [ -z "$list" ]; then
                        echo "Group \"$xgroup\" is empty or not defined?"
                        exit 1
                fi

                trim_test_list $list
        done

        # sort the list of tests into numeric order
        list=`sort -n $tmp.list | uniq`
        rm -f $tmp.list

        if $randomize
        then
                list=`echo $list | awk -f randomize.awk`
        fi
}
252
# Process command arguments first.
# The loop stops (without shifting) at the first non-option word, which is
# the start of the test list handled by the second loop further down.
while [ $# -gt 0 ]; do
        case "$1" in
        -\? | -h | --help) usage ;;

        -nfs)           FSTYP=nfs ;;
        -glusterfs)     FSTYP=glusterfs ;;
        -cifs)          FSTYP=cifs ;;
        -9p)            FSTYP=9p ;;
        -overlay)       FSTYP=overlay; export OVERLAY=true ;;
        -pvfs2)         FSTYP=pvfs2 ;;
        -tmpfs)         FSTYP=tmpfs ;;
        -ubifs)         FSTYP=ubifs ;;

        -g)     group=$2 ; shift ;
                GROUP_LIST="$GROUP_LIST ${group//,/ }"
                ;;

        -x)     xgroup=$2 ; shift ;
                XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
                ;;

        -X)     xfile=$2; shift ;
                # per-testdir exclude file: entries are relative to the dir
                for d in $SRC_GROUPS $FSTYP; do
                        [ -f $SRC_DIR/$d/$xfile ] || continue
                        for f in `sed "s/#.*$//" $SRC_DIR/$d/$xfile`; do
                                echo $d/$f >> $tmp.xlist
                        done
                done
                ;;
        -E)     xfile=$2; shift ;
                # single external exclude file with <dir>/<name> entries
                if [ -f $xfile ]; then
                        sed "s/#.*$//" "$xfile" >> $tmp.xlist
                fi
                ;;
        -s)     RUN_SECTION="$RUN_SECTION $2"; shift ;;
        -S)     EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
        -l)     diff="diff" ;;
        -udiff) diff="$diff -u" ;;

        -n)     showme=true ;;
        -r)     randomize=true ;;

        -T)     timestamp=true ;;
        -d)     DUMP_OUTPUT=true ;;
        -b)     brief_test_summary=true;;
        -R)     report_fmt=$2 ; shift ;
                REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
                do_report=true
                ;;
        --large-fs) export LARGE_SCRATCH_DEV=yes ;;
        # Bug fix: the option value lives in $1 ("--extra-space=N"); the
        # old ${r#*=} referenced an undefined $r and always yielded "".
        --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

        -*)     usage ;;
        *)      # not an argument, we've got tests now.
                have_test_arg=true ;;
        esac

        # if we've found a test specification, the break out of the processing
        # loop before we shift the arguments so that this is the first argument
        # that we process in the test arg loop below.
        if $have_test_arg; then
                break;
        fi

        shift
done
320
321 # we need common/rc, that also sources common/config. We need to source it
322 # after processing args, overlay needs FSTYP set before sourcing common/config
323 if ! . ./common/rc; then
324         echo "check: failed to source common/rc"
325         exit 1
326 fi
327
# Process tests from command line now.
# Each remaining word is a test name or glob; valid ones are collected in
# $tmp.arglist for _prepare_test_list.
if $have_test_arg; then
        while [ $# -gt 0 ]; do
                case "$1" in
                -*)     echo "Arguments before tests, please!"
                        status=1
                        exit $status
                        ;;
                *)      # Expand test pattern (e.g. xfs/???, *fs/001)
                        list=$(cd $SRC_DIR; echo $1)
                        for t in $list; do
                                test_dir=`dirname $t`
                                test_dir=${test_dir#$SRC_DIR/*}
                                test_name=`basename $t`
                                group_file=$SRC_DIR/$test_dir/group

                                # NOTE(review): prefix match only — "001"
                                # would also hit a hypothetical "0010";
                                # harmless while ids are exactly 3 digits.
                                if egrep -q "^$test_name" $group_file; then
                                        # in group file ... OK
                                        echo $SRC_DIR/$test_dir/$test_name \
                                                >>$tmp.arglist
                                else
                                        # oops
                                        echo "$t - unknown test, ignored"
                                fi
                        done
                        ;;
                esac

                shift
        done
elif [ -z "$GROUP_LIST" ]; then
        # default group list is the auto group. If any other group or test is
        # specified, we use that instead.
        GROUP_LIST="auto"
fi
363
# xfstests creates filesystems and (un)mounts devices; it cannot work
# without root privileges, so bail out early.
if [ "$(id -u)" -ne 0 ]; then
    echo "check: QA must be run as root"
    exit 1
fi
369
# Reset the per-section pass/fail/notrun accounting so the next section
# starts from a clean slate.
_wipe_counters()
{
        n_try=0
        n_bad=0
        n_notrun=0
        unset try notrun bad
}
377
# Close out the current section: merge per-test timings into
# $RESULT_BASE/check.time, append Ran/Not run/Failures accounting to
# $check.log and $tmp.summary, emit the optional xunit report, then reset
# the counters.  $needwrap makes this idempotent; $interrupt tells a
# trap-driven call apart from a controlled one.
_wrapup()
{
        seq="check"
        check="$RESULT_BASE/check"

        if $showme; then
                if $needwrap; then
                        if $do_report; then
                                _make_section_report
                        fi
                        needwrap=false
                fi
        elif $needwrap; then
                if [ -f $check.time -a -f $tmp.time ]; then
                        # merge new timings into the history; because
                        # $tmp.time is read last, its entries overwrite
                        # older ones in the awk map
                        cat $check.time $tmp.time  \
                                | $AWK_PROG '
                                { t[$1] = $2 }
                                END {
                                        if (NR > 0) {
                                                for (i in t) print i " " t[i]
                                        }
                                }' \
                                | sort -n >$tmp.out
                        mv $tmp.out $check.time
                fi

                echo "" >>$check.log
                date >>$check.log

                echo "SECTION       -- $section" >>$tmp.summary
                echo "=========================" >>$tmp.summary
                if [ ! -z "$n_try" -a $n_try != 0 ]; then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Ran:$try"
                                echo "Ran:$try" >>$tmp.summary
                        fi
                        echo "Ran:$try" >>$check.log
                fi

                # only false while passing through the controlled wrapup
                # path at the end of a section / the end of the run
                $interrupt && echo "Interrupted!" | tee -a $check.log

                if [ ! -z "$notrun" ]; then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Not run:$notrun"
                                echo "Not run:$notrun" >>$tmp.summary
                        fi
                        echo "Not run:$notrun" >>$check.log
                fi

                if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
                        echo "Failures:$bad"
                        echo "Failed $n_bad of $n_try tests"
                        echo "Failures:$bad" >>$check.log
                        echo "Failed $n_bad of $n_try tests" >>$check.log
                        echo "Failures:$bad" >>$tmp.summary
                        echo "Failed $n_bad of $n_try tests" >>$tmp.summary
                else
                        echo "Passed all $n_try tests"
                        echo "Passed all $n_try tests" >>$check.log
                        echo "Passed all $n_try tests" >>$tmp.summary
                fi
                echo "" >>$tmp.summary
                if $do_report; then
                        _make_section_report
                fi
                needwrap=false
        fi

        sum_bad=`expr $sum_bad + $n_bad`
        _wipe_counters
        rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
        if ! $OPTIONS_HAVE_SECTIONS; then
                rm -f $tmp.*
        fi
}
453
# Final wrapup for the whole run: finish the current section via _wrapup,
# then print the accumulated per-section summary exactly once ($needsum)
# and remove our temp files.
_summary()
{
        _wrapup
        if $showme; then
                :
        elif $needsum; then
                # (dropped an unused `count=$(wc -L ... | cut ...)` line:
                # the value was never read and wc -L is a GNU extension)
                cat $tmp.summary
                needsum=false
        fi
        rm -f $tmp.*
}
466
# Post-test filesystem check.  A test flags the devices it touched by
# creating require_test / require_scratch marker files in $RESULT_DIR;
# flagged filesystems are checked (markers removed afterwards), untouched
# ones are just unmounted.  A failed check sets the global err=true.
_check_filesystems()
{
        local marker

        marker=${RESULT_DIR}/require_test
        if [ ! -f $marker ]; then
                _test_unmount 2> /dev/null
        else
                _check_test_fs || err=true
                rm -f ${marker}*
        fi

        marker=${RESULT_DIR}/require_scratch
        if [ ! -f $marker ]; then
                _scratch_unmount 2> /dev/null
        else
                _check_scratch_fs || err=true
                rm -f ${marker}*
        fi
}
482
# Check whether test $1 appears in the exclude list built up in
# $tmp.xlist.  Prints the "[expunged]" marker and returns 1 when the test
# must be skipped; returns 0 when it should run.
_expunge_test()
{
        local TEST_ID="$1"
        if [ -s $tmp.xlist ]; then
                # quote the ID so an empty or odd argument cannot be
                # word-split into extra grep arguments
                if grep -q "$TEST_ID" $tmp.xlist; then
                        echo "       [expunged]"
                        return 1
                fi
        fi
        return 0
}
494
495 _init_kmemleak
496 _prepare_test_list
497
498 if $OPTIONS_HAVE_SECTIONS; then
499         trap "_summary; exit \$status" 0 1 2 3 15
500 else
501         trap "_wrapup; exit \$status" 0 1 2 3 15
502 fi
503
# Main driver: run the selected test list once per configuration section.
# For each section: load its config, honour -s/-S filters, (re)create and
# mount the test/scratch devices, then execute every test in $list while
# tracking per-test status in err/tc_status and the global counters.
for section in $HOST_OPTIONS_SECTIONS; do
        OLD_FSTYP=$FSTYP
        OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
        get_next_config $section

        # Do we need to run only some sections ?
        if [ ! -z "$RUN_SECTION" ]; then
                skip=true
                for s in $RUN_SECTION; do
                        if [ $section == $s ]; then
                                skip=false
                                break;
                        fi
                done
                if $skip; then
                        continue
                fi
        fi

        # Did this section get excluded?
        if [ ! -z "$EXCLUDE_SECTION" ]; then
                skip=false
                for s in $EXCLUDE_SECTION; do
                        if [ $section == $s ]; then
                                skip=true
                                break;
                        fi
                done
                if $skip; then
                        continue
                fi
        fi

        if $OPTIONS_HAVE_SECTIONS; then
                echo "SECTION       -- $section"
        fi

        sect_start=`_wallclock`
        # remake the test device when the fs type changed between sections
        # (or when the config demands it), and remount when only the mount
        # options changed
        if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
                echo "RECREATING    -- $FSTYP on $TEST_DEV"
                _test_unmount 2> /dev/null
                if ! _test_mkfs >$tmp.err 2>&1
                then
                        echo "our local _test_mkfs routine ..."
                        cat $tmp.err
                        echo "check: failed to mkfs \$TEST_DEV using specified options"
                        status=1
                        exit
                fi
                if ! _test_mount
                then
                        echo "check: failed to mount $TEST_DEV on $TEST_DIR"
                        status=1
                        exit
                fi
                # the test list may be fs-type dependent, so rebuild it
                _prepare_test_list
        elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
                _test_unmount 2> /dev/null
                if ! _test_mount
                then
                        echo "check: failed to mount $TEST_DEV on $TEST_DIR"
                        status=1
                        exit
                fi
        fi

        init_rc

        seq="check"
        check="$RESULT_BASE/check"

        # don't leave old full output behind on a clean run
        rm -f $check.full

        [ -f $check.time ] || touch $check.time

        # print out our test configuration
        echo "FSTYP         -- `_full_fstyp_details`"
        echo "PLATFORM      -- `_full_platform_details`"
        if [ ! -z "$SCRATCH_DEV" ]; then
          echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
          echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
        fi
        echo
        needwrap=true

        if [ ! -z "$SCRATCH_DEV" ]; then
          _scratch_unmount 2> /dev/null
          # call the overridden mkfs - make sure the FS is built
          # the same as we'll create it later.

          if ! _scratch_mkfs >$tmp.err 2>&1
          then
              echo "our local _scratch_mkfs routine ..."
              cat $tmp.err
              echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
              status=1
              exit
          fi

          # call the overridden mount - make sure the FS mounts with
          # the same options that we'll mount with later.
          if ! _scratch_mount >$tmp.err 2>&1
          then
              echo "our local mount routine ..."
              cat $tmp.err
              echo "check: failed to mount \$SCRATCH_DEV using specified options"
              status=1
              exit
          fi
        fi

        seqres="$check"
        _check_test_fs

        # Per-test loop.  Bookkeeping for test N (fail accounting, report
        # emission) happens at the top of iteration N+1 and is repeated
        # once after the loop for the final test.
        err=false
        first_test=true
        prev_seq=""
        for seq in $list ; do
                # Run report for previous test!
                if $err ; then
                        bad="$bad $seqnum"
                        n_bad=`expr $n_bad + 1`
                        tc_status="fail"
                fi
                if $do_report && ! $first_test ; then
                        if [ $tc_status != "expunge" ] ; then
                                _make_testcase_report "$prev_seq" "$tc_status"
                        fi
                fi
                first_test=false

                err=false
                prev_seq="$seq"
                if [ ! -f $seq ]; then
                        # Try to get full name in case the user supplied only
                        # seq id and the test has a name. A bit of hassle to
                        # find really the test and not its sample output or
                        # helping files.
                        bname=$(basename $seq)
                        full_seq=$(find $(dirname $seq) -name $bname* -executable |
                                awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
                                     END { print shortest }')
                        if [ -f $full_seq ] && \
                           [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
                                seq=$full_seq
                        fi
                fi

                # the filename for the test and the name output are different.
                # we don't include the tests/ directory in the name output.
                export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

                # Similarly, the result directory needs to replace the tests/
                # part of the test location.
                group=`dirname $seq`
                if $OPTIONS_HAVE_SECTIONS; then
                        export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
                        REPORT_DIR="$RESULT_BASE/$section"
                else
                        export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
                        REPORT_DIR="$RESULT_BASE"
                fi
                seqres="$REPORT_DIR/$seqnum"

                mkdir -p $RESULT_DIR
                echo -n "$seqnum"

                # -n: just list the tests that would run
                if $showme; then
                        _expunge_test $seqnum
                        if [ $? -eq 1 ]; then
                            tc_status="expunge"
                            continue
                        fi
                        echo
                        start=0
                        stop=0
                        tc_status="list"
                        n_notrun=`expr $n_notrun + 1`
                        continue
                fi

                tc_status="pass"
                if [ ! -f $seq ]; then
                        echo " - no such test?"
                        continue
                fi

                # really going to try and run this one
                rm -f $seqres.out.bad

                # check if we really should run it
                _expunge_test $seqnum
                if [ $? -eq 1 ]; then
                        tc_status="expunge"
                        continue
                fi

                # record that we really tried to run this test.
                try="$try $seqnum"
                n_try=`expr $n_try + 1`

                # slashes now in names, sed barfs on them so use grep
                lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
                if [ "X$lasttime" != X ]; then
                        echo -n " ${lasttime}s ... "
                else
                        echo -n "       " # prettier output with timestamps.
                fi
                rm -f core $seqres.notrun

                start=`_wallclock`
                $timestamp && echo -n " ["`date "+%T"`"]"
                [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
                $LOGGER_PROG "run xfstest $seqnum"
                if [ -w /dev/kmsg ]; then
                        export date_time=`date +"%F %T"`
                        echo "run fstests $seqnum at $date_time" > /dev/kmsg
                        # _check_dmesg depends on this log in dmesg
                        touch ${RESULT_DIR}/check_dmesg
                fi
                if [ "$DUMP_OUTPUT" = true ]; then
                        ./$seq 2>&1 | tee $tmp.out
                        # Because $? would get tee's return code
                        sts=${PIPESTATUS[0]}
                else
                        ./$seq >$tmp.out 2>&1
                        sts=$?
                fi

                if [ -f core ]; then
                        _dump_err_cont "[dumped core]"
                        mv core $RESULT_BASE/$seqnum.core
                        err=true
                fi

                # a test creates $seqres.notrun to signal "skipped"
                if [ -f $seqres.notrun ]; then
                        $timestamp && _timestamp
                        stop=`_wallclock`
                        $timestamp || echo -n "[not run] "
                        $timestamp && echo " [not run]" && \
                                      echo -n " $seqnum -- "
                        cat $seqres.notrun
                        notrun="$notrun $seqnum"
                        n_notrun=`expr $n_notrun + 1`
                        tc_status="notrun"
                        continue;
                fi

                if [ $sts -ne 0 ]; then
                        _dump_err_cont "[failed, exit status $sts]"
                        _test_unmount 2> /dev/null
                        _scratch_unmount 2> /dev/null
                        err=true
                else
                        # the test apparently passed, so check for corruption
                        # and log messages that shouldn't be there.
                        _check_filesystems
                        _check_dmesg || err=true
                        _check_kmemleak || err=true
                fi

                # test ends after all checks are done.
                $timestamp && _timestamp
                stop=`_wallclock`

                if [ ! -f $seq.out ]; then
                        _dump_err "no qualified output"
                        err=true
                        continue;
                fi

                # coreutils 8.16+ changed quote formats in error messages
                # from `foo' to 'foo'. Filter old versions to match the new
                # version.
                sed -i "s/\`/\'/g" $tmp.out
                if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
                        if ! $err ; then
                                echo "$seqnum `expr $stop - $start`" >>$tmp.time
                                echo -n " `expr $stop - $start`s"
                        fi
                        echo ""
                else
                        _dump_err "- output mismatch (see $seqres.out.bad)"
                        mv $tmp.out $seqres.out.bad
                        # show at most $DIFF_LENGTH lines of the diff,
                        # indented by four spaces
                        $diff $seq.out $seqres.out.bad | {
                        if test "$DIFF_LENGTH" -le 0; then
                                cat
                        else
                                head -n "$DIFF_LENGTH"
                                echo "..."
                                echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
                                        " to see the entire diff)"
                        fi; } | sed -e 's/^\(.\)/    \1/'
                        err=true
                fi
        done

        # make sure we record the status of the last test we ran.
        if $err ; then
                bad="$bad $seqnum"
                n_bad=`expr $n_bad + 1`
                tc_status="fail"
        fi
        if $do_report && ! $first_test ; then
                if [ $tc_status != "expunge" ] ; then
                        _make_testcase_report "$prev_seq" "$tc_status"
                fi
        fi

        sect_stop=`_wallclock`
        interrupt=false
        _wrapup
        interrupt=true
        echo

        _test_unmount 2> /dev/null
        _scratch_unmount 2> /dev/null
done
823
# Controlled exit: suppress the "Interrupted!" message and report failure
# if any section had a failing test.  The bare exit fires the EXIT trap
# installed above, which prints the summary and re-exits with $status.
interrupt=false
status=$((sum_bad != 0))
exit