1 #!/bin/bash
2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
4 #
5 # Control script for QA
6 #
7 tmp=/tmp/$$
8 status=0
9 needwrap=true
10 needsum=true
11 n_try=0
12 try=""
13 n_bad=0
14 sum_bad=0
15 bad=""
16 n_notrun=0
17 notrun=""
18 interrupt=true
19 diff="diff -u"
20 showme=false
21 have_test_arg=false
22 randomize=false
23 export here=`pwd`
24 xfile=""
25 subdir_xfile=""
26 brief_test_summary=false
27 do_report=false
28 DUMP_OUTPUT=false
29
30 # This is a global variable used to pass test failure text to reporting gunk
31 _err_msg=""
32
33 # start the initialisation work now
34 iam=check
35
36 export MSGVERB="text:action"
37 export QA_CHECK_FS=${QA_CHECK_FS:=true}
38
39 # number of diff lines from a failed test, 0 for whole output
40 export DIFF_LENGTH=${DIFF_LENGTH:=10}
41
42 # by default don't output timestamps
43 timestamp=${TIMESTAMP:=false}
44
45 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
46
47 SRC_GROUPS="generic shared"
48 export SRC_DIR="tests"
49
50 usage()
51 {
52     echo "Usage: $0 [options] [testlist]"'
53
54 check options
55     -nfs                test NFS
56     -glusterfs          test GlusterFS
57     -cifs               test CIFS
58     -9p                 test 9p
59     -virtiofs           test virtiofs
60     -overlay            test overlay
61     -pvfs2              test PVFS2
62     -tmpfs              test TMPFS
63     -ubifs              test ubifs
64     -l                  line mode diff
65     -udiff              show unified diff (default)
66     -n                  show me, do not run tests
67     -T                  output timestamps
68     -r                  randomize test order
69     -d                  dump test output to stdout
70     -b                  brief test summary
71     -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
72     --large-fs          optimise scratch device for large filesystems
73     -s section          run only specified section from config file
74     -S section          exclude the specified section from the config file
75
76 testlist options
77     -g group[,group...] include tests from these groups
78     -x group[,group...] exclude tests from these groups
79     -X exclude_file     exclude individual tests
80     -E external_file    exclude individual tests
81     [testlist]          include tests matching names in testlist
82
83 testlist argument is a list of tests in the form of <test dir>/<test name>.
84
85 <test dir> is a directory under tests that contains a group file,
86 with a list of the names of the tests in that directory.
87
88 <test name> may be either a specific test file name (e.g. xfs/001) or
89 a test file name match pattern (e.g. xfs/*).
90
91 group argument is either a name of a tests group to collect from all
92 the test dirs (e.g. quick) or a name of a tests group to collect from
93 a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
94 If you want to run all the tests in the test suite, use "-g all" to specify all
95 groups.
96
97 exclude_file argument refers to a name of a file inside each test directory.
98 For every test dir where this file is found, the listed test names are
99 excluded from the list of tests to run from that test dir.
100
101 external_file argument is a path to a single file containing a list of tests
102 to exclude in the form of <test dir>/<test name>.
103
104 examples:
105  check xfs/001
106  check -g quick
107  check -g xfs/quick
108  check -x stress xfs/*
109  check -X .exclude -g auto
110  check -E ~/.xfstests.exclude
111 '
112             exit 0
113 }
114
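# Print the paths (e.g. tests/xfs/001) of the tests in tests/$1 that the
# group file in that directory lists as members of group $2.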
115 get_sub_group_list()
116 {
117         local d=$1
118         local grp=$2
119
120         test -s "$SRC_DIR/$d/group" || return 1
121
122         local grpl=$(sed -n < $SRC_DIR/$d/group \
123                 -e 's/#.*//' \
124                 -e 's/$/ /' \
125                 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
126         echo $grpl
127 }
128
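# Expand a group name into a list of test paths. A group given as
# <test dir>/<group> is looked up only in that directory; otherwise it is
# collected from the generic, shared and $FSTYP directories (ext2 and ext3
# share the ext4 directory).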
129 get_group_list()
130 {
131         local grp=$1
132         local grpl=""
133         local sub=$(dirname $grp)
134         local fsgroup="$FSTYP"
135
136         if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
137                 # group is given as <subdir>/<group> (e.g. xfs/quick)
138                 grp=$(basename $grp)
139                 get_sub_group_list $sub $grp
140                 return
141         fi
142
143         if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
144             fsgroup=ext4
145         fi
146         for d in $SRC_GROUPS $fsgroup; do
147                 if ! test -d "$SRC_DIR/$d" ; then
148                         continue
149                 fi
150                 grpl="$grpl $(get_sub_group_list $d $grp)"
151         done
152         echo $grpl
153 }
154
155 # Find all tests, excluding files that are test metadata such as group files.
156 # It matches test names against $VALID_TEST_NAME defined in common/rc
157 get_all_tests()
158 {
159         touch $tmp.list
160         for d in $SRC_GROUPS $FSTYP; do
161                 if ! test -d "$SRC_DIR/$d" ; then
162                         continue
163                 fi
164                 ls $SRC_DIR/$d/* | \
165                         grep -v "\..*" | \
166                         grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
167                         grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
168         done
169 }
170
171 # takes the list of tests to run in $tmp.list, and removes the tests passed to
172 # the function from that list.
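# The exclusion patterns are applied with grep -v -f in batches of at most
# 100 at a time.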
173 trim_test_list()
174 {
175         test_list="$*"
176
177         rm -f $tmp.grep
178         numsed=0
179         for t in $test_list
180         do
181             if [ $numsed -gt 100 ]; then
182                 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
183                 mv $tmp.tmp $tmp.list
184                 numsed=0
185                 rm -f $tmp.grep
186             fi
187             echo "^$t\$" >>$tmp.grep
188             numsed=`expr $numsed + 1`
189         done
190         grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
191         mv $tmp.tmp $tmp.list
192         rm -f $tmp.grep
193 }
194
195
196 _wallclock()
197 {
198     date "+%s"
199 }
200
201 _timestamp()
202 {
203     now=`date "+%T"`
204     echo -n " [$now]"
205 }
206
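# Build the final list of tests to run in $list: start with any tests named
# on the command line, add the tests from the included groups (everything for
# "-g all"), remove the excluded groups, then sort the result numerically,
# deduplicate it and, with -r, shuffle it.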
207 _prepare_test_list()
208 {
209         unset list
210         # Tests specified on the command line
211         if [ -s $tmp.arglist ]; then
212                 cat $tmp.arglist > $tmp.list
213         else
214                 touch $tmp.list
215         fi
216
217         # Specified groups to include
218         # Note that the CLI processing adds a leading space to the first group
219         # parameter, so we have to catch that here when checking for "all"
220         if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
221                 # no test numbers, do everything
222                 get_all_tests
223         else
224                 for group in $GROUP_LIST; do
225                         list=$(get_group_list $group)
226                         if [ -z "$list" ]; then
227                                 echo "Group \"$group\" is empty or not defined?"
228                                 exit 1
229                         fi
230
231                         for t in $list; do
232                                 grep -s "^$t\$" $tmp.list >/dev/null || \
233                                                         echo "$t" >>$tmp.list
234                         done
235                 done
236         fi
237
238         # Specified groups to exclude
239         for xgroup in $XGROUP_LIST; do
240                 list=$(get_group_list $xgroup)
241                 if [ -z "$list" ]; then
242                         echo "Group \"$xgroup\" is empty or not defined?"
243                         exit 1
244                 fi
245
246                 trim_test_list $list
247         done
248
249         # sort the list of tests into numeric order (and shuffle it if -r was given)
250         if $randomize; then
251                 if type shuf >& /dev/null; then
252                         sorter="shuf"
253                 else
254                         sorter="awk -v seed=$RANDOM -f randomize.awk"
255                 fi
256         else
257                 sorter="cat"
258         fi
259         list=`sort -n $tmp.list | uniq | $sorter`
260         rm -f $tmp.list
261 }
262
263 # Process command arguments first.
264 while [ $# -gt 0 ]; do
265         case "$1" in
266         -\? | -h | --help) usage ;;
267
268         -nfs)           FSTYP=nfs ;;
269         -glusterfs)     FSTYP=glusterfs ;;
270         -cifs)          FSTYP=cifs ;;
271         -9p)            FSTYP=9p ;;
272         -virtiofs)      FSTYP=virtiofs ;;
273         -overlay)       FSTYP=overlay; export OVERLAY=true ;;
274         -pvfs2)         FSTYP=pvfs2 ;;
275         -tmpfs)         FSTYP=tmpfs ;;
276         -ubifs)         FSTYP=ubifs ;;
277
278         -g)     group=$2 ; shift ;
279                 GROUP_LIST="$GROUP_LIST ${group//,/ }"
280                 ;;
281
282         -x)     xgroup=$2 ; shift ;
283                 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
284                 ;;
285
286         -X)     subdir_xfile=$2; shift ;
287                 ;;
288         -E)     xfile=$2; shift ;
289                 if [ -f $xfile ]; then
290                         sed "s/#.*$//" "$xfile" >> $tmp.xlist
291                 fi
292                 ;;
293         -s)     RUN_SECTION="$RUN_SECTION $2"; shift ;;
294         -S)     EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
295         -l)     diff="diff" ;;
296         -udiff) diff="$diff -u" ;;
297
298         -n)     showme=true ;;
299         -r)     randomize=true ;;
300
301         -T)     timestamp=true ;;
302         -d)     DUMP_OUTPUT=true ;;
303         -b)     brief_test_summary=true;;
304         -R)     report_fmt=$2 ; shift ;
305                 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
306                 do_report=true
307                 ;;
308         --large-fs) export LARGE_SCRATCH_DEV=yes ;;
309         --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
310
311         -*)     usage ;;
312         *)      # not an option, we've got tests now.
313                 have_test_arg=true ;;
314         esac
315
316         # if we've found a test specification, then break out of the processing
317         # loop before we shift the arguments so that this is the first argument
318         # that we process in the test arg loop below.
319         if $have_test_arg; then
320                 break;
321         fi
322
323         shift
324 done
325
326 # we need common/rc, which also sources common/config. We need to source it
327 # after processing args because overlay needs FSTYP set before sourcing common/config
328 if ! . ./common/rc; then
329         echo "check: failed to source common/rc"
330         exit 1
331 fi
332
333 if [ -n "$subdir_xfile" ]; then
334         for d in $SRC_GROUPS $FSTYP; do
335                 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
336                 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
337                         echo $d/$f >> $tmp.xlist
338                 done
339         done
340 fi
341
342 # Process tests from command line now.
343 if $have_test_arg; then
344         while [ $# -gt 0 ]; do
345                 case "$1" in
346                 -*)     echo "Arguments before tests, please!"
347                         status=1
348                         exit $status
349                         ;;
350                 *)      # Expand test pattern (e.g. xfs/???, *fs/001)
351                         list=$(cd $SRC_DIR; echo $1)
352                         for t in $list; do
353                                 test_dir=`dirname $t`
354                                 test_dir=${test_dir#$SRC_DIR/*}
355                                 test_name=`basename $t`
356                                 group_file=$SRC_DIR/$test_dir/group
357
358                                 if egrep -q "^$test_name" $group_file; then
359                                         # in group file ... OK
360                                         echo $SRC_DIR/$test_dir/$test_name \
361                                                 >>$tmp.arglist
362                                 else
363                                         # oops
364                                         echo "$t - unknown test, ignored"
365                                 fi
366                         done
367                         ;;
368                 esac
369
370                 shift
371         done
372 elif [ -z "$GROUP_LIST" ]; then
373         # default group list is the auto group. If any other group or test is
374         # specified, we use that instead.
375         GROUP_LIST="auto"
376 fi
377
378 if [ `id -u` -ne 0 ]
379 then
380     echo "check: QA must be run as root"
381     exit 1
382 fi
383
384 _wipe_counters()
385 {
386         n_try="0"
387         n_bad="0"
388         n_notrun="0"
389         unset try notrun bad
390 }
391
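# Per-section wrap-up: fold this section's test runtimes into $check.time,
# append the ran/not-run/failure summary to $check.log and $tmp.summary, and
# generate the section report if -R was given.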
392 _wrapup()
393 {
394         seq="check"
395         check="$RESULT_BASE/check"
396
397         if $showme; then
398                 if $needwrap; then
399                         if $do_report; then
400                                 _make_section_report
401                         fi
402                         needwrap=false
403                 fi
404         elif $needwrap; then
405                 if [ -f $check.time -a -f $tmp.time ]; then
406                         cat $check.time $tmp.time  \
407                                 | $AWK_PROG '
408                                 { t[$1] = $2 }
409                                 END {
410                                         if (NR > 0) {
411                                                 for (i in t) print i " " t[i]
412                                         }
413                                 }' \
414                                 | sort -n >$tmp.out
415                         mv $tmp.out $check.time
416                 fi
417
418                 echo "" >>$check.log
419                 date >>$check.log
420
421                 echo "SECTION       -- $section" >>$tmp.summary
422                 echo "=========================" >>$tmp.summary
423                 if [ ! -z "$n_try" -a $n_try != 0 ]; then
424                         if [ $brief_test_summary == "false" ]; then
425                                 echo "Ran:$try"
426                                 echo "Ran:$try" >>$tmp.summary
427                         fi
428                         echo "Ran:$try" >>$check.log
429                 fi
430
431                 $interrupt && echo "Interrupted!" | tee -a $check.log
432
433                 if [ ! -z "$notrun" ]; then
434                         if [ $brief_test_summary == "false" ]; then
435                                 echo "Not run:$notrun"
436                                 echo "Not run:$notrun" >>$tmp.summary
437                         fi
438                         echo "Not run:$notrun" >>$check.log
439                 fi
440
441                 if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
442                         echo "Failures:$bad"
443                         echo "Failed $n_bad of $n_try tests"
444                         echo "Failures:$bad" >>$check.log
445                         echo "Failed $n_bad of $n_try tests" >>$check.log
446                         echo "Failures:$bad" >>$tmp.summary
447                         echo "Failed $n_bad of $n_try tests" >>$tmp.summary
448                 else
449                         echo "Passed all $n_try tests"
450                         echo "Passed all $n_try tests" >>$check.log
451                         echo "Passed all $n_try tests" >>$tmp.summary
452                 fi
453                 echo "" >>$tmp.summary
454                 if $do_report; then
455                         _make_section_report
456                 fi
457                 needwrap=false
458         fi
459
460         sum_bad=`expr $sum_bad + $n_bad`
461         _wipe_counters
462         rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
463         if ! $OPTIONS_HAVE_SECTIONS; then
464                 rm -f $tmp.*
465         fi
466 }
467
468 _summary()
469 {
470         _wrapup
471         if $showme; then
472                 :
473         elif $needsum; then
474                 count=`wc -L $tmp.summary | cut -f1 -d" "`
475                 cat $tmp.summary
476                 needsum=false
477         fi
478         rm -f $tmp.*
479 }
480
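# After a test finishes, check the test and scratch filesystems for
# consistency, but only if the test flagged (via the require_test and
# require_scratch files in $RESULT_DIR) that it actually used them.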
481 _check_filesystems()
482 {
483         if [ -f ${RESULT_DIR}/require_test ]; then
484                 _check_test_fs || err=true
485                 rm -f ${RESULT_DIR}/require_test*
486         else
487                 _test_unmount 2> /dev/null
488         fi
489         if [ -f ${RESULT_DIR}/require_scratch ]; then
490                 _check_scratch_fs || err=true
491                 rm -f ${RESULT_DIR}/require_scratch*
492         fi
493         _scratch_unmount 2> /dev/null
494 }
495
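# Return 1 (and report "[expunged]") if the given test id appears in the
# exclude list assembled from the -X and -E options.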
496 _expunge_test()
497 {
498         local TEST_ID="$1"
499         if [ -s $tmp.xlist ]; then
500                 if grep -q $TEST_ID $tmp.xlist; then
501                         echo "       [expunged]"
502                         return 1
503                 fi
504         fi
505         return 0
506 }
507
508 # Make the check script unattractive to the OOM killer...
509 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
510 test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
511
512 # ...and make the tests themselves somewhat more attractive to it, so that if
513 # the system runs out of memory it'll be the test that gets killed and not the
514 # test framework.
515 _run_seq() {
516         bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
517 }
518
519 _detect_kmemleak
520 _prepare_test_list
521
522 if $OPTIONS_HAVE_SECTIONS; then
523         trap "_summary; exit \$status" 0 1 2 3 15
524 else
525         trap "_wrapup; exit \$status" 0 1 2 3 15
526 fi
527
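# Main loop: run the selected tests once for every section in the config
# file, picking up each section's configuration via get_next_config and
# honouring the -s/-S section filters.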
528 for section in $HOST_OPTIONS_SECTIONS; do
529         OLD_FSTYP=$FSTYP
530         OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
531         get_next_config $section
532
533         # Do we need to run only some sections?
534         if [ ! -z "$RUN_SECTION" ]; then
535                 skip=true
536                 for s in $RUN_SECTION; do
537                         if [ $section == $s ]; then
538                                 skip=false
539                                 break;
540                         fi
541                 done
542                 if $skip; then
543                         continue
544                 fi
545         fi
546
547         # Did this section get excluded?
548         if [ ! -z "$EXCLUDE_SECTION" ]; then
549                 skip=false
550                 for s in $EXCLUDE_SECTION; do
551                         if [ $section == $s ]; then
552                                 skip=true
553                                 break;
554                         fi
555                 done
556                 if $skip; then
557                         continue
558                 fi
559         fi
560
561         mkdir -p $RESULT_BASE
562         if [ ! -d $RESULT_BASE ]; then
563                 echo "failed to create results directory $RESULT_BASE"
564                 status=1
565                 exit
566         fi
567
568         if $OPTIONS_HAVE_SECTIONS; then
569                 echo "SECTION       -- $section"
570         fi
571
572         sect_start=`_wallclock`
573         if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
574                 echo "RECREATING    -- $FSTYP on $TEST_DEV"
575                 _test_unmount 2> /dev/null
576                 if ! _test_mkfs >$tmp.err 2>&1
577                 then
578                         echo "our local _test_mkfs routine ..."
579                         cat $tmp.err
580                         echo "check: failed to mkfs \$TEST_DEV using specified options"
581                         status=1
582                         exit
583                 fi
584                 if ! _test_mount
585                 then
586                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
587                         status=1
588                         exit
589                 fi
590                 _prepare_test_list
591         elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
592                 _test_unmount 2> /dev/null
593                 if ! _test_mount
594                 then
595                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
596                         status=1
597                         exit
598                 fi
599         fi
600
601         init_rc
602
603         seq="check"
604         check="$RESULT_BASE/check"
605
606         # don't leave old full output behind on a clean run
607         rm -f $check.full
608
609         [ -f $check.time ] || touch $check.time
610
611         # print out our test configuration
612         echo "FSTYP         -- `_full_fstyp_details`"
613         echo "PLATFORM      -- `_full_platform_details`"
614         if [ ! -z "$SCRATCH_DEV" ]; then
615           echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
616           echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
617         fi
618         echo
619         needwrap=true
620
621         if [ ! -z "$SCRATCH_DEV" ]; then
622           _scratch_unmount 2> /dev/null
623           # call the overridden mkfs - make sure the FS is built
624           # the same as we'll create it later.
625
626           if ! _scratch_mkfs >$tmp.err 2>&1
627           then
628               echo "our local _scratch_mkfs routine ..."
629               cat $tmp.err
630               echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
631               status=1
632               exit
633           fi
634
635           # call the overridden mount - make sure the FS mounts with
636           # the same options that we'll mount with later.
637           if ! _try_scratch_mount >$tmp.err 2>&1
638           then
639               echo "our local mount routine ..."
640               cat $tmp.err
641               echo "check: failed to mount \$SCRATCH_DEV using specified options"
642               status=1
643               exit
644           else
645               _scratch_unmount
646           fi
647         fi
648
649         seqres="$check"
650         _check_test_fs
651
652         err=false
653         first_test=true
654         prev_seq=""
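        # Iterate over the selected tests. Failure accounting and per-test
        # report generation happen at the top of the next iteration (and once
        # more after the loop), after all of the post-test checks have had a
        # chance to set $err.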
655         for seq in $list ; do
656                 # Run report for previous test!
657                 if $err ; then
658                         bad="$bad $seqnum"
659                         n_bad=`expr $n_bad + 1`
660                         tc_status="fail"
661                 fi
662                 if $do_report && ! $first_test ; then
663                         if [ $tc_status != "expunge" ] ; then
664                                 _make_testcase_report "$prev_seq" "$tc_status"
665                         fi
666                 fi
667                 first_test=false
668
669                 err=false
670                 prev_seq="$seq"
671                 if [ ! -f $seq ]; then
672                         # Try to get full name in case the user supplied only
673                         # seq id and the test has a name. It takes a bit of
674                         # care to really find the test and not its sample
675                         # output or helper files.
676                         bname=$(basename $seq)
677                         full_seq=$(find $(dirname $seq) -name $bname* -executable |
678                                 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
679                                      END { print shortest }')
680                         if [ -f $full_seq ] && \
681                            [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
682                                 seq=$full_seq
683                         fi
684                 fi
685
686                 # the filename for the test and the name output are different.
687                 # we don't include the tests/ directory in the name output.
688                 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
689
690                 # Similarly, the result directory needs to replace the tests/
691                 # part of the test location.
692                 group=`dirname $seq`
693                 if $OPTIONS_HAVE_SECTIONS; then
694                         export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
695                         REPORT_DIR="$RESULT_BASE/$section"
696                 else
697                         export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
698                         REPORT_DIR="$RESULT_BASE"
699                 fi
700                 seqres="$REPORT_DIR/$seqnum"
701
702                 mkdir -p $RESULT_DIR
703                 echo -n "$seqnum"
704
705                 if $showme; then
706                         _expunge_test $seqnum
707                         if [ $? -eq 1 ]; then
708                             tc_status="expunge"
709                             continue
710                         fi
711                         echo
712                         start=0
713                         stop=0
714                         tc_status="list"
715                         n_notrun=`expr $n_notrun + 1`
716                         continue
717                 fi
718
719                 tc_status="pass"
720                 if [ ! -f $seq ]; then
721                         echo " - no such test?"
722                         continue
723                 fi
724
725                 # really going to try and run this one
726                 rm -f $seqres.out.bad
727
728                 # check if we really should run it
729                 _expunge_test $seqnum
730                 if [ $? -eq 1 ]; then
731                         tc_status="expunge"
732                         continue
733                 fi
734
735                 # record that we really tried to run this test.
736                 try="$try $seqnum"
737                 n_try=`expr $n_try + 1`
738
739                 # slashes now in names, sed barfs on them so use grep
740                 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
741                 if [ "X$lasttime" != X ]; then
742                         echo -n " ${lasttime}s ... "
743                 else
744                         echo -n "       " # prettier output with timestamps.
745                 fi
746                 rm -f core $seqres.notrun
747
748                 start=`_wallclock`
749                 $timestamp && echo -n " ["`date "+%T"`"]"
750                 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
751                 $LOGGER_PROG "run xfstest $seqnum"
752                 if [ -w /dev/kmsg ]; then
753                         export date_time=`date +"%F %T"`
754                         echo "run fstests $seqnum at $date_time" > /dev/kmsg
755                         # _check_dmesg depends on this log in dmesg
756                         touch ${RESULT_DIR}/check_dmesg
757                 fi
758                 _try_wipe_scratch_devs > /dev/null 2>&1
759                 if [ "$DUMP_OUTPUT" = true ]; then
760                         _run_seq 2>&1 | tee $tmp.out
761                         # Because $? would get tee's return code
762                         sts=${PIPESTATUS[0]}
763                 else
764                         _run_seq >$tmp.out 2>&1
765                         sts=$?
766                 fi
767
768                 if [ -f core ]; then
769                         _dump_err_cont "[dumped core]"
770                         mv core $RESULT_BASE/$seqnum.core
771                         err=true
772                 fi
773
774                 if [ -f $seqres.notrun ]; then
775                         $timestamp && _timestamp
776                         stop=`_wallclock`
777                         $timestamp || echo -n "[not run] "
778                         $timestamp && echo " [not run]" && \
779                                       echo -n " $seqnum -- "
780                         cat $seqres.notrun
781                         notrun="$notrun $seqnum"
782                         n_notrun=`expr $n_notrun + 1`
783                         tc_status="notrun"
784                         continue;
785                 fi
786
787                 if [ $sts -ne 0 ]; then
788                         _dump_err_cont "[failed, exit status $sts]"
789                         _test_unmount 2> /dev/null
790                         _scratch_unmount 2> /dev/null
791                         rm -f ${RESULT_DIR}/require_test*
792                         rm -f ${RESULT_DIR}/require_scratch*
793                         err=true
794                 else
795                         # the test apparently passed, so check for corruption
796                         # and log messages that shouldn't be there.
797                         _check_filesystems
798                         _check_dmesg || err=true
799                 fi
800
801                 # Scan for memory leaks after every test so that associating
802                 # a leak to a particular test will be as accurate as possible.
803                 _check_kmemleak || err=true
804
805                 # test ends after all checks are done.
806                 $timestamp && _timestamp
807                 stop=`_wallclock`
808
809                 if [ ! -f $seq.out ]; then
810                         _dump_err "no qualified output"
811                         err=true
812                         continue;
813                 fi
814
815                 # coreutils 8.16+ changed quote formats in error messages
816                 # from `foo' to 'foo'. Filter old versions to match the new
817                 # version.
818                 sed -i "s/\`/\'/g" $tmp.out
819                 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
820                         if ! $err ; then
821                                 echo "$seqnum `expr $stop - $start`" >>$tmp.time
822                                 echo -n " `expr $stop - $start`s"
823                         fi
824                         echo ""
825                 else
826                         _dump_err "- output mismatch (see $seqres.out.bad)"
827                         mv $tmp.out $seqres.out.bad
828                         $diff $seq.out $seqres.out.bad | {
829                         if test "$DIFF_LENGTH" -le 0; then
830                                 cat
831                         else
832                                 head -n "$DIFF_LENGTH"
833                                 echo "..."
834                                 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
835                                         " to see the entire diff)"
836                         fi; } | sed -e 's/^\(.\)/    \1/'
837                         err=true
838                 fi
839         done
840
841         # make sure we record the status of the last test we ran.
842         if $err ; then
843                 bad="$bad $seqnum"
844                 n_bad=`expr $n_bad + 1`
845                 tc_status="fail"
846         fi
847         if $do_report && ! $first_test ; then
848                 if [ $tc_status != "expunge" ] ; then
849                         _make_testcase_report "$prev_seq" "$tc_status"
850                 fi
851         fi
852
853         sect_stop=`_wallclock`
854         interrupt=false
855         _wrapup
856         interrupt=true
857         echo
858
859         _test_unmount 2> /dev/null
860         _scratch_unmount 2> /dev/null
861 done
862
863 interrupt=false
864 status=`expr $sum_bad != 0`
865 exit