#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
#
# Control script for QA
#
tmp=/tmp/$$
status=0
needwrap=true
needsum=true
n_try=0
try=""
n_bad=0
sum_bad=0
bad=""
n_notrun=0
notrun=""
interrupt=true
diff="diff -u"
showme=false
have_test_arg=false
randomize=false
export here=`pwd`
xfile=""
subdir_xfile=""
brief_test_summary=false
do_report=false
DUMP_OUTPUT=false

# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""

# start the initialisation work now
iam=check

export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}

# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}
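
# Illustration (not part of the original script): DIFF_LENGTH is an ordinary
# environment override, so a run such as
#
#     DIFF_LENGTH=0 ./check generic/001
#
# would print the entire output diff of a failing test instead of only the
# default first 10 lines.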

# by default don't output timestamps
timestamp=${TIMESTAMP:=false}

rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*

SRC_GROUPS="generic shared"
export SRC_DIR="tests"

usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -glusterfs          test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -overlay            test overlay
    -pvfs2              test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either the name of a test group to collect from all
the test dirs (e.g. quick) or the name of a test group to collect from
a specific test dir, in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to the name of a file inside each test directory.
For every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
            exit 0
}

get_sub_group_list()
{
        local d=$1
        local grp=$2

        test -s "$SRC_DIR/$d/group" || return 1

        local grpl=$(sed -n < $SRC_DIR/$d/group \
                -e 's/#.*//' \
                -e 's/$/ /' \
                -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
        echo $grpl
}
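
# Illustration (not part of the original script): a group file pairs a test id
# with the groups it belongs to, one test per line, e.g.
#
#     001 auto quick rw
#     002 auto metadata
#
# so with SRC_DIR=tests, "get_sub_group_list xfs quick" would print
# "tests/xfs/001" for the first line above.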

get_group_list()
{
        local grp=$1
        local grpl=""
        local sub=$(dirname $grp)

        if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
                # group is given as <subdir>/<group> (e.g. xfs/quick)
                grp=$(basename $grp)
                get_sub_group_list $sub $grp
                return
        fi

        for d in $SRC_GROUPS $FSTYP; do
                if ! test -d "$SRC_DIR/$d" ; then
                        continue
                fi
                grpl="$grpl $(get_sub_group_list $d $grp)"
        done
        echo $grpl
}
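
# Illustration (not part of the original script): with FSTYP=xfs,
# "get_group_list quick" collects the quick group from tests/generic,
# tests/shared and tests/xfs, while "get_group_list xfs/quick" collects it
# from tests/xfs only.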

# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
get_all_tests()
{
        touch $tmp.list
        for d in $SRC_GROUPS $FSTYP; do
                if ! test -d "$SRC_DIR/$d" ; then
                        continue
                fi
                ls $SRC_DIR/$d/* | \
                        grep -v "\..*" | \
                        grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
                        grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
        done
}

# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
trim_test_list()
{
        test_list="$*"

        rm -f $tmp.grep
        numsed=0
        for t in $test_list
        do
            if [ $numsed -gt 100 ]; then
                grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
                mv $tmp.tmp $tmp.list
                numsed=0
                rm -f $tmp.grep
            fi
            echo "^$t\$" >>$tmp.grep
            numsed=`expr $numsed + 1`
        done
        grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
        mv $tmp.tmp $tmp.list
        rm -f $tmp.grep
}
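
# Illustration (not part of the original script): trim_test_list builds a file
# of anchored grep patterns such as
#
#     ^tests/generic/001$
#     ^tests/xfs/002$
#
# and filters $tmp.list through "grep -v -f", flushing the pattern file once
# more than 100 patterns accumulate so a huge exclude list never turns into
# one enormous grep invocation.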


_wallclock()
{
    date "+%s"
}

_timestamp()
{
    now=`date "+%T"`
    echo -n " [$now]"
}

_prepare_test_list()
{
        unset list
        # Tests specified on the command line
        if [ -s $tmp.arglist ]; then
                cat $tmp.arglist > $tmp.list
        else
                touch $tmp.list
        fi

        # Specified groups to include
        # Note that the CLI processing adds a leading space to the first group
        # parameter, so we have to catch that here when checking for "all"
        if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
                # no test numbers, do everything
                get_all_tests
        else
                for group in $GROUP_LIST; do
                        list=$(get_group_list $group)
                        if [ -z "$list" ]; then
                                echo "Group \"$group\" is empty or not defined?"
                                exit 1
                        fi

                        for t in $list; do
                                grep -s "^$t\$" $tmp.list >/dev/null || \
                                                        echo "$t" >>$tmp.list
                        done
                done
        fi

        # Specified groups to exclude
        for xgroup in $XGROUP_LIST; do
                list=$(get_group_list $xgroup)
                if [ -z "$list" ]; then
                        echo "Group \"$xgroup\" is empty or not defined?"
                        exit 1
                fi

                trim_test_list $list
        done

        # sort the list of tests into numeric order
        if $randomize; then
                if type shuf >& /dev/null; then
                        sorter="shuf"
                else
                        sorter="awk -v seed=$RANDOM -f randomize.awk"
                fi
        else
                sorter="cat"
        fi
        list=`sort -n $tmp.list | uniq | $sorter`
        rm -f $tmp.list
}
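
# Illustration (not part of the original script): after this function, $list
# holds a whitespace-separated set of test paths such as
#
#     tests/generic/001 tests/generic/002 tests/xfs/001
#
# sorted and de-duplicated, or shuffled when -r was given (via shuf if it is
# available, otherwise randomize.awk).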

# Process command arguments first.
while [ $# -gt 0 ]; do
        case "$1" in
        -\? | -h | --help) usage ;;

        -nfs)           FSTYP=nfs ;;
        -glusterfs)     FSTYP=glusterfs ;;
        -cifs)          FSTYP=cifs ;;
        -9p)            FSTYP=9p ;;
        -overlay)       FSTYP=overlay; export OVERLAY=true ;;
        -pvfs2)         FSTYP=pvfs2 ;;
        -tmpfs)         FSTYP=tmpfs ;;
        -ubifs)         FSTYP=ubifs ;;

        -g)     group=$2 ; shift ;
                GROUP_LIST="$GROUP_LIST ${group//,/ }"
                ;;

        -x)     xgroup=$2 ; shift ;
                XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
                ;;

        -X)     subdir_xfile=$2; shift ;
                ;;
        -E)     xfile=$2; shift ;
                if [ -f $xfile ]; then
                        sed "s/#.*$//" "$xfile" >> $tmp.xlist
                fi
                ;;
        -s)     RUN_SECTION="$RUN_SECTION $2"; shift ;;
        -S)     EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
        -l)     diff="diff" ;;
        -udiff) diff="$diff -u" ;;

        -n)     showme=true ;;
        -r)     randomize=true ;;

        -T)     timestamp=true ;;
        -d)     DUMP_OUTPUT=true ;;
        -b)     brief_test_summary=true;;
        -R)     report_fmt=$2 ; shift ;
                REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
                do_report=true
                ;;
        --large-fs) export LARGE_SCRATCH_DEV=yes ;;
        --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

        -*)     usage ;;
        *)      # not an argument, we've got tests now.
                have_test_arg=true ;;
        esac

        # if we've found a test specification, then break out of the processing
        # loop before we shift the arguments so that this is the first argument
        # that we process in the test arg loop below.
        if $have_test_arg; then
                break;
        fi

        shift
done

# we need common/rc, which also sources common/config. We need to source it
# after processing args, as overlay needs FSTYP set before sourcing common/config
if ! . ./common/rc; then
        echo "check: failed to source common/rc"
        exit 1
fi

if [ -n "$subdir_xfile" ]; then
        for d in $SRC_GROUPS $FSTYP; do
                [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
                for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
                        echo $d/$f >> $tmp.xlist
                done
        done
fi
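
# Illustration (not part of the original script): a per-directory exclude file
# named with -X (e.g. "-X .exclude" looks for tests/xfs/.exclude and friends)
# simply lists test names, one per line, with optional "#" comments:
#
#     001    # hangs on this hardware
#     107
#
# Each surviving entry is turned into "<test dir>/<test name>" and appended to
# the expunge list in $tmp.xlist.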

# Process tests from command line now.
if $have_test_arg; then
        while [ $# -gt 0 ]; do
                case "$1" in
                -*)     echo "Arguments before tests, please!"
                        status=1
                        exit $status
                        ;;
                *)      # Expand test pattern (e.g. xfs/???, *fs/001)
                        list=$(cd $SRC_DIR; echo $1)
                        for t in $list; do
                                test_dir=`dirname $t`
                                test_dir=${test_dir#$SRC_DIR/*}
                                test_name=`basename $t`
                                group_file=$SRC_DIR/$test_dir/group

                                if egrep -q "^$test_name" $group_file; then
                                        # in group file ... OK
                                        echo $SRC_DIR/$test_dir/$test_name \
                                                >>$tmp.arglist
                                else
                                        # oops
                                        echo "$t - unknown test, ignored"
                                fi
                        done
                        ;;
                esac

                shift
        done
elif [ -z "$GROUP_LIST" ]; then
        # default group list is the auto group. If any other group or test is
        # specified, we use that instead.
        GROUP_LIST="auto"
fi

if [ `id -u` -ne 0 ]
then
    echo "check: QA must be run as root"
    exit 1
fi

_wipe_counters()
{
        n_try="0"
        n_bad="0"
        n_notrun="0"
        unset try notrun bad
}

_wrapup()
{
        seq="check"
        check="$RESULT_BASE/check"

        if $showme; then
                if $needwrap; then
                        if $do_report; then
                                _make_section_report
                        fi
                        needwrap=false
                fi
        elif $needwrap; then
                if [ -f $check.time -a -f $tmp.time ]; then
                        cat $check.time $tmp.time  \
                                | $AWK_PROG '
                                { t[$1] = $2 }
                                END {
                                        if (NR > 0) {
                                                for (i in t) print i " " t[i]
                                        }
                                }' \
                                | sort -n >$tmp.out
                        mv $tmp.out $check.time
                fi

                echo "" >>$check.log
                date >>$check.log

                echo "SECTION       -- $section" >>$tmp.summary
                echo "=========================" >>$tmp.summary
                if [ ! -z "$n_try" -a $n_try != 0 ]; then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Ran:$try"
                                echo "Ran:$try" >>$tmp.summary
                        fi
                        echo "Ran:$try" >>$check.log
                fi

                $interrupt && echo "Interrupted!" | tee -a $check.log

                if [ ! -z "$notrun" ]; then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Not run:$notrun"
                                echo "Not run:$notrun" >>$tmp.summary
                        fi
                        echo "Not run:$notrun" >>$check.log
                fi

                if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
                        echo "Failures:$bad"
                        echo "Failed $n_bad of $n_try tests"
                        echo "Failures:$bad" >>$check.log
                        echo "Failed $n_bad of $n_try tests" >>$check.log
                        echo "Failures:$bad" >>$tmp.summary
                        echo "Failed $n_bad of $n_try tests" >>$tmp.summary
                else
                        echo "Passed all $n_try tests"
                        echo "Passed all $n_try tests" >>$check.log
                        echo "Passed all $n_try tests" >>$tmp.summary
                fi
                echo "" >>$tmp.summary
                if $do_report; then
                        _make_section_report
                fi
                needwrap=false
        fi

        sum_bad=`expr $sum_bad + $n_bad`
        _wipe_counters
        rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
        if ! $OPTIONS_HAVE_SECTIONS; then
                rm -f $tmp.*
        fi
}
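
# Illustration (not part of the original script): $check.time keeps one
# "<test name> <seconds>" pair per test, e.g.
#
#     generic/001 3
#     xfs/014 27
#
# The awk merge above folds this run's $tmp.time into it, so the stored time
# always reflects the most recent run of each test.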

_summary()
{
        _wrapup
        if $showme; then
                :
        elif $needsum; then
                count=`wc -L $tmp.summary | cut -f1 -d" "`
                cat $tmp.summary
                needsum=false
        fi
        rm -f $tmp.*
}

_check_filesystems()
{
        if [ -f ${RESULT_DIR}/require_test ]; then
                _check_test_fs || err=true
                rm -f ${RESULT_DIR}/require_test*
        else
                _test_unmount 2> /dev/null
        fi
        if [ -f ${RESULT_DIR}/require_scratch ]; then
                _check_scratch_fs || err=true
                rm -f ${RESULT_DIR}/require_scratch*
        fi
        _scratch_unmount 2> /dev/null
}

_expunge_test()
{
        local TEST_ID="$1"
        if [ -s $tmp.xlist ]; then
                if grep -q $TEST_ID $tmp.xlist; then
                        echo "       [expunged]"
                        return 1
                fi
        fi
        return 0
}
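
# Illustration (not part of the original script): the expunge check is an
# unanchored, unquoted grep of the current test id against $tmp.xlist, so a
# list entry only has to contain the id as a substring for the test to be
# skipped as "[expunged]".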

# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}

# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
# test framework.
_run_seq() {
        bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
}
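
# Illustration (not part of the original script): oom_score_adj ranges from
# -1000 (never OOM-kill this process) to 1000 (kill it first) and is inherited
# across fork, which is why the wrapper above writes 250 inside the child
# shell before exec'ing the test.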

_detect_kmemleak
_prepare_test_list

if $OPTIONS_HAVE_SECTIONS; then
        trap "_summary; exit \$status" 0 1 2 3 15
else
        trap "_wrapup; exit \$status" 0 1 2 3 15
fi

for section in $HOST_OPTIONS_SECTIONS; do
        OLD_FSTYP=$FSTYP
        OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
        get_next_config $section

        # Do we need to run only some sections?
        if [ ! -z "$RUN_SECTION" ]; then
                skip=true
                for s in $RUN_SECTION; do
                        if [ $section == $s ]; then
                                skip=false
                                break;
                        fi
                done
                if $skip; then
                        continue
                fi
        fi

        # Did this section get excluded?
        if [ ! -z "$EXCLUDE_SECTION" ]; then
                skip=false
                for s in $EXCLUDE_SECTION; do
                        if [ $section == $s ]; then
                                skip=true
                                break;
                        fi
                done
                if $skip; then
                        continue
                fi
        fi

        mkdir -p $RESULT_BASE
        if [ ! -d $RESULT_BASE ]; then
                echo "failed to create results directory $RESULT_BASE"
                status=1
                exit
        fi

        if $OPTIONS_HAVE_SECTIONS; then
                echo "SECTION       -- $section"
        fi

        sect_start=`_wallclock`
        if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
                echo "RECREATING    -- $FSTYP on $TEST_DEV"
                _test_unmount 2> /dev/null
                if ! _test_mkfs >$tmp.err 2>&1
                then
                        echo "our local _test_mkfs routine ..."
                        cat $tmp.err
                        echo "check: failed to mkfs \$TEST_DEV using specified options"
                        status=1
                        exit
                fi
                if ! _test_mount
                then
                        echo "check: failed to mount $TEST_DEV on $TEST_DIR"
                        status=1
                        exit
                fi
                _prepare_test_list
        elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
                _test_unmount 2> /dev/null
                if ! _test_mount
                then
                        echo "check: failed to mount $TEST_DEV on $TEST_DIR"
                        status=1
                        exit
                fi
        fi

        init_rc

        seq="check"
        check="$RESULT_BASE/check"

        # don't leave old full output behind on a clean run
        rm -f $check.full

        [ -f $check.time ] || touch $check.time

        # print out our test configuration
        echo "FSTYP         -- `_full_fstyp_details`"
        echo "PLATFORM      -- `_full_platform_details`"
        if [ ! -z "$SCRATCH_DEV" ]; then
          echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
          echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
        fi
        echo
        needwrap=true

        if [ ! -z "$SCRATCH_DEV" ]; then
          _scratch_unmount 2> /dev/null
          # call the overridden mkfs - make sure the FS is built
          # the same as we'll create it later.

          if ! _scratch_mkfs >$tmp.err 2>&1
          then
              echo "our local _scratch_mkfs routine ..."
              cat $tmp.err
              echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
              status=1
              exit
          fi

          # call the overridden mount - make sure the FS mounts with
          # the same options that we'll mount with later.
          if ! _try_scratch_mount >$tmp.err 2>&1
          then
              echo "our local mount routine ..."
              cat $tmp.err
              echo "check: failed to mount \$SCRATCH_DEV using specified options"
              status=1
              exit
          else
              _scratch_unmount
          fi
        fi

        seqres="$check"
        _check_test_fs

        err=false
        first_test=true
        prev_seq=""
        for seq in $list ; do
                # Run report for previous test!
                if $err ; then
                        bad="$bad $seqnum"
                        n_bad=`expr $n_bad + 1`
                        tc_status="fail"
                fi
                if $do_report && ! $first_test ; then
                        if [ $tc_status != "expunge" ] ; then
                                _make_testcase_report "$prev_seq" "$tc_status"
                        fi
                fi
                first_test=false

                err=false
                prev_seq="$seq"
                if [ ! -f $seq ]; then
                        # Try to get the full name in case the user supplied only
                        # the seq id and the test has a name. It takes a bit of
                        # care to find the test itself and not its sample output
                        # or helper files.
                        bname=$(basename $seq)
                        full_seq=$(find $(dirname $seq) -name $bname* -executable |
                                awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
                                     END { print shortest }')
                        if [ -f $full_seq ] && \
                           [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
                                seq=$full_seq
                        fi
                fi

                # the filename for the test and the name output are different.
                # we don't include the tests/ directory in the name output.
                export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

                # Similarly, the result directory needs to replace the tests/
                # part of the test location.
                group=`dirname $seq`
                if $OPTIONS_HAVE_SECTIONS; then
                        export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
                        REPORT_DIR="$RESULT_BASE/$section"
                else
                        export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
                        REPORT_DIR="$RESULT_BASE"
                fi
                seqres="$REPORT_DIR/$seqnum"

                mkdir -p $RESULT_DIR
                echo -n "$seqnum"

                if $showme; then
                        _expunge_test $seqnum
                        if [ $? -eq 1 ]; then
                            tc_status="expunge"
                            continue
                        fi
                        echo
                        start=0
                        stop=0
                        tc_status="list"
                        n_notrun=`expr $n_notrun + 1`
                        continue
                fi

                tc_status="pass"
                if [ ! -f $seq ]; then
                        echo " - no such test?"
                        continue
                fi

                # really going to try and run this one
                rm -f $seqres.out.bad

                # check if we really should run it
                _expunge_test $seqnum
                if [ $? -eq 1 ]; then
                        tc_status="expunge"
                        continue
                fi

                # record that we really tried to run this test.
                try="$try $seqnum"
                n_try=`expr $n_try + 1`

                # slashes now in names, sed barfs on them so use grep
                lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
                if [ "X$lasttime" != X ]; then
                        echo -n " ${lasttime}s ... "
                else
                        echo -n "       " # prettier output with timestamps.
                fi
                rm -f core $seqres.notrun

                start=`_wallclock`
                $timestamp && echo -n " ["`date "+%T"`"]"
                [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
                $LOGGER_PROG "run xfstest $seqnum"
                if [ -w /dev/kmsg ]; then
                        export date_time=`date +"%F %T"`
                        echo "run fstests $seqnum at $date_time" > /dev/kmsg
                        # _check_dmesg depends on this log in dmesg
                        touch ${RESULT_DIR}/check_dmesg
                fi
                _try_wipe_scratch_devs > /dev/null 2>&1
                if [ "$DUMP_OUTPUT" = true ]; then
                        _run_seq 2>&1 | tee $tmp.out
                        # Because $? would get tee's return code
                        sts=${PIPESTATUS[0]}
                else
                        _run_seq >$tmp.out 2>&1
                        sts=$?
                fi
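
                # Illustration (not part of the original script): in the
                # "_run_seq | tee" pipeline above, $? reports tee's status, so
                # e.g. "false | tee /dev/null" leaves $? at 0 while
                # ${PIPESTATUS[0]} still records the 1 from false.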

                if [ -f core ]; then
                        _dump_err_cont "[dumped core]"
                        mv core $RESULT_BASE/$seqnum.core
                        err=true
                fi

                if [ -f $seqres.notrun ]; then
                        $timestamp && _timestamp
                        stop=`_wallclock`
                        $timestamp || echo -n "[not run] "
                        $timestamp && echo " [not run]" && \
                                      echo -n " $seqnum -- "
                        cat $seqres.notrun
                        notrun="$notrun $seqnum"
                        n_notrun=`expr $n_notrun + 1`
                        tc_status="notrun"
                        continue;
                fi

                if [ $sts -ne 0 ]; then
                        _dump_err_cont "[failed, exit status $sts]"
                        _test_unmount 2> /dev/null
                        _scratch_unmount 2> /dev/null
                        rm -f ${RESULT_DIR}/require_test*
                        rm -f ${RESULT_DIR}/require_scratch*
                        err=true
                else
                        # the test apparently passed, so check for corruption
                        # and log messages that shouldn't be there.
                        _check_filesystems
                        _check_dmesg || err=true
                fi

                # Scan for memory leaks after every test so that associating
                # a leak to a particular test will be as accurate as possible.
                _check_kmemleak || err=true

                # test ends after all checks are done.
                $timestamp && _timestamp
                stop=`_wallclock`

                if [ ! -f $seq.out ]; then
                        _dump_err "no qualified output"
                        err=true
                        continue;
                fi

                # coreutils 8.16+ changed quote formats in error messages
                # from `foo' to 'foo'. Filter old versions to match the new
                # version.
                sed -i "s/\`/\'/g" $tmp.out
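                # Illustration (not part of the original script): the sed
                # filter above turns old-style quoting in the captured output,
                # e.g.
                #     rm: cannot remove `foo': No such file or directory
                # into
                #     rm: cannot remove 'foo': No such file or directory
                # so it compares equal to golden output recorded with newer
                # coreutils.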
                if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
                        if ! $err ; then
                                echo "$seqnum `expr $stop - $start`" >>$tmp.time
                                echo -n " `expr $stop - $start`s"
                        fi
                        echo ""
                else
                        _dump_err "- output mismatch (see $seqres.out.bad)"
                        mv $tmp.out $seqres.out.bad
                        $diff $seq.out $seqres.out.bad | {
                        if test "$DIFF_LENGTH" -le 0; then
                                cat
                        else
                                head -n "$DIFF_LENGTH"
                                echo "..."
                                echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
                                        " to see the entire diff)"
                        fi; } | sed -e 's/^\(.\)/    \1/'
                        err=true
                fi
        done

        # make sure we record the status of the last test we ran.
        if $err ; then
                bad="$bad $seqnum"
                n_bad=`expr $n_bad + 1`
                tc_status="fail"
        fi
        if $do_report && ! $first_test ; then
                if [ $tc_status != "expunge" ] ; then
                        _make_testcase_report "$prev_seq" "$tc_status"
                fi
        fi

        sect_stop=`_wallclock`
        interrupt=false
        _wrapup
        interrupt=true
        echo

        _test_unmount 2> /dev/null
        _scratch_unmount 2> /dev/null
done

interrupt=false
status=`expr $sum_bad != 0`
exit