# btrfs/036: fix sporadic failures when unmounting scratch filesystem
# [xfstests-dev.git] / check
1 #!/bin/bash
2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
4 #
5 # Control script for QA
6 #
7 tmp=/tmp/$$
8 status=0
9 needwrap=true
10 needsum=true
11 n_try=0
12 try=""
13 n_bad=0
14 sum_bad=0
15 bad=""
16 n_notrun=0
17 notrun=""
18 interrupt=true
19 diff="diff -u"
20 showme=false
21 have_test_arg=false
22 randomize=false
23 export here=`pwd`
24 xfile=""
25 subdir_xfile=""
26 brief_test_summary=false
27 do_report=false
28 DUMP_OUTPUT=false
29
30 # This is a global variable used to pass test failure text to reporting gunk
31 _err_msg=""
32
33 # start the initialisation work now
34 iam=check
35
36 export MSGVERB="text:action"
37 export QA_CHECK_FS=${QA_CHECK_FS:=true}
38
39 # number of diff lines from a failed test, 0 for whole output
40 export DIFF_LENGTH=${DIFF_LENGTH:=10}
41
42 # by default don't output timestamps
43 timestamp=${TIMESTAMP:=false}
44
45 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
46
47 SRC_GROUPS="generic shared"
48 export SRC_DIR="tests"
49
# Print the usage/help message and exit successfully.  The bulk of the
# text is one single-quoted literal, so nothing inside it is expanded.
usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -glusterfs                test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -overlay            test overlay
    -pvfs2          test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
for every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
            exit 0
}
113
# get_sub_group_list <dir> <group>
#
# Print the tests under tests/<dir> that belong to <group>, as
# "$SRC_DIR/<dir>/<name>" paths.  Reads the per-directory "group" file,
# whose lines look like "<test name> <group> <group> ...".
# Returns 1 if the group file is missing or empty.
get_sub_group_list()
{
	local d=$1
	local grp=$2

	test -s "$SRC_DIR/$d/group" || return 1

	# strip comments, append a space so " $grp " also matches the last
	# group on a line, then print the path for every matching test
	local grpl=$(sed -n < $SRC_DIR/$d/group \
		-e 's/#.*//' \
		-e 's/$/ /' \
		-e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
	echo $grpl
}
127
# get_group_list <group>
#
# Expand a group name into the list of matching test paths.  A group may
# be qualified as <subdir>/<group> (e.g. xfs/quick) to restrict it to one
# test directory; otherwise all of $SRC_GROUPS plus the current fs type's
# directory are searched.
get_group_list()
{
	local grp=$1
	local grpl=""
	local sub=$(dirname $grp)
	local fsgroup="$FSTYP"

	if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
		# group is given as <subdir>/<group> (e.g. xfs/quick)
		grp=$(basename $grp)
		get_sub_group_list $sub $grp
		return
	fi

	# ext2/ext3 tests live in the ext4 test directory
	if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
	    fsgroup=ext4
	fi
	for d in $SRC_GROUPS $fsgroup; do
		if ! test -d "$SRC_DIR/$d" ; then
			continue
		fi
		grpl="$grpl $(get_sub_group_list $d $grp)"
	done
	echo $grpl
}
153
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc.
# Appends the results to $tmp.list.
get_all_tests()
{
	touch $tmp.list
	for d in $SRC_GROUPS $FSTYP; do
		if ! test -d "$SRC_DIR/$d" ; then
			continue
		fi
		# drop files containing a dot (e.g. 001.out), keep only
		# valid test names, and skip per-directory metadata
		ls $SRC_DIR/$d/* | \
			grep -v "\..*" | \
			grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
			grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
	done
}
169
# takes the list of tests to run in $tmp.list, and removes the tests passed
# to the function from that list.  The names are turned into anchored grep
# patterns and filtered out in batches.
trim_test_list()
{
	test_list="$*"

	rm -f $tmp.grep
	numsed=0
	for t in $test_list; do
		# flush the pattern file periodically so grep -f stays cheap
		if [ "$numsed" -gt 100 ]; then
			grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
			mv $tmp.tmp $tmp.list
			numsed=0
			rm -f $tmp.grep
		fi
		echo "^$t\$" >>$tmp.grep
		numsed=$((numsed + 1))
	done
	# filter out whatever patterns are still pending
	grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
	mv $tmp.tmp $tmp.list
	rm -f $tmp.grep
}
193
194
# Current wall-clock time as seconds since the epoch.
_wallclock()
{
    date +%s
}
199
# Print the current time of day as " [HH:MM:SS]" without a trailing newline.
_timestamp()
{
    now=$(date "+%T")
    printf ' [%s]' "$now"
}
205
# Build the list of tests to run into the global $list, honouring explicit
# command-line test names ($tmp.arglist), included groups ($GROUP_LIST),
# excluded groups ($XGROUP_LIST) and the -r randomize flag.
_prepare_test_list()
{
	unset list
	# Tests specified on the command line
	if [ -s $tmp.arglist ]; then
		cat $tmp.arglist > $tmp.list
	else
		touch $tmp.list
	fi

	# Specified groups to include
	# Note that the CLI processing adds a leading space to the first group
	# parameter, so we have to catch that here checking for "all"
	if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
		# no test numbers, do everything
		get_all_tests
	else
		for group in $GROUP_LIST; do
			list=$(get_group_list $group)
			if [ -z "$list" ]; then
				echo "Group \"$group\" is empty or not defined?"
				exit 1
			fi

			# add each group member, skipping duplicates
			for t in $list; do
				grep -s "^$t\$" $tmp.list >/dev/null || \
							echo "$t" >>$tmp.list
			done
		done
	fi

	# Specified groups to exclude
	for xgroup in $XGROUP_LIST; do
		list=$(get_group_list $xgroup)
		if [ -z "$list" ]; then
			echo "Group \"$xgroup\" is empty or not defined?"
			exit 1
		fi

		trim_test_list $list
	done

	# sort the list of tests into numeric order
	if $randomize; then
		if type shuf >& /dev/null; then
			sorter="shuf"
		else
			# fall back to the awk shuffle shipped with xfstests
			sorter="awk -v seed=$RANDOM -f randomize.awk"
		fi
	else
		sorter="cat"
	fi
	list=`sort -n $tmp.list | uniq | $sorter`
	rm -f $tmp.list
}
261
# Process command arguments first.  Option flags are consumed here; the
# loop stops at the first non-option word so the remaining words can be
# handled by the test-name loop further down.
while [ $# -gt 0 ]; do
	case "$1" in
	-\? | -h | --help) usage ;;

	-nfs)		FSTYP=nfs ;;
	-glusterfs)	FSTYP=glusterfs ;;
	-cifs)		FSTYP=cifs ;;
	-9p)		FSTYP=9p ;;
	-overlay)	FSTYP=overlay; export OVERLAY=true ;;
	-pvfs2)		FSTYP=pvfs2 ;;
	-tmpfs)		FSTYP=tmpfs ;;
	-ubifs)		FSTYP=ubifs ;;

	-g)	group=$2 ; shift ;
		GROUP_LIST="$GROUP_LIST ${group//,/ }"
		;;

	-x)	xgroup=$2 ; shift ;
		XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
		;;

	-X)	subdir_xfile=$2; shift ;
		;;
	-E)	xfile=$2; shift ;
		if [ -f $xfile ]; then
			# strip comments while collecting the exclude list
			sed "s/#.*$//" "$xfile" >> $tmp.xlist
		fi
		;;
	-s)	RUN_SECTION="$RUN_SECTION $2"; shift ;;
	-S)	EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
	-l)	diff="diff" ;;
	-udiff)	diff="$diff -u" ;;

	-n)	showme=true ;;
	-r)	randomize=true ;;

	-T)	timestamp=true ;;
	-d)	DUMP_OUTPUT=true ;;
	-b)	brief_test_summary=true;;
	-R)	report_fmt=$2 ; shift ;
		REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
		do_report=true
		;;
	--large-fs) export LARGE_SCRATCH_DEV=yes ;;
	# Strip the option name from "$1" itself.  The old code expanded
	# "${r#*=}", but no variable r is ever set, so the requested value
	# was silently lost and SCRATCH_DEV_EMPTY_SPACE exported empty.
	--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

	-*)	usage ;;
	*)	# not an argument, we've got tests now.
		have_test_arg=true ;;
	esac

	# if we've found a test specification, the break out of the processing
	# loop before we shift the arguments so that this is the first argument
	# that we process in the test arg loop below.
	if $have_test_arg; then
		break;
	fi

	shift
done
323
324 # we need common/rc, that also sources common/config. We need to source it
325 # after processing args, overlay needs FSTYP set before sourcing common/config
326 if ! . ./common/rc; then
327         echo "check: failed to source common/rc"
328         exit 1
329 fi
330
331 if [ -n "$subdir_xfile" ]; then
332         for d in $SRC_GROUPS $FSTYP; do
333                 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
334                 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
335                         echo $d/$f >> $tmp.xlist
336                 done
337         done
338 fi
339
# Process tests from command line now.
if $have_test_arg; then
	while [ $# -gt 0 ]; do
		case "$1" in
		-*)	echo "Arguments before tests, please!"
			status=1
			exit $status
			;;
		*)	# Expand test pattern (e.g. xfs/???, *fs/001)
			list=$(cd $SRC_DIR; echo $1)
			for t in $list; do
				test_dir=`dirname $t`
				test_dir=${test_dir#$SRC_DIR/*}
				test_name=`basename $t`
				group_file=$SRC_DIR/$test_dir/group

				# only accept names listed in the dir's
				# group file; everything else is noise
				if egrep -q "^$test_name" $group_file; then
					# in group file ... OK
					echo $SRC_DIR/$test_dir/$test_name \
						>>$tmp.arglist
				else
					# oops
					echo "$t - unknown test, ignored"
				fi
			done
			;;
		esac

		shift
	done
elif [ -z "$GROUP_LIST" ]; then
	# default group list is the auto group. If any other group or test is
	# specified, we use that instead.
	GROUP_LIST="auto"
fi

# the tests mkfs/mount devices and issue privileged ioctls; they cannot
# run unprivileged
if [ `id -u` -ne 0 ]
then
    echo "check: QA must be run as root"
    exit 1
fi
381
# Reset the per-section pass/fail/skip counters and name lists.
_wipe_counters()
{
	n_try=0
	n_bad=0
	n_notrun=0
	unset try
	unset notrun
	unset bad
}
389
# Emit the per-section summary: merge per-test timings into $check.time,
# append the Ran / Not run / Failures lists to $check.log and $tmp.summary,
# and fire the section report if -R was given.  Guarded by $needwrap so it
# only runs once per section even when invoked again from the exit trap.
_wrapup()
{
	seq="check"
	check="$RESULT_BASE/check"

	if $showme; then
		if $needwrap; then
			if $do_report; then
				_make_section_report
			fi
			needwrap=false
		fi
	elif $needwrap; then
		if [ -f $check.time -a -f $tmp.time ]; then
			# merge old and new timings; entries from this run
			# overwrite older ones for the same test name
			cat $check.time $tmp.time  \
				| $AWK_PROG '
				{ t[$1] = $2 }
				END {
					if (NR > 0) {
						for (i in t) print i " " t[i]
					}
				}' \
				| sort -n >$tmp.out
			mv $tmp.out $check.time
		fi

		echo "" >>$check.log
		date >>$check.log

		echo "SECTION       -- $section" >>$tmp.summary
		echo "=========================" >>$tmp.summary
		if [ ! -z "$n_try" -a $n_try != 0 ]; then
			if [ $brief_test_summary == "false" ]; then
				echo "Ran:$try"
				echo "Ran:$try" >>$tmp.summary
			fi
			echo "Ran:$try" >>$check.log
		fi

		# $interrupt is cleared around the normal end-of-section call,
		# so this line only fires when we got here via the exit trap
		$interrupt && echo "Interrupted!" | tee -a $check.log

		if [ ! -z "$notrun" ]; then
			if [ $brief_test_summary == "false" ]; then
				echo "Not run:$notrun"
				echo "Not run:$notrun" >>$tmp.summary
			fi
			echo "Not run:$notrun" >>$check.log
		fi

		if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
			echo "Failures:$bad"
			echo "Failed $n_bad of $n_try tests"
			echo "Failures:$bad" >>$check.log
			echo "Failed $n_bad of $n_try tests" >>$check.log
			echo "Failures:$bad" >>$tmp.summary
			echo "Failed $n_bad of $n_try tests" >>$tmp.summary
		else
			echo "Passed all $n_try tests"
			echo "Passed all $n_try tests" >>$check.log
			echo "Passed all $n_try tests" >>$tmp.summary
		fi
		echo "" >>$tmp.summary
		if $do_report; then
			_make_section_report
		fi
		needwrap=false
	fi

	sum_bad=`expr $sum_bad + $n_bad`
	_wipe_counters
	rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
	if ! $OPTIONS_HAVE_SECTIONS; then
		# single-config run: all state can go now; with sections,
		# $tmp.summary must survive until _summary prints it
		rm -f $tmp.*
	fi
}
465
# Final wrap-up, run from the exit trap when config sections are in use:
# finish the current section via _wrapup, then print the accumulated
# multi-section summary exactly once.
_summary()
{
	_wrapup
	if $showme; then
		:
	elif $needsum; then
		# NOTE(review): wc -L reports the longest line length, not a
		# line count, and $count is never read afterwards - this looks
		# like dead code; confirm against callers before removing.
		count=`wc -L $tmp.summary | cut -f1 -d" "`
		cat $tmp.summary
		needsum=false
	fi
	rm -f $tmp.*
}
478
# Post-test filesystem checking.  A test touches ${RESULT_DIR}/require_test
# or require_scratch to request a consistency check of the respective fs;
# otherwise the device is simply unmounted.  Sets the caller-scope err=true
# when a check fails.
_check_filesystems()
{
	if [ -f ${RESULT_DIR}/require_test ]; then
		_check_test_fs || err=true
		rm -f ${RESULT_DIR}/require_test*
	else
		_test_unmount 2> /dev/null
	fi
	if [ -f ${RESULT_DIR}/require_scratch ]; then
		_check_scratch_fs || err=true
		rm -f ${RESULT_DIR}/require_scratch*
	fi
	_scratch_unmount 2> /dev/null
}
493
# _expunge_test <seqnum>
#
# Check whether a test appears in the exclude list ($tmp.xlist).
# Prints an "[expunged]" marker and returns 1 if it does, 0 otherwise.
_expunge_test()
{
	local TEST_ID="$1"
	if [ -s $tmp.xlist ]; then
		# Anchor the match to the whole test name: a plain
		# "grep -q $TEST_ID" is a substring match, so excluding
		# generic/001 would also expunge generic/0011.  Exclude
		# lines may carry trailing whitespace left over from
		# comment stripping, so accept whitespace after the name.
		if grep -q "^${TEST_ID}\([[:space:]]\|\$\)" $tmp.xlist; then
			echo "       [expunged]"
			return 1
		fi
	fi
	return 0
}
505
# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}

# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
# test framework.  The adjustment happens in the child shell so the check
# process itself keeps its -1000 score.
_run_seq() {
	bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
}
516
517 _detect_kmemleak
518 _prepare_test_list
519
520 if $OPTIONS_HAVE_SECTIONS; then
521         trap "_summary; exit \$status" 0 1 2 3 15
522 else
523         trap "_wrapup; exit \$status" 0 1 2 3 15
524 fi
525
# Main loop: run the selected test list once for every configured section.
for section in $HOST_OPTIONS_SECTIONS; do
	OLD_FSTYP=$FSTYP
	OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
	get_next_config $section

	# Do we need to run only some sections ?
	if [ ! -z "$RUN_SECTION" ]; then
		skip=true
		for s in $RUN_SECTION; do
			if [ $section == $s ]; then
				skip=false
				break;
			fi
		done
		if $skip; then
			continue
		fi
	fi

	# Did this section get excluded?
	if [ ! -z "$EXCLUDE_SECTION" ]; then
		skip=false
		for s in $EXCLUDE_SECTION; do
			if [ $section == $s ]; then
				skip=true
				break;
			fi
		done
		if $skip; then
			continue
		fi
	fi

	mkdir -p $RESULT_BASE
	if [ ! -d $RESULT_BASE ]; then
		echo "failed to create results directory $RESULT_BASE"
		status=1
		exit
	fi

	if $OPTIONS_HAVE_SECTIONS; then
		echo "SECTION       -- $section"
	fi

	sect_start=`_wallclock`
	# remake the test device filesystem when the config asks for it or
	# the fs type changed between sections
	if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
		echo "RECREATING    -- $FSTYP on $TEST_DEV"
		_test_unmount 2> /dev/null
		if ! _test_mkfs >$tmp.err 2>&1
		then
			echo "our local _test_mkfs routine ..."
			cat $tmp.err
			echo "check: failed to mkfs \$TEST_DEV using specified options"
			status=1
			exit
		fi
		if ! _test_mount
		then
			echo "check: failed to mount $TEST_DEV on $TEST_DIR"
			status=1
			exit
		fi
		# the test list can depend on the fs type, so rebuild it
		_prepare_test_list
	elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
		# same fs, new mount options: remount only
		_test_unmount 2> /dev/null
		if ! _test_mount
		then
			echo "check: failed to mount $TEST_DEV on $TEST_DIR"
			status=1
			exit
		fi
	fi

	init_rc

	seq="check"
	check="$RESULT_BASE/check"

	# don't leave old full output behind on a clean run
	rm -f $check.full

	[ -f $check.time ] || touch $check.time

	# print out our test configuration
	echo "FSTYP         -- `_full_fstyp_details`"
	echo "PLATFORM      -- `_full_platform_details`"
	if [ ! -z "$SCRATCH_DEV" ]; then
	  echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
	  echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
	fi
	echo
	needwrap=true

	if [ ! -z "$SCRATCH_DEV" ]; then
	  _scratch_unmount 2> /dev/null
	  # call the overridden mkfs - make sure the FS is built
	  # the same as we'll create it later.

	  if ! _scratch_mkfs >$tmp.err 2>&1
	  then
	      echo "our local _scratch_mkfs routine ..."
	      cat $tmp.err
	      echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
	      status=1
	      exit
	  fi

	  # call the overridden mount - make sure the FS mounts with
	  # the same options that we'll mount with later.
	  if ! _try_scratch_mount >$tmp.err 2>&1
	  then
	      echo "our local mount routine ..."
	      cat $tmp.err
	      echo "check: failed to mount \$SCRATCH_DEV using specified options"
	      status=1
	      exit
	  else
	      _scratch_unmount
	  fi
	fi

	seqres="$check"
	_check_test_fs

	err=false
	first_test=true
	prev_seq=""
	for seq in $list ; do
		# Run report for previous test!
		if $err ; then
			bad="$bad $seqnum"
			n_bad=`expr $n_bad + 1`
			tc_status="fail"
		fi
		if $do_report && ! $first_test ; then
			if [ $tc_status != "expunge" ] ; then
				_make_testcase_report "$prev_seq" "$tc_status"
			fi
		fi
		first_test=false

		err=false
		prev_seq="$seq"
		if [ ! -f $seq ]; then
			# Try to get full name in case the user supplied only
			# seq id and the test has a name. A bit of hassle to
			# find really the test and not its sample output or
			# helping files.
			bname=$(basename $seq)
			full_seq=$(find $(dirname $seq) -name $bname* -executable |
				awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
				     END { print shortest }')
			if [ -f $full_seq ] && \
			   [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
				seq=$full_seq
			fi
		fi

		# the filename for the test and the name output are different.
		# we don't include the tests/ directory in the name output.
		export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

		# Similarly, the result directory needs to replace the tests/
		# part of the test location.
		group=`dirname $seq`
		if $OPTIONS_HAVE_SECTIONS; then
			export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
			REPORT_DIR="$RESULT_BASE/$section"
		else
			export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
			REPORT_DIR="$RESULT_BASE"
		fi
		seqres="$REPORT_DIR/$seqnum"

		mkdir -p $RESULT_DIR
		echo -n "$seqnum"

		# -n: only list what would run, never execute anything
		if $showme; then
			_expunge_test $seqnum
			if [ $? -eq 1 ]; then
			    tc_status="expunge"
			    continue
			fi
			echo
			start=0
			stop=0
			tc_status="list"
			n_notrun=`expr $n_notrun + 1`
			continue
		fi

		tc_status="pass"
		if [ ! -f $seq ]; then
			echo " - no such test?"
			continue
		fi

		# really going to try and run this one
		rm -f $seqres.out.bad

		# check if we really should run it
		_expunge_test $seqnum
		if [ $? -eq 1 ]; then
			tc_status="expunge"
			continue
		fi

		# record that we really tried to run this test.
		try="$try $seqnum"
		n_try=`expr $n_try + 1`

		# slashes now in names, sed barfs on them so use grep
		lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
		if [ "X$lasttime" != X ]; then
			echo -n " ${lasttime}s ... "
		else
			echo -n "       " # prettier output with timestamps.
		fi
		rm -f core $seqres.notrun

		start=`_wallclock`
		$timestamp && echo -n " ["`date "+%T"`"]"
		[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
		$LOGGER_PROG "run xfstest $seqnum"
		if [ -w /dev/kmsg ]; then
			export date_time=`date +"%F %T"`
			echo "run fstests $seqnum at $date_time" > /dev/kmsg
			# _check_dmesg depends on this log in dmesg
			touch ${RESULT_DIR}/check_dmesg
		fi
		_try_wipe_scratch_devs > /dev/null 2>&1
		if [ "$DUMP_OUTPUT" = true ]; then
			_run_seq 2>&1 | tee $tmp.out
			# Because $? would get tee's return code
			sts=${PIPESTATUS[0]}
		else
			_run_seq >$tmp.out 2>&1
			sts=$?
		fi

		if [ -f core ]; then
			_dump_err_cont "[dumped core]"
			mv core $RESULT_BASE/$seqnum.core
			err=true
		fi

		# the test left a .notrun file: it skipped itself, with the
		# reason inside the file
		if [ -f $seqres.notrun ]; then
			$timestamp && _timestamp
			stop=`_wallclock`
			$timestamp || echo -n "[not run] "
			$timestamp && echo " [not run]" && \
				      echo -n " $seqnum -- "
			cat $seqres.notrun
			notrun="$notrun $seqnum"
			n_notrun=`expr $n_notrun + 1`
			tc_status="notrun"
			continue;
		fi

		if [ $sts -ne 0 ]; then
			_dump_err_cont "[failed, exit status $sts]"
			_test_unmount 2> /dev/null
			_scratch_unmount 2> /dev/null
			rm -f ${RESULT_DIR}/require_test*
			rm -f ${RESULT_DIR}/require_scratch*
			err=true
		else
			# the test apparently passed, so check for corruption
			# and log messages that shouldn't be there.
			_check_filesystems
			_check_dmesg || err=true
		fi

		# Scan for memory leaks after every test so that associating
		# a leak to a particular test will be as accurate as possible.
		_check_kmemleak || err=true

		# test ends after all checks are done.
		$timestamp && _timestamp
		stop=`_wallclock`

		if [ ! -f $seq.out ]; then
			_dump_err "no qualified output"
			err=true
			continue;
		fi

		# coreutils 8.16+ changed quote formats in error messages
		# from `foo' to 'foo'. Filter old versions to match the new
		# version.
		sed -i "s/\`/\'/g" $tmp.out
		if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
			if ! $err ; then
				echo "$seqnum `expr $stop - $start`" >>$tmp.time
				echo -n " `expr $stop - $start`s"
			fi
			echo ""
		else
			_dump_err "- output mismatch (see $seqres.out.bad)"
			mv $tmp.out $seqres.out.bad
			# show at most $DIFF_LENGTH lines of the diff, indented
			$diff $seq.out $seqres.out.bad | {
			if test "$DIFF_LENGTH" -le 0; then
				cat
			else
				head -n "$DIFF_LENGTH"
				echo "..."
				echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
					" to see the entire diff)"
			fi; } | sed -e 's/^\(.\)/    \1/'
			err=true
		fi
	done

	# make sure we record the status of the last test we ran.
	if $err ; then
		bad="$bad $seqnum"
		n_bad=`expr $n_bad + 1`
		tc_status="fail"
	fi
	if $do_report && ! $first_test ; then
		if [ $tc_status != "expunge" ] ; then
			_make_testcase_report "$prev_seq" "$tc_status"
		fi
	fi

	sect_stop=`_wallclock`
	# clear $interrupt so _wrapup doesn't report "Interrupted!" for a
	# normal end of section, then restore it for the next section
	interrupt=false
	_wrapup
	interrupt=true
	echo

	_test_unmount 2> /dev/null
	_scratch_unmount 2> /dev/null
done

# reaching this point means we ran to completion, not via a signal
interrupt=false
# exit non-zero if any test in any section failed
status=`expr $sum_bad != 0`
exit