fstests: drop check.log and check.time into section specific results dir
[xfstests-dev.git] / check
1 #!/bin/bash
2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
4 #
5 # Control script for QA
6 #
7 tmp=/tmp/$$
8 status=0
9 needwrap=true
10 needsum=true
11 n_try=0
12 try=""
13 n_bad=0
14 sum_bad=0
15 bad=""
16 n_notrun=0
17 notrun=""
18 interrupt=true
19 diff="diff -u"
20 showme=false
21 have_test_arg=false
22 randomize=false
23 export here=`pwd`
24 xfile=""
25 subdir_xfile=""
26 brief_test_summary=false
27 do_report=false
28 DUMP_OUTPUT=false
29 iterations=1
30
31 # This is a global variable used to pass test failure text to reporting gunk
32 _err_msg=""
33
34 # start the initialisation work now
35 iam=check
36
37 export MSGVERB="text:action"
38 export QA_CHECK_FS=${QA_CHECK_FS:=true}
39
40 # number of diff lines from a failed test, 0 for whole output
41 export DIFF_LENGTH=${DIFF_LENGTH:=10}
42
43 # by default don't output timestamps
44 timestamp=${TIMESTAMP:=false}
45
46 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
47
48 SRC_GROUPS="generic shared"
49 export SRC_DIR="tests"
50
# Print the full usage/help text (options, testlist syntax, examples) and
# exit successfully.  Called for -h/-?/--help and for any unrecognised option.
usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -glusterfs          test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -virtiofs           test virtiofs
    -overlay            test overlay
    -pvfs2              test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    -i <n>              iterate the test list <n> times
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
for every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
	    exit 0
}
116
# Print the tests in tests/<dir> ($1) that are tagged with group $2 in that
# directory's group file.  Output is the space-separated list of test paths
# ($SRC_DIR/<dir>/<name>); returns 1 when no usable group file exists.
get_sub_group_list()
{
	local dir=$1
	local group=$2
	local matched

	# no group file (or an empty one) means nothing to report
	if ! test -s "$SRC_DIR/$dir/group"; then
		return 1
	fi

	# Strip comments, append a trailing space so " $group " also matches a
	# group name at end-of-line, then rewrite each matching entry into the
	# full test path.
	matched=$(sed -n < $SRC_DIR/$dir/group \
		-e 's/#.*//' \
		-e 's/$/ /' \
		-e "s;^\($VALID_TEST_NAME\).* $group .*;$SRC_DIR/$dir/\1;p")
	echo $matched
}
130
# Resolve a group name ($1) into the list of matching test paths.  A name of
# the form <subdir>/<group> (e.g. xfs/quick) searches only that test dir;
# otherwise every generic/shared dir plus the $FSTYP dir is searched.
# Output is the space-separated list of test paths on stdout.
get_group_list()
{
	local group=$1
	local result=""
	local subdir=$(dirname $group)
	local fsdir="$FSTYP"
	local d

	# group is given as <subdir>/<group> (e.g. xfs/quick)
	if [ -n "$subdir" ] && [ "$subdir" != "." ] && [ -d "$SRC_DIR/$subdir" ]; then
		get_sub_group_list $subdir $(basename $group)
		return
	fi

	# ext2/ext3 tests live in the ext4 test directory
	case "$FSTYP" in
	ext2|ext3)
		fsdir=ext4
		;;
	esac

	for d in $SRC_GROUPS $fsdir; do
		test -d "$SRC_DIR/$d" || continue
		result="$result $(get_sub_group_list $d $group)"
	done
	echo $result
}
156
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
# Appends the resulting test paths to $tmp.list; searches the generic/shared
# dirs plus the directory for the current $FSTYP.
get_all_tests()
{
	touch $tmp.list
	for d in $SRC_GROUPS $FSTYP; do
		if ! test -d "$SRC_DIR/$d" ; then
			continue
		fi
		# Drop anything with a dot in it (foo.out etc.), keep only
		# names that look like tests, and filter out the metadata
		# files that live alongside them.
		ls $SRC_DIR/$d/* | \
			grep -v "\..*" | \
			grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
			grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
	done
}
172
# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
trim_test_list()
{
	test_list="$*"

	local patterns=0
	local t

	rm -f $tmp.grep
	for t in $test_list; do
		# grep -f gets slow with huge pattern files, so filter the
		# list in batches of ~100 patterns at a time
		if [ $patterns -gt 100 ]; then
			grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
			mv $tmp.tmp $tmp.list
			patterns=0
			rm -f $tmp.grep
		fi
		echo "^$t\$" >>$tmp.grep
		patterns=$((patterns + 1))
	done

	# apply whatever patterns are left over
	grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
	mv $tmp.tmp $tmp.list
	rm -f $tmp.grep
}
196
197
# Seconds since the epoch; used for per-test and per-section timing.
_wallclock()
{
    date +'%s'
}
202
# Emit " [HH:MM:SS]" (no trailing newline) for -T timestamped output.
_timestamp()
{
    now=`date "+%T"`
    printf ' [%s]' "$now"
}
208
# Build the global $list of tests to run from the tests named on the command
# line ($tmp.arglist), the -g include groups and the -x exclude groups.  The
# result is numerically sorted and de-duplicated (or shuffled with -r).
_prepare_test_list()
{
	unset list

	# Tests specified on the command line
	if [ -s $tmp.arglist ]; then
		cat $tmp.arglist > $tmp.list
	else
		touch $tmp.list
	fi

	# Specified groups to include
	# Note that the CLI processing adds a leading space to the first group
	# parameter, so we have to catch that here checking for "all"
	if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
		# no test numbers, do everything
		get_all_tests
	else
		local grp tst
		for grp in $GROUP_LIST; do
			list=$(get_group_list $grp)
			if [ -z "$list" ]; then
				echo "Group \"$grp\" is empty or not defined?"
				exit 1
			fi

			# merge, skipping tests already on the list
			for tst in $list; do
				grep -s "^$tst\$" $tmp.list >/dev/null || \
							echo "$tst" >>$tmp.list
			done
		done
	fi

	# Specified groups to exclude
	local xgrp
	for xgrp in $XGROUP_LIST; do
		list=$(get_group_list $xgrp)
		if [ -z "$list" ]; then
			echo "Group \"$xgrp\" is empty or not defined?"
			exit 1
		fi

		trim_test_list $list
	done

	# sort the list of tests into numeric order, or shuffle it for -r
	local sorter="cat"
	if $randomize; then
		if type shuf >& /dev/null; then
			sorter="shuf"
		else
			sorter="awk -v seed=$RANDOM -f randomize.awk"
		fi
	fi
	list=$(sort -n $tmp.list | uniq | $sorter)
	rm -f $tmp.list
}
264
265 # Process command arguments first.
266 while [ $# -gt 0 ]; do
267         case "$1" in
268         -\? | -h | --help) usage ;;
269
270         -nfs)           FSTYP=nfs ;;
271         -glusterfs)     FSTYP=glusterfs ;;
272         -cifs)          FSTYP=cifs ;;
273         -9p)            FSTYP=9p ;;
274         -virtiofs)      FSTYP=virtiofs ;;
275         -overlay)       FSTYP=overlay; export OVERLAY=true ;;
276         -pvfs2)         FSTYP=pvfs2 ;;
277         -tmpfs)         FSTYP=tmpfs ;;
278         -ubifs)         FSTYP=ubifs ;;
279
280         -g)     group=$2 ; shift ;
281                 GROUP_LIST="$GROUP_LIST ${group//,/ }"
282                 ;;
283
284         -x)     xgroup=$2 ; shift ;
285                 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
286                 ;;
287
288         -X)     subdir_xfile=$2; shift ;
289                 ;;
290         -E)     xfile=$2; shift ;
291                 if [ -f $xfile ]; then
292                         sed "s/#.*$//" "$xfile" >> $tmp.xlist
293                 fi
294                 ;;
295         -s)     RUN_SECTION="$RUN_SECTION $2"; shift ;;
296         -S)     EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
297         -l)     diff="diff" ;;
298         -udiff) diff="$diff -u" ;;
299
300         -n)     showme=true ;;
301         -r)     randomize=true ;;
302         -i)     iterations=$2; shift ;;
303         -T)     timestamp=true ;;
304         -d)     DUMP_OUTPUT=true ;;
305         -b)     brief_test_summary=true;;
306         -R)     report_fmt=$2 ; shift ;
307                 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
308                 do_report=true
309                 ;;
310         --large-fs) export LARGE_SCRATCH_DEV=yes ;;
311         --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
312
313         -*)     usage ;;
314         *)      # not an argument, we've got tests now.
315                 have_test_arg=true ;;
316         esac
317
318         # if we've found a test specification, the break out of the processing
319         # loop before we shift the arguments so that this is the first argument
320         # that we process in the test arg loop below.
321         if $have_test_arg; then
322                 break;
323         fi
324
325         shift
326 done
327
328 # we need common/rc, that also sources common/config. We need to source it
329 # after processing args, overlay needs FSTYP set before sourcing common/config
330 if ! . ./common/rc; then
331         echo "check: failed to source common/rc"
332         exit 1
333 fi
334
335 if [ -n "$subdir_xfile" ]; then
336         for d in $SRC_GROUPS $FSTYP; do
337                 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
338                 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
339                         echo $d/$f >> $tmp.xlist
340                 done
341         done
342 fi
343
# Process tests from command line now.
# Each remaining argument is a test name or glob (expanded relative to
# $SRC_DIR); names found in the relevant group file are appended to
# $tmp.arglist, anything else is reported and ignored.
if $have_test_arg; then
	while [ $# -gt 0 ]; do
		case "$1" in
		-*)	echo "Arguments before tests, please!"
			status=1
			exit $status
			;;
		*)	# Expand test pattern (e.g. xfs/???, *fs/001)
			list=$(cd $SRC_DIR; echo $1)
			for t in $list; do
				test_dir=`dirname $t`
				test_dir=${test_dir#$SRC_DIR/*}
				test_name=`basename $t`
				group_file=$SRC_DIR/$test_dir/group

				# egrep is obsolescent and modern GNU grep
				# warns about it on stderr; use grep -E.
				if grep -E -q "^$test_name" $group_file; then
					# in group file ... OK
					echo $SRC_DIR/$test_dir/$test_name \
						>>$tmp.arglist
				else
					# oops
					echo "$t - unknown test, ignored"
				fi
			done
			;;
		esac

		shift
	done
elif [ -z "$GROUP_LIST" ]; then
	# default group list is the auto group. If any other group or test is
	# specified, we use that instead.
	GROUP_LIST="auto"
fi

# QA pokes at block devices and mounts filesystems; refuse to run unprivileged.
if [ `id -u` -ne 0 ]
then
    echo "check: QA must be run as root"
    exit 1
fi
385
# Reset the per-section pass/fail/notrun counters and name lists so the
# next section starts from a clean slate.
_wipe_counters()
{
	n_try=0
	n_bad=0
	n_notrun=0
	unset try notrun bad
}
393
# Append a line ($1) to the global $check.log, and additionally to the
# section-specific ${REPORT_DIR}/check.log when config sections are in use.
# Fix: the variable was misspelled $OPTIONS_HAVE_SECIONS; an unset variable
# expands to an empty command, which succeeds, so the section branch ran
# unconditionally (writing to a bogus path when REPORT_DIR was empty).
_global_log() {
	echo "$1" >> $check.log
	if $OPTIONS_HAVE_SECTIONS; then
		echo "$1" >> ${REPORT_DIR}/check.log
	fi
}
400
# Per-section teardown: merge this section's timing data into the persistent
# $check.time database, append the section summary to $tmp.summary and the
# global log, and emit the section report when -R was given.  Gated on
# $needwrap so it is safe to call repeatedly (it also runs from the exit
# trap); $showme suppresses everything except report generation.
_wrapup()
{
	seq="check"
	check="$RESULT_BASE/check"

	if $showme; then
		if $needwrap; then
			if $do_report; then
				_make_section_report
			fi
			needwrap=false
		fi
	elif $needwrap; then
		# merge the new per-test times into check.time, newest wins
		if [ -f $check.time -a -f $tmp.time ]; then
			cat $check.time $tmp.time  \
				| $AWK_PROG '
				{ t[$1] = $2 }
				END {
					if (NR > 0) {
						for (i in t) print i " " t[i]
					}
				}' \
				| sort -n >$tmp.out
			mv $tmp.out $check.time
			if $OPTIONS_HAVE_SECTIONS; then
				cp $check.time ${REPORT_DIR}/check.time
			fi
		fi

		_global_log ""
		_global_log "$(date)"

		echo "SECTION       -- $section" >>$tmp.summary
		echo "=========================" >>$tmp.summary
		if [ ! -z "$n_try" -a $n_try != 0 ]; then
			if [ $brief_test_summary == "false" ]; then
				echo "Ran:$try"
				echo "Ran:$try" >>$tmp.summary
			fi
			_global_log "Ran:$try"
		fi

		$interrupt && echo "Interrupted!" | tee -a $check.log
		# Fix: this tested the misspelled $OPTIONS_HAVE_SECIONS, which
		# expands to an empty (successful) command, making the branch
		# unconditional and appending to a bogus path without sections.
		if $OPTIONS_HAVE_SECTIONS; then
			$interrupt && echo "Interrupted!" | tee -a \
				${REPORT_DIR}/check.log
		fi

		if [ ! -z "$notrun" ]; then
			if [ $brief_test_summary == "false" ]; then
				echo "Not run:$notrun"
				echo "Not run:$notrun" >>$tmp.summary
			fi
			_global_log "Not run:$notrun"
		fi

		if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
			echo "Failures:$bad"
			echo "Failed $n_bad of $n_try tests"
			_global_log "Failures:$bad"
			_global_log "Failed $n_bad of $n_try tests"
			echo "Failures:$bad" >>$tmp.summary
			echo "Failed $n_bad of $n_try tests" >>$tmp.summary
		else
			echo "Passed all $n_try tests"
			_global_log "Passed all $n_try tests"
			echo "Passed all $n_try tests" >>$tmp.summary
		fi
		echo "" >>$tmp.summary
		if $do_report; then
			_make_section_report
		fi
		needwrap=false
	fi

	sum_bad=`expr $sum_bad + $n_bad`
	_wipe_counters
	rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
	if ! $OPTIONS_HAVE_SECTIONS; then
		rm -f $tmp.*
	fi
}
483
# Final wrapup for multi-section runs: close out the last section via
# _wrapup, then print the accumulated per-section summaries exactly once
# (gated on $needsum; -n suppresses the summary entirely).
_summary()
{
	_wrapup
	if $showme; then
		:
	elif $needsum; then
		# Fix: 'wc -L' reports the longest line length; the line
		# count ('wc -l') was clearly intended for a "count".
		count=`wc -l $tmp.summary | cut -f1 -d" "`
		cat $tmp.summary
		needsum=false
	fi
	rm -f $tmp.*
}
496
# Post-test filesystem verification: fsck the test and/or scratch fs when
# the test flagged (via require_test/require_scratch marker files) that it
# used them; sets the global err=true on any detected corruption.  Always
# leaves both filesystems unmounted.
_check_filesystems()
{
	if [ ! -f ${RESULT_DIR}/require_test ]; then
		# test fs untouched by this test; just make sure it is offline
		_test_unmount 2> /dev/null
	else
		_check_test_fs || err=true
		rm -f ${RESULT_DIR}/require_test*
	fi

	if [ -f ${RESULT_DIR}/require_scratch ]; then
		_check_scratch_fs || err=true
		rm -f ${RESULT_DIR}/require_scratch*
	fi
	_scratch_unmount 2> /dev/null
}
511
# Check test $1 against the exclude list built from -X/-E.  Prints the
# "[expunged]" marker and returns 1 when the test is excluded; returns 0
# when the test should run (including when no exclude list exists).
_expunge_test()
{
	local TEST_ID="$1"

	if [ ! -s $tmp.xlist ]; then
		return 0
	fi
	if grep -q $TEST_ID $tmp.xlist; then
		echo "       [expunged]"
		return 1
	fi
	return 0
}
523
# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}

# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
# test framework.
#
# Run the current test ($seq) in a child bash so the raised OOM score applies
# to the test process only and never sticks to this script.
_run_seq() {
	bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
}
534
535 _detect_kmemleak
536 _prepare_test_list
537
538 if $OPTIONS_HAVE_SECTIONS; then
539         trap "_summary; exit \$status" 0 1 2 3 15
540 else
541         trap "_wrapup; exit \$status" 0 1 2 3 15
542 fi
543
# Run one configured section: apply the section's config, honour -s/-S
# filtering, (re)create and mount the test/scratch devices as needed, then
# execute every test in $list, tracking pass/fail/notrun state, timing and
# per-test reports.  Finishes with _wrapup for the section.
function run_section()
{
	local section=$1

	# remember the previous section's fs setup so we can detect changes
	OLD_FSTYP=$FSTYP
	OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
	get_next_config $section

	# Do we need to run only some sections ?
	if [ ! -z "$RUN_SECTION" ]; then
		skip=true
		for s in $RUN_SECTION; do
			if [ $section == $s ]; then
				skip=false
				break;
			fi
		done
		if $skip; then
			return
		fi
	fi

	# Did this section get excluded?
	if [ ! -z "$EXCLUDE_SECTION" ]; then
		skip=false
		for s in $EXCLUDE_SECTION; do
			if [ $section == $s ]; then
				skip=true
				break;
			fi
		done
		if $skip; then
			return
		fi
	fi

	# results must be writable before we run anything
	mkdir -p $RESULT_BASE
	if [ ! -d $RESULT_BASE ]; then
		echo "failed to create results directory $RESULT_BASE"
		status=1
		exit
	fi

	if $OPTIONS_HAVE_SECTIONS; then
		echo "SECTION       -- $section"
	fi

	sect_start=`_wallclock`
	# re-mkfs the test device if requested or if the fs type changed;
	# a mere mount-option change only needs a remount
	if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
		echo "RECREATING    -- $FSTYP on $TEST_DEV"
		_test_unmount 2> /dev/null
		if ! _test_mkfs >$tmp.err 2>&1
		then
			echo "our local _test_mkfs routine ..."
			cat $tmp.err
			echo "check: failed to mkfs \$TEST_DEV using specified options"
			status=1
			exit
		fi
		if ! _test_mount
		then
			echo "check: failed to mount $TEST_DEV on $TEST_DIR"
			status=1
			exit
		fi
		# the fs type may have changed, so the test list must be
		# rebuilt for this section
		_prepare_test_list
	elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
		_test_unmount 2> /dev/null
		if ! _test_mount
		then
			echo "check: failed to mount $TEST_DEV on $TEST_DIR"
			status=1
			exit
		fi
	fi

	init_rc

	seq="check"
	check="$RESULT_BASE/check"

	# don't leave old full output behind on a clean run
	rm -f $check.full

	[ -f $check.time ] || touch $check.time

	# print out our test configuration
	echo "FSTYP         -- `_full_fstyp_details`"
	echo "PLATFORM      -- `_full_platform_details`"
	if [ ! -z "$SCRATCH_DEV" ]; then
	  echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
	  echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
	fi
	echo
	needwrap=true

	if [ ! -z "$SCRATCH_DEV" ]; then
	  _scratch_unmount 2> /dev/null
	  # call the overridden mkfs - make sure the FS is built
	  # the same as we'll create it later.

	  if ! _scratch_mkfs >$tmp.err 2>&1
	  then
	      echo "our local _scratch_mkfs routine ..."
	      cat $tmp.err
	      echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
	      status=1
	      exit
	  fi

	  # call the overridden mount - make sure the FS mounts with
	  # the same options that we'll mount with later.
	  if ! _try_scratch_mount >$tmp.err 2>&1
	  then
	      echo "our local mount routine ..."
	      cat $tmp.err
	      echo "check: failed to mount \$SCRATCH_DEV using specified options"
	      status=1
	      exit
	  else
	      _scratch_unmount
	  fi
	fi

	# sanity-check the test fs before the first test runs
	seqres="$check"
	_check_test_fs

	err=false
	first_test=true
	prev_seq=""
	for seq in $list ; do
		# Run report for previous test!
		if $err ; then
			bad="$bad $seqnum"
			n_bad=`expr $n_bad + 1`
			tc_status="fail"
		fi
		if $do_report && ! $first_test ; then
			if [ $tc_status != "expunge" ] ; then
				_make_testcase_report "$prev_seq" "$tc_status"
			fi
		fi
		first_test=false

		err=false
		prev_seq="$seq"
		if [ ! -f $seq ]; then
			# Try to get full name in case the user supplied only
			# seq id and the test has a name. A bit of hassle to
			# find really the test and not its sample output or
			# helping files.
			bname=$(basename $seq)
			full_seq=$(find $(dirname $seq) -name $bname* -executable |
				awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
				     END { print shortest }')
			if [ -f $full_seq ] && \
			   [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
				seq=$full_seq
			fi
		fi

		# the filename for the test and the name output are different.
		# we don't include the tests/ directory in the name output.
		export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

		# Similarly, the result directory needs to replace the tests/
		# part of the test location.
		group=`dirname $seq`
		if $OPTIONS_HAVE_SECTIONS; then
			export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
			REPORT_DIR="$RESULT_BASE/$section"
		else
			export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
			REPORT_DIR="$RESULT_BASE"
		fi
		seqres="$REPORT_DIR/$seqnum"

		mkdir -p $RESULT_DIR
		# clear stale fs-check requests from a previous run
		rm -f ${RESULT_DIR}/require_scratch*
		rm -f ${RESULT_DIR}/require_test*
		echo -n "$seqnum"

		# -n: just list the test (still honouring the exclude list)
		if $showme; then
			_expunge_test $seqnum
			if [ $? -eq 1 ]; then
			    tc_status="expunge"
			    continue
			fi
			echo
			start=0
			stop=0
			tc_status="list"
			n_notrun=`expr $n_notrun + 1`
			continue
		fi

		tc_status="pass"
		if [ ! -f $seq ]; then
			echo " - no such test?"
			continue
		fi

		# really going to try and run this one
		rm -f $seqres.out.bad

		# check if we really should run it
		_expunge_test $seqnum
		if [ $? -eq 1 ]; then
			tc_status="expunge"
			continue
		fi

		# record that we really tried to run this test.
		try="$try $seqnum"
		n_try=`expr $n_try + 1`

		# slashes now in names, sed barfs on them so use grep
		lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
		if [ "X$lasttime" != X ]; then
			echo -n " ${lasttime}s ... "
		else
			echo -n "       " # prettier output with timestamps.
		fi
		rm -f core $seqres.notrun

		start=`_wallclock`
		$timestamp && echo -n " ["`date "+%T"`"]"
		[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
		$LOGGER_PROG "run xfstest $seqnum"
		if [ -w /dev/kmsg ]; then
			export date_time=`date +"%F %T"`
			echo "run fstests $seqnum at $date_time" > /dev/kmsg
			# _check_dmesg depends on this log in dmesg
			touch ${RESULT_DIR}/check_dmesg
		fi
		_try_wipe_scratch_devs > /dev/null 2>&1

		# clear the WARN_ONCE state to allow a potential problem
		# to be reported for each test
		(echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1

		if [ "$DUMP_OUTPUT" = true ]; then
			_run_seq 2>&1 | tee $tmp.out
			# Because $? would get tee's return code
			sts=${PIPESTATUS[0]}
		else
			_run_seq >$tmp.out 2>&1
			sts=$?
		fi

		if [ -f core ]; then
			_dump_err_cont "[dumped core]"
			mv core $RESULT_BASE/$seqnum.core
			err=true
		fi

		# a test that creates $seqres.notrun decided to skip itself
		if [ -f $seqres.notrun ]; then
			$timestamp && _timestamp
			stop=`_wallclock`
			$timestamp || echo -n "[not run] "
			$timestamp && echo " [not run]" && \
				      echo -n " $seqnum -- "
			cat $seqres.notrun
			notrun="$notrun $seqnum"
			n_notrun=`expr $n_notrun + 1`
			tc_status="notrun"
			continue;
		fi

		if [ $sts -ne 0 ]; then
			_dump_err_cont "[failed, exit status $sts]"
			_test_unmount 2> /dev/null
			_scratch_unmount 2> /dev/null
			rm -f ${RESULT_DIR}/require_test*
			rm -f ${RESULT_DIR}/require_scratch*
			err=true
		else
			# the test apparently passed, so check for corruption
			# and log messages that shouldn't be there.
			_check_filesystems
			_check_dmesg || err=true
		fi

		# Reload the module after each test to check for leaks or
		# other problems.
		if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
			_test_unmount 2> /dev/null
			_scratch_unmount 2> /dev/null
			modprobe -r fs-$FSTYP
			modprobe fs-$FSTYP
		fi

		# Scan for memory leaks after every test so that associating
		# a leak to a particular test will be as accurate as possible.
		_check_kmemleak || err=true

		# test ends after all checks are done.
		$timestamp && _timestamp
		stop=`_wallclock`

		if [ ! -f $seq.out ]; then
			_dump_err "no qualified output"
			err=true
			continue;
		fi

		# coreutils 8.16+ changed quote formats in error messages
		# from `foo' to 'foo'. Filter old versions to match the new
		# version.
		sed -i "s/\`/\'/g" $tmp.out
		if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
			# output matched the golden image: record the runtime
			if ! $err ; then
				echo "$seqnum `expr $stop - $start`" >>$tmp.time
				echo -n " `expr $stop - $start`s"
			fi
			echo ""
		else
			_dump_err "- output mismatch (see $seqres.out.bad)"
			mv $tmp.out $seqres.out.bad
			$diff $seq.out $seqres.out.bad | {
			if test "$DIFF_LENGTH" -le 0; then
				cat
			else
				head -n "$DIFF_LENGTH"
				echo "..."
				echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
					" to see the entire diff)"
			fi; } | sed -e 's/^\(.\)/    \1/'
			err=true
		fi
	done

	# make sure we record the status of the last test we ran.
	if $err ; then
		bad="$bad $seqnum"
		n_bad=`expr $n_bad + 1`
		tc_status="fail"
	fi
	if $do_report && ! $first_test ; then
		if [ $tc_status != "expunge" ] ; then
			_make_testcase_report "$prev_seq" "$tc_status"
		fi
	fi

	sect_stop=`_wallclock`
	# a normal end-of-section wrapup is not an interruption
	interrupt=false
	_wrapup
	interrupt=true
	echo

	_test_unmount 2> /dev/null
	_scratch_unmount 2> /dev/null
}
897
# Main loop: run every configured section, repeating the whole set for -i.
for ((iters = 0; iters < $iterations; iters++)) do
	for section in $HOST_OPTIONS_SECTIONS; do
		run_section $section
	done
done

# clean exit: not an interruption; overall status reflects any failure in
# any section (the exit trap performs the final wrapup/summary)
interrupt=false
status=`expr $sum_bad != 0`
exit