common/config: Allow section names to contain hyphens
[xfstests-dev.git] / check
1 #!/bin/bash
2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
4 #
5 # Control script for QA
6 #
7 tmp=/tmp/$$
8 status=0
9 needwrap=true
10 needsum=true
11 n_try=0
12 try=""
13 n_bad=0
14 sum_bad=0
15 bad=""
16 n_notrun=0
17 notrun=""
18 interrupt=true
19 diff="diff -u"
20 showme=false
21 have_test_arg=false
22 randomize=false
23 export here=`pwd`
24 xfile=""
25 subdir_xfile=""
26 brief_test_summary=false
27 do_report=false
28 DUMP_OUTPUT=false
29 iterations=1
30
31 # This is a global variable used to pass test failure text to reporting gunk
32 _err_msg=""
33
34 # start the initialisation work now
35 iam=check
36
37 export MSGVERB="text:action"
38 export QA_CHECK_FS=${QA_CHECK_FS:=true}
39
40 # number of diff lines from a failed test, 0 for whole output
41 export DIFF_LENGTH=${DIFF_LENGTH:=10}
42
43 # by default don't output timestamps
44 timestamp=${TIMESTAMP:=false}
45
46 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
47
48 SRC_GROUPS="generic shared"
49 export SRC_DIR="tests"
50
51 usage()
52 {
53     echo "Usage: $0 [options] [testlist]"'
54
55 check options
56     -nfs                test NFS
57     -glusterfs          test GlusterFS
58     -cifs               test CIFS
59     -9p                 test 9p
60     -virtiofs           test virtiofs
61     -overlay            test overlay
62     -pvfs2              test PVFS2
63     -tmpfs              test TMPFS
64     -ubifs              test ubifs
65     -l                  line mode diff
66     -udiff              show unified diff (default)
67     -n                  show me, do not run tests
68     -T                  output timestamps
69     -r                  randomize test order
70     -i <n>              iterate the test list <n> times
71     -d                  dump test output to stdout
72     -b                  brief test summary
73     -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
74     --large-fs          optimise scratch device for large filesystems
75     -s section          run only specified section from config file
76     -S section          exclude the specified section from the config file
77
78 testlist options
79     -g group[,group...] include tests from these groups
80     -x group[,group...] exclude tests from these groups
81     -X exclude_file     exclude individual tests
82     -E external_file    exclude individual tests
83     [testlist]          include tests matching names in testlist
84
85 The testlist argument is a list of tests in the form <test dir>/<test name>.
86
87 <test dir> is a directory under tests that contains a group file,
88 with a list of the names of the tests in that directory.
89
90 <test name> may be either a specific test file name (e.g. xfs/001) or
91 a test file name match pattern (e.g. xfs/*).
92
93 The group argument is either the name of a test group to collect from all
94 the test dirs (e.g. quick), or the name of a test group in a specific test
95 dir, given in the form <test dir>/<group name> (e.g. xfs/quick).
96 To run all the tests in the test suite, use "-g all" to select every
97 group.
98
99 The exclude_file argument is the name of a file inside each test directory.
100 For every test dir where this file is found, the listed test names are
101 excluded from the list of tests to run from that test dir.
102
103 The external_file argument is the path to a single file containing a list
104 of tests to exclude, in the form <test dir>/<test name>.
105
106 examples:
107  check xfs/001
108  check -g quick
109  check -g xfs/quick
110  check -x stress xfs/*
111  check -X .exclude -g auto
112  check -E ~/.xfstests.exclude
113 '
114             exit 0
115 }
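# A minimal illustration (the section names below are made up, not part of
# this script) of how the -s/-S options interact with a sectioned
# local.config:
#
#   [xfs-4k]
#   FSTYP=xfs
#   MKFS_OPTIONS="-b size=4096"
#
#   [xfs-1k]
#   FSTYP=xfs
#   MKFS_OPTIONS="-b size=1024"
#
# "./check -s xfs-4k -g quick" runs the quick group against the xfs-4k
# section only, while "./check -S xfs-1k -g quick" runs every section except
# xfs-1k. Section names come from the config file and may contain hyphens.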
116
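# Print the full path (relative to the source root, e.g. tests/xfs/001) of
# every test in directory $1 whose entry in that directory's group file
# lists the group $2.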
117 get_sub_group_list()
118 {
119         local d=$1
120         local grp=$2
121
122         test -s "$SRC_DIR/$d/group" || return 1
123
124         local grpl=$(sed -n < $SRC_DIR/$d/group \
125                 -e 's/#.*//' \
126                 -e 's/$/ /' \
127                 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
128         echo $grpl
129 }
130
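# Expand a group name into the list of tests it contains.  The group may be
# qualified as <test dir>/<group> (e.g. xfs/quick) to search one directory
# only; otherwise the generic, shared and $FSTYP directories are searched
# (ext2 and ext3 reuse the ext4 directory).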
131 get_group_list()
132 {
133         local grp=$1
134         local grpl=""
135         local sub=$(dirname $grp)
136         local fsgroup="$FSTYP"
137
138         if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
139                 # group is given as <subdir>/<group> (e.g. xfs/quick)
140                 grp=$(basename $grp)
141                 get_sub_group_list $sub $grp
142                 return
143         fi
144
145         if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
146             fsgroup=ext4
147         fi
148         for d in $SRC_GROUPS $fsgroup; do
149                 if ! test -d "$SRC_DIR/$d" ; then
150                         continue
151                 fi
152                 grpl="$grpl $(get_sub_group_list $d $grp)"
153         done
154         echo $grpl
155 }
156
157 # Find all tests, excluding files that are test metadata such as group files.
158 # It matches test names against $VALID_TEST_NAME defined in common/rc
159 get_all_tests()
160 {
161         touch $tmp.list
162         for d in $SRC_GROUPS $FSTYP; do
163                 if ! test -d "$SRC_DIR/$d" ; then
164                         continue
165                 fi
166                 ls $SRC_DIR/$d/* | \
167                         grep -v "\..*" | \
168                         grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
169                         grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
170         done
171 }
172
173 # takes the list of tests to run in $tmp.list, and removes the tests passed to
174 # the function from that list.
175 trim_test_list()
176 {
177         test_list="$*"
178
179         rm -f $tmp.grep
180         numsed=0
181         for t in $test_list
182         do
183             if [ $numsed -gt 100 ]; then
184                 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
185                 mv $tmp.tmp $tmp.list
186                 numsed=0
187                 rm -f $tmp.grep
188             fi
189             echo "^$t\$" >>$tmp.grep
190             numsed=`expr $numsed + 1`
191         done
192         grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
193         mv $tmp.tmp $tmp.list
194         rm -f $tmp.grep
195 }
196
197
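# Current time in seconds since the epoch; used to time tests and sections.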
198 _wallclock()
199 {
200     date "+%s"
201 }
202
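# Print the current time of day, used when -T timestamp output is enabled.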
203 _timestamp()
204 {
205     now=`date "+%T"`
206     echo -n " [$now]"
207 }
208
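# Build the global $list of tests to run: start from the tests named on the
# command line (if any), add the -g groups (or every test for "-g all"),
# remove the -x groups, then sort the result numerically or shuffle it when
# -r was given.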
209 _prepare_test_list()
210 {
211         unset list
212         # Tests specified on the command line
213         if [ -s $tmp.arglist ]; then
214                 cat $tmp.arglist > $tmp.list
215         else
216                 touch $tmp.list
217         fi
218
219         # Specified groups to include
220         # Note that the CLI processing adds a leading space to the first group
221         # parameter, so we have to allow for that here when checking for "all"
222         if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
223                 # no test numbers, do everything
224                 get_all_tests
225         else
226                 for group in $GROUP_LIST; do
227                         list=$(get_group_list $group)
228                         if [ -z "$list" ]; then
229                                 echo "Group \"$group\" is empty or not defined?"
230                                 exit 1
231                         fi
232
233                         for t in $list; do
234                                 grep -s "^$t\$" $tmp.list >/dev/null || \
235                                                         echo "$t" >>$tmp.list
236                         done
237                 done
238         fi
239
240         # Specified groups to exclude
241         for xgroup in $XGROUP_LIST; do
242                 list=$(get_group_list $xgroup)
243                 if [ -z "$list" ]; then
244                         echo "Group \"$xgroup\" is empty or not defined?"
245                         exit 1
246                 fi
247
248                 trim_test_list $list
249         done
250
251         # sort the list of tests into numeric order, or shuffle it if -r was given
252         if $randomize; then
253                 if type shuf >& /dev/null; then
254                         sorter="shuf"
255                 else
256                         sorter="awk -v seed=$RANDOM -f randomize.awk"
257                 fi
258         else
259                 sorter="cat"
260         fi
261         list=`sort -n $tmp.list | uniq | $sorter`
262         rm -f $tmp.list
263 }
264
265 # Process command arguments first.
266 while [ $# -gt 0 ]; do
267         case "$1" in
268         -\? | -h | --help) usage ;;
269
270         -nfs)           FSTYP=nfs ;;
271         -glusterfs)     FSTYP=glusterfs ;;
272         -cifs)          FSTYP=cifs ;;
273         -9p)            FSTYP=9p ;;
274         -virtiofs)      FSTYP=virtiofs ;;
275         -overlay)       FSTYP=overlay; export OVERLAY=true ;;
276         -pvfs2)         FSTYP=pvfs2 ;;
277         -tmpfs)         FSTYP=tmpfs ;;
278         -ubifs)         FSTYP=ubifs ;;
279
280         -g)     group=$2 ; shift ;
281                 GROUP_LIST="$GROUP_LIST ${group//,/ }"
282                 ;;
283
284         -x)     xgroup=$2 ; shift ;
285                 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
286                 ;;
287
288         -X)     subdir_xfile=$2; shift ;
289                 ;;
290         -E)     xfile=$2; shift ;
291                 if [ -f $xfile ]; then
292                         sed "s/#.*$//" "$xfile" >> $tmp.xlist
293                 fi
294                 ;;
295         -s)     RUN_SECTION="$RUN_SECTION $2"; shift ;;
296         -S)     EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
297         -l)     diff="diff" ;;
298         -udiff) diff="$diff -u" ;;
299
300         -n)     showme=true ;;
301         -r)     randomize=true ;;
302         -i)     iterations=$2; shift ;;
303         -T)     timestamp=true ;;
304         -d)     DUMP_OUTPUT=true ;;
305         -b)     brief_test_summary=true;;
306         -R)     report_fmt=$2 ; shift ;
307                 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
308                 do_report=true
309                 ;;
310         --large-fs) export LARGE_SCRATCH_DEV=yes ;;
311         --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
312
313         -*)     usage ;;
314         *)      # not an argument, we've got tests now.
315                 have_test_arg=true ;;
316         esac
317
318         # if we've found a test specification, then break out of the processing
319         # loop before we shift the arguments so that this is the first argument
320         # that we process in the test arg loop below.
321         if $have_test_arg; then
322                 break;
323         fi
324
325         shift
326 done
327
328 # we need common/rc, which also sources common/config. We need to source it
329 # after processing args, because overlay needs FSTYP set before sourcing common/config
330 if ! . ./common/rc; then
331         echo "check: failed to source common/rc"
332         exit 1
333 fi
334
335 if [ -n "$subdir_xfile" ]; then
336         for d in $SRC_GROUPS $FSTYP; do
337                 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
338                 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
339                         echo $d/$f >> $tmp.xlist
340                 done
341         done
342 fi
343
344 # Process tests from command line now.
345 if $have_test_arg; then
346         while [ $# -gt 0 ]; do
347                 case "$1" in
348                 -*)     echo "Arguments before tests, please!"
349                         status=1
350                         exit $status
351                         ;;
352                 *)      # Expand test pattern (e.g. xfs/???, *fs/001)
353                         list=$(cd $SRC_DIR; echo $1)
354                         for t in $list; do
355                                 test_dir=`dirname $t`
356                                 test_dir=${test_dir#$SRC_DIR/*}
357                                 test_name=`basename $t`
358                                 group_file=$SRC_DIR/$test_dir/group
359
360                                 if egrep -q "^$test_name" $group_file; then
361                                         # in group file ... OK
362                                         echo $SRC_DIR/$test_dir/$test_name \
363                                                 >>$tmp.arglist
364                                 else
365                                         # oops
366                                         echo "$t - unknown test, ignored"
367                                 fi
368                         done
369                         ;;
370                 esac
371
372                 shift
373         done
374 elif [ -z "$GROUP_LIST" ]; then
375         # default group list is the auto group. If any other group or test is
376         # specified, we use that instead.
377         GROUP_LIST="auto"
378 fi
379
380 if [ `id -u` -ne 0 ]
381 then
382     echo "check: QA must be run as root"
383     exit 1
384 fi
385
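# Reset the per-section counters and lists of tried/failed/skipped tests.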
386 _wipe_counters()
387 {
388         n_try="0"
389         n_bad="0"
390         n_notrun="0"
391         unset try notrun bad
392 }
393
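# Per-section wrap-up: fold this section's timings into $check.time, append
# the ran/not-run/failed summaries to $check.log and $tmp.summary, and emit
# the section report when -R was given.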
394 _wrapup()
395 {
396         seq="check"
397         check="$RESULT_BASE/check"
398
399         if $showme; then
400                 if $needwrap; then
401                         if $do_report; then
402                                 _make_section_report
403                         fi
404                         needwrap=false
405                 fi
406         elif $needwrap; then
407                 if [ -f $check.time -a -f $tmp.time ]; then
408                         cat $check.time $tmp.time  \
409                                 | $AWK_PROG '
410                                 { t[$1] = $2 }
411                                 END {
412                                         if (NR > 0) {
413                                                 for (i in t) print i " " t[i]
414                                         }
415                                 }' \
416                                 | sort -n >$tmp.out
417                         mv $tmp.out $check.time
418                 fi
419
420                 echo "" >>$check.log
421                 date >>$check.log
422
423                 echo "SECTION       -- $section" >>$tmp.summary
424                 echo "=========================" >>$tmp.summary
425                 if [ ! -z "$n_try" -a $n_try != 0 ]; then
426                         if [ $brief_test_summary == "false" ]; then
427                                 echo "Ran:$try"
428                                 echo "Ran:$try" >>$tmp.summary
429                         fi
430                         echo "Ran:$try" >>$check.log
431                 fi
432
433                 $interrupt && echo "Interrupted!" | tee -a $check.log
434
435                 if [ ! -z "$notrun" ]; then
436                         if [ $brief_test_summary == "false" ]; then
437                                 echo "Not run:$notrun"
438                                 echo "Not run:$notrun" >>$tmp.summary
439                         fi
440                         echo "Not run:$notrun" >>$check.log
441                 fi
442
443                 if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
444                         echo "Failures:$bad"
445                         echo "Failed $n_bad of $n_try tests"
446                         echo "Failures:$bad" >>$check.log
447                         echo "Failed $n_bad of $n_try tests" >>$check.log
448                         echo "Failures:$bad" >>$tmp.summary
449                         echo "Failed $n_bad of $n_try tests" >>$tmp.summary
450                 else
451                         echo "Passed all $n_try tests"
452                         echo "Passed all $n_try tests" >>$check.log
453                         echo "Passed all $n_try tests" >>$tmp.summary
454                 fi
455                 echo "" >>$tmp.summary
456                 if $do_report; then
457                         _make_section_report
458                 fi
459                 needwrap=false
460         fi
461
462         sum_bad=`expr $sum_bad + $n_bad`
463         _wipe_counters
464         rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
465         if ! $OPTIONS_HAVE_SECTIONS; then
466                 rm -f $tmp.*
467         fi
468 }
469
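# Final summary across all sections: wrap up the last section and print the
# accumulated per-section summaries once.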
470 _summary()
471 {
472         _wrapup
473         if $showme; then
474                 :
475         elif $needsum; then
476                 count=`wc -L $tmp.summary | cut -f1 -d" "`
477                 cat $tmp.summary
478                 needsum=false
479         fi
480         rm -f $tmp.*
481 }
482
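# Check the test and/or scratch filesystems for consistency if the test
# declared that it used them (via the require_* flag files); otherwise the
# test fs is simply unmounted.  The scratch fs is always unmounted afterwards.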
483 _check_filesystems()
484 {
485         if [ -f ${RESULT_DIR}/require_test ]; then
486                 _check_test_fs || err=true
487                 rm -f ${RESULT_DIR}/require_test*
488         else
489                 _test_unmount 2> /dev/null
490         fi
491         if [ -f ${RESULT_DIR}/require_scratch ]; then
492                 _check_scratch_fs || err=true
493                 rm -f ${RESULT_DIR}/require_scratch*
494         fi
495         _scratch_unmount 2> /dev/null
496 }
497
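# Return 1 (and print "[expunged]") if the given test id is on the exclude
# list built from the -E and -X options.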
498 _expunge_test()
499 {
500         local TEST_ID="$1"
501         if [ -s $tmp.xlist ]; then
502                 if grep -q $TEST_ID $tmp.xlist; then
503                         echo "       [expunged]"
504                         return 1
505                 fi
506         fi
507         return 0
508 }
509
510 # Make the check script unattractive to the OOM killer...
511 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
512 test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
513
514 # ...and make the tests themselves somewhat more attractive to it, so that if
515 # the system runs out of memory it'll be the test that gets killed and not the
516 # test framework.
517 _run_seq() {
518         bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
519 }
520
521 _detect_kmemleak
522 _prepare_test_list
523
524 if $OPTIONS_HAVE_SECTIONS; then
525         trap "_summary; exit \$status" 0 1 2 3 15
526 else
527         trap "_wrapup; exit \$status" 0 1 2 3 15
528 fi
529
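# Run the prepared test list against a single config section: apply the
# -s/-S section filters, switch to the section's configuration, re-mkfs and
# remount the test device if the fs type or mount options changed, sanity
# check the scratch device, then run each test and record its result.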
530 function run_section()
531 {
532         local section=$1
533
534         OLD_FSTYP=$FSTYP
535         OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
536         get_next_config $section
537
538         # Do we need to run only some sections?
539         if [ ! -z "$RUN_SECTION" ]; then
540                 skip=true
541                 for s in $RUN_SECTION; do
542                         if [ $section == $s ]; then
543                                 skip=false
544                                 break;
545                         fi
546                 done
547                 if $skip; then
548                         return
549                 fi
550         fi
551
552         # Did this section get excluded?
553         if [ ! -z "$EXCLUDE_SECTION" ]; then
554                 skip=false
555                 for s in $EXCLUDE_SECTION; do
556                         if [ $section == $s ]; then
557                                 skip=true
558                                 break;
559                         fi
560                 done
561                 if $skip; then
562                         return
563                 fi
564         fi
565
566         mkdir -p $RESULT_BASE
567         if [ ! -d $RESULT_BASE ]; then
568                 echo "failed to create results directory $RESULT_BASE"
569                 status=1
570                 exit
571         fi
572
573         if $OPTIONS_HAVE_SECTIONS; then
574                 echo "SECTION       -- $section"
575         fi
576
577         sect_start=`_wallclock`
578         if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
579                 echo "RECREATING    -- $FSTYP on $TEST_DEV"
580                 _test_unmount 2> /dev/null
581                 if ! _test_mkfs >$tmp.err 2>&1
582                 then
583                         echo "our local _test_mkfs routine ..."
584                         cat $tmp.err
585                         echo "check: failed to mkfs \$TEST_DEV using specified options"
586                         status=1
587                         exit
588                 fi
589                 if ! _test_mount
590                 then
591                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
592                         status=1
593                         exit
594                 fi
595                 _prepare_test_list
596         elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
597                 _test_unmount 2> /dev/null
598                 if ! _test_mount
599                 then
600                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
601                         status=1
602                         exit
603                 fi
604         fi
605
606         init_rc
607
608         seq="check"
609         check="$RESULT_BASE/check"
610
611         # don't leave old full output behind on a clean run
612         rm -f $check.full
613
614         [ -f $check.time ] || touch $check.time
615
616         # print out our test configuration
617         echo "FSTYP         -- `_full_fstyp_details`"
618         echo "PLATFORM      -- `_full_platform_details`"
619         if [ ! -z "$SCRATCH_DEV" ]; then
620           echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
621           echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
622         fi
623         echo
624         needwrap=true
625
626         if [ ! -z "$SCRATCH_DEV" ]; then
627           _scratch_unmount 2> /dev/null
628           # call the overridden mkfs - make sure the FS is built
629           # the same as we'll create it later.
630
631           if ! _scratch_mkfs >$tmp.err 2>&1
632           then
633               echo "our local _scratch_mkfs routine ..."
634               cat $tmp.err
635               echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
636               status=1
637               exit
638           fi
639
640           # call the overridden mount - make sure the FS mounts with
641           # the same options that we'll mount with later.
642           if ! _try_scratch_mount >$tmp.err 2>&1
643           then
644               echo "our local mount routine ..."
645               cat $tmp.err
646               echo "check: failed to mount \$SCRATCH_DEV using specified options"
647               status=1
648               exit
649           else
650               _scratch_unmount
651           fi
652         fi
653
654         seqres="$check"
655         _check_test_fs
656
657         err=false
658         first_test=true
659         prev_seq=""
660         for seq in $list ; do
661                 # Run report for previous test!
662                 if $err ; then
663                         bad="$bad $seqnum"
664                         n_bad=`expr $n_bad + 1`
665                         tc_status="fail"
666                 fi
667                 if $do_report && ! $first_test ; then
668                         if [ $tc_status != "expunge" ] ; then
669                                 _make_testcase_report "$prev_seq" "$tc_status"
670                         fi
671                 fi
672                 first_test=false
673
674                 err=false
675                 prev_seq="$seq"
676                 if [ ! -f $seq ]; then
677                         # Try to get the full name in case the user supplied
678                         # only the seq id and the test has a name. It takes a
679                         # bit of work to find the test itself rather than its
680                         # sample output or helper files.
681                         bname=$(basename $seq)
682                         full_seq=$(find $(dirname $seq) -name $bname* -executable |
683                                 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
684                                      END { print shortest }')
685                         if [ -f $full_seq ] && \
686                            [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
687                                 seq=$full_seq
688                         fi
689                 fi
690
691                 # the test's file name and its reported name are different:
692                 # we don't include the tests/ directory in the reported name.
693                 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
694
695                 # Similarly, the result directory needs to replace the tests/
696                 # part of the test location.
697                 group=`dirname $seq`
698                 if $OPTIONS_HAVE_SECTIONS; then
699                         export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
700                         REPORT_DIR="$RESULT_BASE/$section"
701                 else
702                         export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
703                         REPORT_DIR="$RESULT_BASE"
704                 fi
705                 seqres="$REPORT_DIR/$seqnum"
706
707                 mkdir -p $RESULT_DIR
708                 rm -f ${RESULT_DIR}/require_scratch*
709                 rm -f ${RESULT_DIR}/require_test*
710                 echo -n "$seqnum"
711
712                 if $showme; then
713                         _expunge_test $seqnum
714                         if [ $? -eq 1 ]; then
715                             tc_status="expunge"
716                             continue
717                         fi
718                         echo
719                         start=0
720                         stop=0
721                         tc_status="list"
722                         n_notrun=`expr $n_notrun + 1`
723                         continue
724                 fi
725
726                 tc_status="pass"
727                 if [ ! -f $seq ]; then
728                         echo " - no such test?"
729                         continue
730                 fi
731
732                 # really going to try and run this one
733                 rm -f $seqres.out.bad
734
735                 # check if we really should run it
736                 _expunge_test $seqnum
737                 if [ $? -eq 1 ]; then
738                         tc_status="expunge"
739                         continue
740                 fi
741
742                 # record that we really tried to run this test.
743                 try="$try $seqnum"
744                 n_try=`expr $n_try + 1`
745
746                 # test names now contain slashes, which sed barfs on, so use grep
747                 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
748                 if [ "X$lasttime" != X ]; then
749                         echo -n " ${lasttime}s ... "
750                 else
751                         echo -n "       " # prettier output with timestamps.
752                 fi
753                 rm -f core $seqres.notrun
754
755                 start=`_wallclock`
756                 $timestamp && echo -n " ["`date "+%T"`"]"
757                 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
758                 $LOGGER_PROG "run xfstest $seqnum"
759                 if [ -w /dev/kmsg ]; then
760                         export date_time=`date +"%F %T"`
761                         echo "run fstests $seqnum at $date_time" > /dev/kmsg
762                         # _check_dmesg depends on this log in dmesg
763                         touch ${RESULT_DIR}/check_dmesg
764                 fi
765                 _try_wipe_scratch_devs > /dev/null 2>&1
766
767                 # clear the WARN_ONCE state to allow a potential problem
768                 # to be reported for each test
769                 (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
770
771                 if [ "$DUMP_OUTPUT" = true ]; then
772                         _run_seq 2>&1 | tee $tmp.out
773                         # Because $? would get tee's return code
774                         sts=${PIPESTATUS[0]}
775                 else
776                         _run_seq >$tmp.out 2>&1
777                         sts=$?
778                 fi
779
780                 if [ -f core ]; then
781                         _dump_err_cont "[dumped core]"
782                         mv core $RESULT_BASE/$seqnum.core
783                         err=true
784                 fi
785
786                 if [ -f $seqres.notrun ]; then
787                         $timestamp && _timestamp
788                         stop=`_wallclock`
789                         $timestamp || echo -n "[not run] "
790                         $timestamp && echo " [not run]" && \
791                                       echo -n " $seqnum -- "
792                         cat $seqres.notrun
793                         notrun="$notrun $seqnum"
794                         n_notrun=`expr $n_notrun + 1`
795                         tc_status="notrun"
796                         continue;
797                 fi
798
799                 if [ $sts -ne 0 ]; then
800                         _dump_err_cont "[failed, exit status $sts]"
801                         _test_unmount 2> /dev/null
802                         _scratch_unmount 2> /dev/null
803                         rm -f ${RESULT_DIR}/require_test*
804                         rm -f ${RESULT_DIR}/require_scratch*
805                         err=true
806                 else
807                         # the test apparently passed, so check for corruption
808                         # and log messages that shouldn't be there.
809                         _check_filesystems
810                         _check_dmesg || err=true
811                 fi
812
813                 # Scan for memory leaks after every test so that associating
814                 # a leak to a particular test will be as accurate as possible.
815                 _check_kmemleak || err=true
816
817                 # test ends after all checks are done.
818                 $timestamp && _timestamp
819                 stop=`_wallclock`
820
821                 if [ ! -f $seq.out ]; then
822                         _dump_err "no qualified output"
823                         err=true
824                         continue;
825                 fi
826
827                 # coreutils 8.16+ changed quote formats in error messages
828                 # from `foo' to 'foo'. Filter old versions to match the new
829                 # version.
830                 sed -i "s/\`/\'/g" $tmp.out
831                 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
832                         if ! $err ; then
833                                 echo "$seqnum `expr $stop - $start`" >>$tmp.time
834                                 echo -n " `expr $stop - $start`s"
835                         fi
836                         echo ""
837                 else
838                         _dump_err "- output mismatch (see $seqres.out.bad)"
839                         mv $tmp.out $seqres.out.bad
840                         $diff $seq.out $seqres.out.bad | {
841                         if test "$DIFF_LENGTH" -le 0; then
842                                 cat
843                         else
844                                 head -n "$DIFF_LENGTH"
845                                 echo "..."
846                                 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
847                                         " to see the entire diff)"
848                         fi; } | sed -e 's/^\(.\)/    \1/'
849                         err=true
850                 fi
851         done
852
853         # make sure we record the status of the last test we ran.
854         if $err ; then
855                 bad="$bad $seqnum"
856                 n_bad=`expr $n_bad + 1`
857                 tc_status="fail"
858         fi
859         if $do_report && ! $first_test ; then
860                 if [ $tc_status != "expunge" ] ; then
861                         _make_testcase_report "$prev_seq" "$tc_status"
862                 fi
863         fi
864
865         sect_stop=`_wallclock`
866         interrupt=false
867         _wrapup
868         interrupt=true
869         echo
870
871         _test_unmount 2> /dev/null
872         _scratch_unmount 2> /dev/null
873 }
874
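# Main loop: run every configured section, and repeat the whole sequence for
# each -i iteration.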
875 for ((iters = 0; iters < $iterations; iters++)) do
876         for section in $HOST_OPTIONS_SECTIONS; do
877                 run_section $section
878         done
879 done
880
881 interrupt=false
882 status=`expr $sum_bad != 0`
883 exit