#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
#
# Control script for QA
#
tmp=/tmp/$$
status=0
needwrap=true
needsum=true
n_try=0
try=""
n_bad=0
sum_bad=0
bad=""
n_notrun=0
notrun=""
interrupt=true
diff="diff -u"
showme=false
have_test_arg=false
randomize=false
export here=`pwd`
xfile=""
subdir_xfile=""
brief_test_summary=false
do_report=false
DUMP_OUTPUT=false
iterations=1

# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""

# start the initialisation work now
iam=check

export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}

# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}

# by default don't output timestamps
timestamp=${TIMESTAMP:=false}

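# clean up temp files and old check output before we start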
rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*

SRC_GROUPS="generic shared"
export SRC_DIR="tests"

usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -glusterfs          test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -virtiofs           test virtiofs
    -overlay            test overlay
    -pvfs2              test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    -i <n>              iterate the test list <n> times
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
For every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
            exit 0
}

get_sub_group_list()
{
        local d=$1
        local grp=$2

        test -s "$SRC_DIR/$d/group" || return 1

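        # Strip comments from the group file, append a trailing space so the
        # group match also works at the end of a line, then print each
        # matching test name as $SRC_DIR/$d/<name>.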
        local grpl=$(sed -n < $SRC_DIR/$d/group \
                -e 's/#.*//' \
                -e 's/$/ /' \
                -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
        echo $grpl
}

get_group_list()
{
        local grp=$1
        local grpl=""
        local sub=$(dirname $grp)
        local fsgroup="$FSTYP"

        if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
                # group is given as <subdir>/<group> (e.g. xfs/quick)
                grp=$(basename $grp)
                get_sub_group_list $sub $grp
                return
        fi

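        # ext2 and ext3 have no test dirs of their own; their tests live in
        # the ext4 directory, so map the fs-specific group dir accordingly.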
        if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
            fsgroup=ext4
        fi
        for d in $SRC_GROUPS $fsgroup; do
                if ! test -d "$SRC_DIR/$d" ; then
                        continue
                fi
                grpl="$grpl $(get_sub_group_list $d $grp)"
        done
        echo $grpl
}

# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
get_all_tests()
{
        touch $tmp.list
        for d in $SRC_GROUPS $FSTYP; do
                if ! test -d "$SRC_DIR/$d" ; then
                        continue
                fi
                ls $SRC_DIR/$d/* | \
                        grep -v "\..*" | \
                        grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
                        grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
        done
}

# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
trim_test_list()
{
        test_list="$*"

        rm -f $tmp.grep
        numsed=0
        for t in $test_list
        do
            if [ $numsed -gt 100 ]; then
                grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
                mv $tmp.tmp $tmp.list
                numsed=0
                rm -f $tmp.grep
            fi
            echo "^$t\$" >>$tmp.grep
            numsed=`expr $numsed + 1`
        done
        grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
        mv $tmp.tmp $tmp.list
        rm -f $tmp.grep
}


_wallclock()
{
    date "+%s"
}

_timestamp()
{
    now=`date "+%T"`
    echo -n " [$now]"
}

_prepare_test_list()
{
        unset list
        # Tests specified on the command line
        if [ -s $tmp.arglist ]; then
                cat $tmp.arglist > $tmp.list
        else
                touch $tmp.list
        fi

        # Specified groups to include
        # Note that the CLI processing adds a leading space to the first group
        # parameter, so we have to allow for that here when checking for "all"
        if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
                # no test numbers, do everything
                get_all_tests
        else
                for group in $GROUP_LIST; do
                        list=$(get_group_list $group)
                        if [ -z "$list" ]; then
                                echo "Group \"$group\" is empty or not defined?"
                                exit 1
                        fi

                        for t in $list; do
                                grep -s "^$t\$" $tmp.list >/dev/null || \
                                                        echo "$t" >>$tmp.list
                        done
                done
        fi

        # Specified groups to exclude
        for xgroup in $XGROUP_LIST; do
                list=$(get_group_list $xgroup)
                if [ -z "$list" ]; then
                        echo "Group \"$xgroup\" is empty or not defined?"
                        exit 1
                fi

                trim_test_list $list
        done

        # sort the list of tests into numeric order
        if $randomize; then
                if type shuf >& /dev/null; then
                        sorter="shuf"
                else
                        sorter="awk -v seed=$RANDOM -f randomize.awk"
                fi
        else
                sorter="cat"
        fi
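        # numeric sort puts the tests in order, uniq drops duplicates picked
        # up from overlapping groups, and the sorter optionally shuffles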
        list=`sort -n $tmp.list | uniq | $sorter`
        rm -f $tmp.list
}

# Process command arguments first.
while [ $# -gt 0 ]; do
        case "$1" in
        -\? | -h | --help) usage ;;

        -nfs)           FSTYP=nfs ;;
        -glusterfs)     FSTYP=glusterfs ;;
        -cifs)          FSTYP=cifs ;;
        -9p)            FSTYP=9p ;;
        -virtiofs)      FSTYP=virtiofs ;;
        -overlay)       FSTYP=overlay; export OVERLAY=true ;;
        -pvfs2)         FSTYP=pvfs2 ;;
        -tmpfs)         FSTYP=tmpfs ;;
        -ubifs)         FSTYP=ubifs ;;

        -g)     group=$2 ; shift ;
                GROUP_LIST="$GROUP_LIST ${group//,/ }"
                ;;

        -x)     xgroup=$2 ; shift ;
                XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
                ;;

        -X)     subdir_xfile=$2; shift ;
                ;;
        -E)     xfile=$2; shift ;
                if [ -f $xfile ]; then
                        sed "s/#.*$//" "$xfile" >> $tmp.xlist
                fi
                ;;
        -s)     RUN_SECTION="$RUN_SECTION $2"; shift ;;
        -S)     EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
        -l)     diff="diff" ;;
        -udiff) diff="$diff -u" ;;

        -n)     showme=true ;;
        -r)     randomize=true ;;
        -i)     iterations=$2; shift ;;
        -T)     timestamp=true ;;
        -d)     DUMP_OUTPUT=true ;;
        -b)     brief_test_summary=true;;
        -R)     report_fmt=$2 ; shift ;
                REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
                do_report=true
                ;;
        --large-fs) export LARGE_SCRATCH_DEV=yes ;;
        --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

        -*)     usage ;;
        *)      # not an argument, we've got tests now.
                have_test_arg=true ;;
        esac

        # if we've found a test specification, then break out of the processing
        # loop before we shift the arguments so that this is the first argument
        # that we process in the test arg loop below.
        if $have_test_arg; then
                break;
        fi

        shift
done

# We need common/rc, which also sources common/config. Source it after
# processing args, because overlay needs FSTYP set before sourcing common/config.
if ! . ./common/rc; then
        echo "check: failed to source common/rc"
        exit 1
fi

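# Build up the exclude list ($tmp.xlist) from the per-directory exclude files
# named by -X, ignoring comments.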
if [ -n "$subdir_xfile" ]; then
        for d in $SRC_GROUPS $FSTYP; do
                [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
                for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
                        echo $d/$f >> $tmp.xlist
                done
        done
fi

# Process tests from command line now.
if $have_test_arg; then
        while [ $# -gt 0 ]; do
                case "$1" in
                -*)     echo "Arguments before tests, please!"
                        status=1
                        exit $status
                        ;;
                *)      # Expand test pattern (e.g. xfs/???, *fs/001)
                        list=$(cd $SRC_DIR; echo $1)
                        for t in $list; do
                                test_dir=`dirname $t`
                                test_dir=${test_dir#$SRC_DIR/*}
                                test_name=`basename $t`
                                group_file=$SRC_DIR/$test_dir/group

                                if egrep -q "^$test_name" $group_file; then
                                        # in group file ... OK
                                        echo $SRC_DIR/$test_dir/$test_name \
                                                >>$tmp.arglist
                                else
                                        # oops
                                        echo "$t - unknown test, ignored"
                                fi
                        done
                        ;;
                esac

                shift
        done
elif [ -z "$GROUP_LIST" ]; then
        # default group list is the auto group. If any other group or test is
        # specified, we use that instead.
        GROUP_LIST="auto"
fi

if [ `id -u` -ne 0 ]
then
    echo "check: QA must be run as root"
    exit 1
fi

_wipe_counters()
{
        n_try="0"
        n_bad="0"
        n_notrun="0"
        unset try notrun bad
}

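# Append a message to the main check.log, and to the per-section log when
# config sections are in use.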
_global_log() {
        echo "$1" >> $check.log
        if $OPTIONS_HAVE_SECTIONS; then
                echo "$1" >> ${REPORT_DIR}/check.log
        fi
}

_wrapup()
{
        seq="check"
        check="$RESULT_BASE/check"

        if $showme; then
                if $needwrap; then
                        if $do_report; then
                                _make_section_report
                        fi
                        needwrap=false
                fi
        elif $needwrap; then
                if [ -f $check.time -a -f $tmp.time ]; then
                        cat $check.time $tmp.time  \
                                | $AWK_PROG '
                                { t[$1] = $2 }
                                END {
                                        if (NR > 0) {
                                                for (i in t) print i " " t[i]
                                        }
                                }' \
                                | sort -n >$tmp.out
                        mv $tmp.out $check.time
                        if $OPTIONS_HAVE_SECTIONS; then
                                cp $check.time ${REPORT_DIR}/check.time
                        fi
                fi

                _global_log ""
                _global_log "$(date)"

                echo "SECTION       -- $section" >>$tmp.summary
                echo "=========================" >>$tmp.summary
                if [ ! -z "$n_try" -a $n_try != 0 ]; then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Ran:$try"
                                echo "Ran:$try" >>$tmp.summary
                        fi
                        _global_log "Ran:$try"
                fi

                $interrupt && echo "Interrupted!" | tee -a $check.log
                if $OPTIONS_HAVE_SECTIONS; then
                        $interrupt && echo "Interrupted!" | tee -a \
                                ${REPORT_DIR}/check.log
                fi

                if [ ! -z "$notrun" ]; then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Not run:$notrun"
                                echo "Not run:$notrun" >>$tmp.summary
                        fi
                        _global_log "Not run:$notrun"
                fi

                if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
                        echo "Failures:$bad"
                        echo "Failed $n_bad of $n_try tests"
                        _global_log "Failures:$bad"
                        _global_log "Failed $n_bad of $n_try tests"
                        echo "Failures:$bad" >>$tmp.summary
                        echo "Failed $n_bad of $n_try tests" >>$tmp.summary
                else
                        echo "Passed all $n_try tests"
                        _global_log "Passed all $n_try tests"
                        echo "Passed all $n_try tests" >>$tmp.summary
                fi
                echo "" >>$tmp.summary
                if $do_report; then
                        _make_section_report
                fi
                needwrap=false
        fi

        sum_bad=`expr $sum_bad + $n_bad`
        _wipe_counters
        rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
        if ! $OPTIONS_HAVE_SECTIONS; then
                rm -f $tmp.*
        fi
}

_summary()
{
        _wrapup
        if $showme; then
                :
        elif $needsum; then
                count=`wc -L $tmp.summary | cut -f1 -d" "`
                cat $tmp.summary
                needsum=false
        fi
        rm -f $tmp.*
}

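# Check the test and scratch filesystems for consistency, but only if the
# test declared it used them via the require_test / require_scratch flag files.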
_check_filesystems()
{
        if [ -f ${RESULT_DIR}/require_test ]; then
                _check_test_fs || err=true
                rm -f ${RESULT_DIR}/require_test*
        else
                _test_unmount 2> /dev/null
        fi
        if [ -f ${RESULT_DIR}/require_scratch ]; then
                _check_scratch_fs || err=true
                rm -f ${RESULT_DIR}/require_scratch*
        fi
        _scratch_unmount 2> /dev/null
}

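# Print a marker and return 1 if this test appears in the exclude list built
# from the -X/-E options; return 0 otherwise.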
_expunge_test()
{
        local TEST_ID="$1"
        if [ -s $tmp.xlist ]; then
                if grep -q $TEST_ID $tmp.xlist; then
                        echo "       [expunged]"
                        return 1
                fi
        fi
        return 0
}

# Can we run systemd scopes?
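# Probe by running a trivial command in a transient scope: if scopes work,
# the command's exit status (77) propagates back to us.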
HAVE_SYSTEMD_SCOPES=
systemctl reset-failed "fstests-check" &>/dev/null
systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes

# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}

# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
# test framework.
#
# If systemd is available, run the entire test script in a scope so that we can
# kill all subprocesses of the test if it fails to clean up after itself.  This
# is essential for ensuring that the post-test unmount succeeds.  Note that
# systemd doesn't automatically remove transient scopes that fail to terminate
# when systemd tells them to terminate (e.g. programs stuck in D state when
# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
_run_seq() {
        local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")

        if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
                local unit="$(systemd-escape "fs$seq").scope"
                systemctl reset-failed "${unit}" &> /dev/null
                systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
                res=$?
                systemctl stop "${unit}" &> /dev/null
                return "${res}"
        else
                "${cmd[@]}"
        fi
}

_detect_kmemleak
_prepare_test_list

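# On exit or on HUP/INT/QUIT/TERM, emit the summary (or just the per-section
# wrapup) before exiting with the accumulated status.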
if $OPTIONS_HAVE_SECTIONS; then
        trap "_summary; exit \$status" 0 1 2 3 15
else
        trap "_wrapup; exit \$status" 0 1 2 3 15
fi

function run_section()
{
        local section=$1

        OLD_FSTYP=$FSTYP
        OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
        get_next_config $section

        # Do we need to run only some sections?
        if [ ! -z "$RUN_SECTION" ]; then
                skip=true
                for s in $RUN_SECTION; do
                        if [ $section == $s ]; then
                                skip=false
                                break;
                        fi
                done
                if $skip; then
                        return
                fi
        fi

        # Did this section get excluded?
        if [ ! -z "$EXCLUDE_SECTION" ]; then
                skip=false
                for s in $EXCLUDE_SECTION; do
                        if [ $section == $s ]; then
                                skip=true
                                break;
                        fi
                done
                if $skip; then
                        return
                fi
        fi

        mkdir -p $RESULT_BASE
        if [ ! -d $RESULT_BASE ]; then
                echo "failed to create results directory $RESULT_BASE"
                status=1
                exit
        fi

        if $OPTIONS_HAVE_SECTIONS; then
                echo "SECTION       -- $section"
        fi

        sect_start=`_wallclock`
        if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
                echo "RECREATING    -- $FSTYP on $TEST_DEV"
                _test_unmount 2> /dev/null
                if ! _test_mkfs >$tmp.err 2>&1
                then
                        echo "our local _test_mkfs routine ..."
                        cat $tmp.err
                        echo "check: failed to mkfs \$TEST_DEV using specified options"
                        status=1
                        exit
                fi
                if ! _test_mount
                then
                        echo "check: failed to mount $TEST_DEV on $TEST_DIR"
                        status=1
                        exit
                fi
                # TEST_DEV has been recreated, so the FSTYP previously derived
                # from TEST_DEV may have changed; source common/rc again with
                # the correct FSTYP to pick up FSTYP-specific configs, e.g.
                # common/xfs
                . common/rc
                _prepare_test_list
        elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
                _test_unmount 2> /dev/null
                if ! _test_mount
                then
                        echo "check: failed to mount $TEST_DEV on $TEST_DIR"
                        status=1
                        exit
                fi
        fi

        init_rc

        seq="check"
        check="$RESULT_BASE/check"

        # don't leave old full output behind on a clean run
        rm -f $check.full

        [ -f $check.time ] || touch $check.time

        # print out our test configuration
        echo "FSTYP         -- `_full_fstyp_details`"
        echo "PLATFORM      -- `_full_platform_details`"
        if [ ! -z "$SCRATCH_DEV" ]; then
          echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
          echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
        fi
        echo
        needwrap=true

        if [ ! -z "$SCRATCH_DEV" ]; then
          _scratch_unmount 2> /dev/null
          # call the overridden mkfs - make sure the FS is built
          # the same as we'll create it later.

          if ! _scratch_mkfs >$tmp.err 2>&1
          then
              echo "our local _scratch_mkfs routine ..."
              cat $tmp.err
              echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
              status=1
              exit
          fi

          # call the overridden mount - make sure the FS mounts with
          # the same options that we'll mount with later.
          if ! _try_scratch_mount >$tmp.err 2>&1
          then
              echo "our local mount routine ..."
              cat $tmp.err
              echo "check: failed to mount \$SCRATCH_DEV using specified options"
              status=1
              exit
          else
              _scratch_unmount
          fi
        fi

        seqres="$check"
        _check_test_fs

        err=false
        first_test=true
        prev_seq=""
        for seq in $list ; do
                # Run report for previous test!
                if $err ; then
                        bad="$bad $seqnum"
                        n_bad=`expr $n_bad + 1`
                        tc_status="fail"
                fi
                if $do_report && ! $first_test ; then
                        if [ $tc_status != "expunge" ] ; then
                                _make_testcase_report "$prev_seq" "$tc_status"
                        fi
                fi
                first_test=false

                err=false
                prev_seq="$seq"
                if [ ! -f $seq ]; then
                        # Try to get the full name in case the user supplied
                        # only the seq id and the test has a name. A bit of a
                        # hassle to really find the test itself and not its
                        # sample output or helper files.
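                        # Pick the shortest executable whose name starts with
                        # the given id; that is the test script itself rather
                        # than any auxiliary files that share the prefix.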
                        bname=$(basename $seq)
                        full_seq=$(find $(dirname $seq) -name $bname* -executable |
                                awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
                                     END { print shortest }')
                        if [ -f $full_seq ] && \
                           [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
                                seq=$full_seq
                        fi
                fi

                # the filename for the test and the name output are different.
                # we don't include the tests/ directory in the name output.
                export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

                # Similarly, the result directory needs to replace the tests/
                # part of the test location.
                group=`dirname $seq`
                if $OPTIONS_HAVE_SECTIONS; then
                        export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
                        REPORT_DIR="$RESULT_BASE/$section"
                else
                        export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
                        REPORT_DIR="$RESULT_BASE"
                fi
                seqres="$REPORT_DIR/$seqnum"

                mkdir -p $RESULT_DIR
                rm -f ${RESULT_DIR}/require_scratch*
                rm -f ${RESULT_DIR}/require_test*
                echo -n "$seqnum"

                if $showme; then
                        _expunge_test $seqnum
                        if [ $? -eq 1 ]; then
                            tc_status="expunge"
                            continue
                        fi
                        echo
                        start=0
                        stop=0
                        tc_status="list"
                        n_notrun=`expr $n_notrun + 1`
                        continue
                fi

                tc_status="pass"
                if [ ! -f $seq ]; then
                        echo " - no such test?"
                        continue
                fi

                # really going to try and run this one
                rm -f $seqres.out.bad

                # check if we really should run it
                _expunge_test $seqnum
                if [ $? -eq 1 ]; then
                        tc_status="expunge"
                        continue
                fi

                # record that we really tried to run this test.
                try="$try $seqnum"
                n_try=`expr $n_try + 1`

                # slashes now in names, sed barfs on them so use grep
                lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
                if [ "X$lasttime" != X ]; then
                        echo -n " ${lasttime}s ... "
                else
                        echo -n "       " # prettier output with timestamps.
                fi
                rm -f core $seqres.notrun

                start=`_wallclock`
                $timestamp && echo -n " ["`date "+%T"`"]"
                [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
                $LOGGER_PROG "run xfstest $seqnum"
                if [ -w /dev/kmsg ]; then
                        export date_time=`date +"%F %T"`
                        echo "run fstests $seqnum at $date_time" > /dev/kmsg
                        # _check_dmesg depends on this log in dmesg
                        touch ${RESULT_DIR}/check_dmesg
                fi
                _try_wipe_scratch_devs > /dev/null 2>&1

                # clear the WARN_ONCE state to allow a potential problem
                # to be reported for each test
                (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1

                if [ "$DUMP_OUTPUT" = true ]; then
                        _run_seq 2>&1 | tee $tmp.out
                        # Because $? would get tee's return code
                        sts=${PIPESTATUS[0]}
                else
                        _run_seq >$tmp.out 2>&1
                        sts=$?
                fi

                if [ -f core ]; then
                        _dump_err_cont "[dumped core]"
                        mv core $RESULT_BASE/$seqnum.core
                        err=true
                fi

                if [ -f $seqres.notrun ]; then
                        $timestamp && _timestamp
                        stop=`_wallclock`
                        $timestamp || echo -n "[not run] "
                        $timestamp && echo " [not run]" && \
                                      echo -n " $seqnum -- "
                        cat $seqres.notrun
                        notrun="$notrun $seqnum"
                        n_notrun=`expr $n_notrun + 1`
                        tc_status="notrun"
                        continue;
                fi

                if [ $sts -ne 0 ]; then
                        _dump_err_cont "[failed, exit status $sts]"
                        _test_unmount 2> /dev/null
                        _scratch_unmount 2> /dev/null
                        rm -f ${RESULT_DIR}/require_test*
                        rm -f ${RESULT_DIR}/require_scratch*
                        err=true
                else
                        # the test apparently passed, so check for corruption
                        # and log messages that shouldn't be there.
                        _check_filesystems
                        _check_dmesg || err=true
                fi

                # Reload the module after each test to check for leaks or
                # other problems.
                if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
                        _test_unmount 2> /dev/null
                        _scratch_unmount 2> /dev/null
                        modprobe -r fs-$FSTYP
                        modprobe fs-$FSTYP
                fi

                # Scan for memory leaks after every test so that associating
                # a leak to a particular test will be as accurate as possible.
                _check_kmemleak || err=true

                # test ends after all checks are done.
                $timestamp && _timestamp
                stop=`_wallclock`

                if [ ! -f $seq.out ]; then
                        _dump_err "no qualified output"
                        err=true
                        continue;
                fi

                # coreutils 8.16+ changed quote formats in error messages
                # from `foo' to 'foo'. Filter old versions to match the new
                # version.
                sed -i "s/\`/\'/g" $tmp.out
                if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
                        if ! $err ; then
                                echo "$seqnum `expr $stop - $start`" >>$tmp.time
                                echo -n " `expr $stop - $start`s"
                        fi
                        echo ""
                else
                        _dump_err "- output mismatch (see $seqres.out.bad)"
                        mv $tmp.out $seqres.out.bad
                        $diff $seq.out $seqres.out.bad | {
                        if test "$DIFF_LENGTH" -le 0; then
                                cat
                        else
                                head -n "$DIFF_LENGTH"
                                echo "..."
                                echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
                                        " to see the entire diff)"
                        fi; } | sed -e 's/^\(.\)/    \1/'
                        err=true
                fi
        done

        # make sure we record the status of the last test we ran.
        if $err ; then
                bad="$bad $seqnum"
                n_bad=`expr $n_bad + 1`
                tc_status="fail"
        fi
        if $do_report && ! $first_test ; then
                if [ $tc_status != "expunge" ] ; then
                        _make_testcase_report "$prev_seq" "$tc_status"
                fi
        fi

        sect_stop=`_wallclock`
        interrupt=false
        _wrapup
        interrupt=true
        echo

        _test_unmount 2> /dev/null
        _scratch_unmount 2> /dev/null
}

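# Main loop: run every configured section, the whole set $iterations times.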
for ((iters = 0; iters < $iterations; iters++)) do
        for section in $HOST_OPTIONS_SECTIONS; do
                run_section $section
        done
done

interrupt=false
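# Exit status reflects whether any test failed in any section; the EXIT trap
# does the final reporting.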
status=`expr $sum_bad != 0`
exit