btrfs: make sure we rescan all devices after unregistering
[xfstests-dev.git] / check
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
#
# Control script for QA
#

# Per-run prefix for temporary state files (list, grep patterns, xlist...).
tmp=/tmp/$$
status=0		# overall exit status of the whole check run
needwrap=true		# _wrapup still has to run for the current section
needsum=true		# _summary still has to print the final summary
n_try=0			# number of tests attempted in this section
try=""			# names of tests attempted in this section
n_bad=0			# number of failed tests in this section
sum_bad=0		# failures accumulated across all sections
bad=""			# names of failed tests
n_notrun=0		# number of skipped tests
notrun=""		# names of skipped tests
interrupt=true		# stays true until a run completes (see "Interrupted!")
diff="diff -u"
showme=false		# -n: list tests without running them
have_test_arg=false	# an explicit test list was given on the command line
randomize=false		# -r: shuffle the test order
exact_order=false	# --exact-order: run exactly as listed
export here=$(pwd)
xfile=""		# -e/-E argument holder for building $tmp.xlist
subdir_xfile=""		# -X: per-test-dir exclude file name
brief_test_summary=false
do_report=false
DUMP_OUTPUT=false
iterations=1

# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""

# start the initialisation work now
iam=check

export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}

# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}

# by default don't output timestamps
timestamp=${TIMESTAMP:=false}

# remove stale state left behind by a previous run
rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*

SRC_GROUPS="generic shared"
export SRC_DIR="tests"

# Print the full usage/help text to stdout and exit successfully.
# Called for -h/-?/--help and for any unrecognised option.
usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -glusterfs          test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -virtiofs           test virtiofs
    -overlay            test overlay
    -pvfs2              test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    --exact-order       run tests in the exact order specified
    -i <n>              iterate the test list <n> times
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -e testlist         exclude a specific list of tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
for every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
	    exit 0
}

120 get_sub_group_list()
121 {
122         local d=$1
123         local grp=$2
124
125         test -s "$SRC_DIR/$d/group" || return 1
126
127         local grpl=$(sed -n < $SRC_DIR/$d/group \
128                 -e 's/#.*//' \
129                 -e 's/$/ /' \
130                 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
131         echo $grpl
132 }
133
# Expand a group name into the list of matching test paths.  The group may
# be global (searched in $SRC_GROUPS plus the current fs type's dir) or
# qualified as <subdir>/<group>.  ext2/ext3 share the ext4 test directory.
get_group_list()
{
	local grp=$1
	local grpl=""
	local sub=$(dirname $grp)
	local fsgroup="$FSTYP"

	if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
		# group is given as <subdir>/<group> (e.g. xfs/quick)
		grp=$(basename $grp)
		get_sub_group_list $sub $grp
		return
	fi

	if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
		fsgroup=ext4
	fi
	for d in $SRC_GROUPS $fsgroup; do
		if ! test -d "$SRC_DIR/$d" ; then
			continue
		fi
		grpl="$grpl $(get_sub_group_list $d $grp)"
	done
	# intentionally unquoted: normalises whitespace between entries
	echo $grpl
}

# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc.
# Appends the resulting paths to $tmp.list.
get_all_tests()
{
	touch $tmp.list
	for d in $SRC_GROUPS $FSTYP; do
		if ! test -d "$SRC_DIR/$d" ; then
			continue
		fi
		# skip names with dots (output/backup files), anything not
		# matching the valid-test-id pattern, group files and Makefiles
		ls $SRC_DIR/$d/* | \
			grep -v "\..*" | \
			grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
			grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
	done
}

# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
trim_test_list()
{
	test_list="$*"

	rm -f $tmp.grep
	numsed=0
	for t in $test_list
	do
		# flush every ~100 patterns so the grep -f pattern file
		# never grows unboundedly large
		if [ $numsed -gt 100 ]; then
			grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
			mv $tmp.tmp $tmp.list
			numsed=0
			rm -f $tmp.grep
		fi
		# anchor both ends so "xfs/001" cannot match "xfs/0011"
		echo "^$t\$" >>$tmp.grep
		numsed=$((numsed + 1))
	done
	grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
	mv $tmp.tmp $tmp.list
	rm -f $tmp.grep
}

# Seconds since the Unix epoch; used for per-test and per-section timing.
_wallclock()
{
    date "+%s"
}

# Print " [HH:MM:SS]" with no trailing newline, for -T timestamp output.
_timestamp()
{
    now=$(date "+%T")
    echo -n " [$now]"
}

# Build the final ordered test list in the global $list from the explicit
# command-line tests ($tmp.arglist), the included groups ($GROUP_LIST) and
# the excluded groups ($XGROUP_LIST).
_prepare_test_list()
{
	unset list
	# Tests specified on the command line
	if [ -s $tmp.arglist ]; then
		cat $tmp.arglist > $tmp.list
	else
		touch $tmp.list
	fi

	# Specified groups to include
	# Note that the CLI processing adds a leading space to the first group
	# parameter, so we have to catch that here checking for "all"
	if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
		# no test numbers, do everything
		get_all_tests
	else
		for group in $GROUP_LIST; do
			list=$(get_group_list $group)
			if [ -z "$list" ]; then
				echo "Group \"$group\" is empty or not defined?"
				exit 1
			fi

			# append, skipping entries already present
			for t in $list; do
				grep -s "^$t\$" $tmp.list >/dev/null || \
							echo "$t" >>$tmp.list
			done
		done
	fi

	# Specified groups to exclude
	for xgroup in $XGROUP_LIST; do
		list=$(get_group_list $xgroup)
		if [ -z "$list" ]; then
			echo "Group \"$xgroup\" is empty or not defined?"
			continue
		fi

		trim_test_list $list
	done

	# sort the list of tests into numeric order unless we're running tests
	# in the exact order specified
	if ! $exact_order; then
		if $randomize; then
			# prefer shuf(1); fall back to the awk shuffler
			if type shuf >& /dev/null; then
				sorter="shuf"
			else
				sorter="awk -v seed=$RANDOM -f randomize.awk"
			fi
		else
			sorter="cat"
		fi
		list=$(sort -n $tmp.list | uniq | $sorter)
	else
		list=$(cat $tmp.list)
	fi
	rm -f $tmp.list
}

# Process command arguments first.
while [ $# -gt 0 ]; do
	case "$1" in
	-\? | -h | --help) usage ;;

	-nfs)		FSTYP=nfs ;;
	-glusterfs)	FSTYP=glusterfs ;;
	-cifs)		FSTYP=cifs ;;
	-9p)		FSTYP=9p ;;
	-virtiofs)	FSTYP=virtiofs ;;
	-overlay)	FSTYP=overlay; export OVERLAY=true ;;
	-pvfs2)		FSTYP=pvfs2 ;;
	-tmpfs)		FSTYP=tmpfs ;;
	-ubifs)		FSTYP=ubifs ;;

	-g)	group=$2 ; shift ;
		GROUP_LIST="$GROUP_LIST ${group//,/ }"
		;;

	-x)	xgroup=$2 ; shift ;
		XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
		;;

	-X)	subdir_xfile=$2; shift ;
		;;
	-e)
		xfile=$2; shift ;
		# turn a comma/space separated list into one test per line
		echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
		;;

	-E)	xfile=$2; shift ;
		if [ -f $xfile ]; then
			sed "s/#.*$//" "$xfile" >> $tmp.xlist
		fi
		;;
	-s)	RUN_SECTION="$RUN_SECTION $2"; shift ;;
	-S)	EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
	-l)	diff="diff" ;;
	-udiff)	diff="$diff -u" ;;

	-n)	showme=true ;;
	-r)
		if $exact_order; then
			echo "Cannot specify -r and --exact-order."
			exit 1
		fi
		randomize=true
		;;
	--exact-order)
		if $randomize; then
			echo "Cannot specify --exact-order and -r."
			exit 1
		fi
		exact_order=true
		;;
	-i)	iterations=$2; shift ;;
	-T)	timestamp=true ;;
	-d)	DUMP_OUTPUT=true ;;
	-b)	brief_test_summary=true;;
	-R)	report_fmt=$2 ; shift ;
		REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
		do_report=true
		;;
	--large-fs) export LARGE_SCRATCH_DEV=yes ;;
	# bug fix: this used ${r#*=}, but $r is never set anywhere, so the
	# option silently exported an empty value; the option word is $1
	--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

	-*)	usage ;;
	*)	# not an argument, we've got tests now.
		have_test_arg=true ;;
	esac

	# if we've found a test specification, then break out of the processing
	# loop before we shift the arguments so that this is the first argument
	# that we process in the test arg loop below.
	if $have_test_arg; then
		break;
	fi

	shift
done

354 # we need common/rc, that also sources common/config. We need to source it
355 # after processing args, overlay needs FSTYP set before sourcing common/config
356 if ! . ./common/rc; then
357         echo "check: failed to source common/rc"
358         exit 1
359 fi
360
361 if [ -n "$subdir_xfile" ]; then
362         for d in $SRC_GROUPS $FSTYP; do
363                 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
364                 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
365                         echo $d/$f >> $tmp.xlist
366                 done
367         done
368 fi
369
# Process tests from command line now.
if $have_test_arg; then
	while [ $# -gt 0 ]; do
		case "$1" in
		-*)	echo "Arguments before tests, please!"
			status=1
			exit $status
			;;
		*)	# Expand test pattern (e.g. xfs/???, *fs/001)
			list=$(cd $SRC_DIR; echo $1)
			for t in $list; do
				test_dir=$(dirname $t)
				test_dir=${test_dir#$SRC_DIR/*}
				test_name=$(basename $t)
				group_file=$SRC_DIR/$test_dir/group

				# only accept names listed in the dir's group
				# file (egrep is deprecated; use grep -E)
				if grep -E -q "^$test_name" $group_file; then
					# in group file ... OK
					echo $SRC_DIR/$test_dir/$test_name \
						>>$tmp.arglist
				else
					# oops
					echo "$t - unknown test, ignored"
				fi
			done
			;;
		esac

		shift
	done
elif [ -z "$GROUP_LIST" ]; then
	# default group list is the auto group. If any other group or test is
	# specified, we use that instead.
	GROUP_LIST="auto"
fi

# QA recreates and mounts filesystems and writes to system files; require root.
if [ $(id -u) -ne 0 ]
then
    echo "check: QA must be run as root"
    exit 1
fi

# Reset the per-section try/fail/notrun counters and their name lists.
_wipe_counters()
{
	n_try="0"
	n_bad="0"
	n_notrun="0"
	unset try notrun bad
}

# Append a line to the global $check.log, and to the per-section copy when
# multiple config sections are in use.
_global_log() {
	echo "$1" >> $check.log
	if $OPTIONS_HAVE_SECTIONS; then
		echo "$1" >> ${REPORT_DIR}/check.log
	fi
}

# Per-section teardown: merge the per-test times into $check.time, append the
# section summary (Ran / Not run / Failures) to the logs and $tmp.summary,
# then reset the per-section counters and clean up temp files.
_wrapup()
{
	seq="check"
	check="$RESULT_BASE/check"

	if $showme; then
		if $needwrap; then
			if $do_report; then
				_make_section_report
			fi
			needwrap=false
		fi
	elif $needwrap; then
		if [ -f $check.time -a -f $tmp.time ]; then
			# merge old and new times; for duplicate test names the
			# entry from this run ($tmp.time, read last) wins
			cat $check.time $tmp.time  \
				| $AWK_PROG '
				{ t[$1] = $2 }
				END {
					if (NR > 0) {
						for (i in t) print i " " t[i]
					}
				}' \
				| sort -n >$tmp.out
			mv $tmp.out $check.time
			if $OPTIONS_HAVE_SECTIONS; then
				cp $check.time ${REPORT_DIR}/check.time
			fi
		fi

		_global_log ""
		_global_log "$(date)"

		echo "SECTION       -- $section" >>$tmp.summary
		echo "=========================" >>$tmp.summary
		if [ ! -z "$n_try" -a $n_try != 0 ]; then
			if [ $brief_test_summary == "false" ]; then
				echo "Ran:$try"
				echo "Ran:$try" >>$tmp.summary
			fi
			_global_log "Ran:$try"
		fi

		# $interrupt is only cleared on a completed run, so this
		# reports runs that were cut short
		$interrupt && echo "Interrupted!" | tee -a $check.log
		if $OPTIONS_HAVE_SECTIONS; then
			$interrupt && echo "Interrupted!" | tee -a \
				${REPORT_DIR}/check.log
		fi

		if [ ! -z "$notrun" ]; then
			if [ $brief_test_summary == "false" ]; then
				echo "Not run:$notrun"
				echo "Not run:$notrun" >>$tmp.summary
			fi
			_global_log "Not run:$notrun"
		fi

		if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
			echo "Failures:$bad"
			echo "Failed $n_bad of $n_try tests"
			_global_log "Failures:$bad"
			_global_log "Failed $n_bad of $n_try tests"
			echo "Failures:$bad" >>$tmp.summary
			echo "Failed $n_bad of $n_try tests" >>$tmp.summary
		else
			echo "Passed all $n_try tests"
			_global_log "Passed all $n_try tests"
			echo "Passed all $n_try tests" >>$tmp.summary
		fi
		echo "" >>$tmp.summary
		if $do_report; then
			_make_section_report
		fi
		needwrap=false
	fi

	sum_bad=$((sum_bad + n_bad))
	_wipe_counters
	rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
	if ! $OPTIONS_HAVE_SECTIONS; then
		rm -f $tmp.*
	fi
}

# Final wrapup for the whole run: close out the current section via _wrapup,
# print the accumulated per-section summary once, then remove our temp files.
_summary()
{
	_wrapup
	if $showme; then
		:
	elif $needsum; then
		# NOTE(review): 'count' is assigned but never read afterwards;
		# kept to preserve existing behaviour.
		count=$(wc -L $tmp.summary | cut -f1 -d" ")
		cat $tmp.summary
		needsum=false
	fi
	rm -f $tmp.*
}

# Post-test filesystem verification: fsck the test and/or scratch filesystem
# only when the test left a require_test/require_scratch marker in
# $RESULT_DIR, setting the global err=true on failure; always end with the
# scratch fs unmounted.
_check_filesystems()
{
	if [ -f ${RESULT_DIR}/require_test ]; then
		_check_test_fs || err=true
		rm -f ${RESULT_DIR}/require_test*
	else
		_test_unmount 2> /dev/null
	fi
	if [ -f ${RESULT_DIR}/require_scratch ]; then
		_check_scratch_fs || err=true
		rm -f ${RESULT_DIR}/require_scratch*
	fi
	_scratch_unmount 2> /dev/null
}

# Check test $1 against the exclude list collected in $tmp.xlist.
# Prints an "[expunged]" marker and returns 1 when the test is excluded,
# returns 0 otherwise.
_expunge_test()
{
	local TEST_ID="$1"
	if [ -s $tmp.xlist ]; then
		# NOTE(review): substring/regex match, so "xfs/001" in the
		# xlist also matches "xfs/0011" — preserved as-is.
		if grep -q $TEST_ID $tmp.xlist; then
			echo "       [expunged]"
			return 1
		fi
	fi
	return 0
}

# Can we run systemd scopes?  Probe by running a trivial scoped command with
# a sentinel exit status; any failure (no systemd, no permission) leaves
# HAVE_SYSTEMD_SCOPES empty.
HAVE_SYSTEMD_SCOPES=
systemctl reset-failed "fstests-check" &>/dev/null
systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes

# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}

# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
# test framework.
#
# If systemd is available, run the entire test script in a scope so that we can
# kill all subprocesses of the test if it fails to clean up after itself.  This
# is essential for ensuring that the post-test unmount succeeds.  Note that
# systemd doesn't automatically remove transient scopes that fail to terminate
# when systemd tells them to terminate (e.g. programs stuck in D state when
# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
#
# Returns the test script's exit status in both the scoped and plain cases.
_run_seq() {
	local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")

	if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
		local unit="$(systemd-escape "fs$seq").scope"
		systemctl reset-failed "${unit}" &> /dev/null
		systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
		res=$?
		systemctl stop "${unit}" &> /dev/null
		return "${res}"
	else
		"${cmd[@]}"
	fi
}

_detect_kmemleak
_prepare_test_list

# Ensure the wrapup/summary runs on any exit path (normal exit or HUP, INT,
# QUIT, TERM) so partial results are still reported and temp files cleaned.
if $OPTIONS_HAVE_SECTIONS; then
	trap "_summary; exit \$status" 0 1 2 3 15
else
	trap "_wrapup; exit \$status" 0 1 2 3 15
fi

594 function run_section()
595 {
596         local section=$1
597
598         OLD_FSTYP=$FSTYP
599         OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
600         get_next_config $section
601
602         # Do we need to run only some sections ?
603         if [ ! -z "$RUN_SECTION" ]; then
604                 skip=true
605                 for s in $RUN_SECTION; do
606                         if [ $section == $s ]; then
607                                 skip=false
608                                 break;
609                         fi
610                 done
611                 if $skip; then
612                         return
613                 fi
614         fi
615
616         # Did this section get excluded?
617         if [ ! -z "$EXCLUDE_SECTION" ]; then
618                 skip=false
619                 for s in $EXCLUDE_SECTION; do
620                         if [ $section == $s ]; then
621                                 skip=true
622                                 break;
623                         fi
624                 done
625                 if $skip; then
626                         return
627                 fi
628         fi
629
630         mkdir -p $RESULT_BASE
631         if [ ! -d $RESULT_BASE ]; then
632                 echo "failed to create results directory $RESULT_BASE"
633                 status=1
634                 exit
635         fi
636
637         if $OPTIONS_HAVE_SECTIONS; then
638                 echo "SECTION       -- $section"
639         fi
640
641         sect_start=`_wallclock`
642         if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
643                 echo "RECREATING    -- $FSTYP on $TEST_DEV"
644                 _test_unmount 2> /dev/null
645                 if ! _test_mkfs >$tmp.err 2>&1
646                 then
647                         echo "our local _test_mkfs routine ..."
648                         cat $tmp.err
649                         echo "check: failed to mkfs \$TEST_DEV using specified options"
650                         status=1
651                         exit
652                 fi
653                 if ! _test_mount
654                 then
655                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
656                         status=1
657                         exit
658                 fi
659                 # TEST_DEV has been recreated, previous FSTYP derived from
660                 # TEST_DEV could be changed, source common/rc again with
661                 # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
662                 . common/rc
663                 _prepare_test_list
664         elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
665                 _test_unmount 2> /dev/null
666                 if ! _test_mount
667                 then
668                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
669                         status=1
670                         exit
671                 fi
672         fi
673
674         init_rc
675
676         seq="check"
677         check="$RESULT_BASE/check"
678
679         # don't leave old full output behind on a clean run
680         rm -f $check.full
681
682         [ -f $check.time ] || touch $check.time
683
684         # print out our test configuration
685         echo "FSTYP         -- `_full_fstyp_details`"
686         echo "PLATFORM      -- `_full_platform_details`"
687         if [ ! -z "$SCRATCH_DEV" ]; then
688           echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
689           echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
690         fi
691         echo
692         needwrap=true
693
694         if [ ! -z "$SCRATCH_DEV" ]; then
695           _scratch_unmount 2> /dev/null
696           # call the overridden mkfs - make sure the FS is built
697           # the same as we'll create it later.
698
699           if ! _scratch_mkfs >$tmp.err 2>&1
700           then
701               echo "our local _scratch_mkfs routine ..."
702               cat $tmp.err
703               echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
704               status=1
705               exit
706           fi
707
708           # call the overridden mount - make sure the FS mounts with
709           # the same options that we'll mount with later.
710           if ! _try_scratch_mount >$tmp.err 2>&1
711           then
712               echo "our local mount routine ..."
713               cat $tmp.err
714               echo "check: failed to mount \$SCRATCH_DEV using specified options"
715               status=1
716               exit
717           else
718               _scratch_unmount
719           fi
720         fi
721
722         seqres="$check"
723         _check_test_fs
724
725         err=false
726         first_test=true
727         prev_seq=""
728         for seq in $list ; do
729                 # Run report for previous test!
730                 if $err ; then
731                         bad="$bad $seqnum"
732                         n_bad=`expr $n_bad + 1`
733                         tc_status="fail"
734                 fi
735                 if $do_report && ! $first_test ; then
736                         if [ $tc_status != "expunge" ] ; then
737                                 _make_testcase_report "$prev_seq" "$tc_status"
738                         fi
739                 fi
740                 first_test=false
741
742                 err=false
743                 prev_seq="$seq"
744                 if [ ! -f $seq ]; then
745                         # Try to get full name in case the user supplied only
746                         # seq id and the test has a name. A bit of hassle to
747                         # find really the test and not its sample output or
748                         # helping files.
749                         bname=$(basename $seq)
750                         full_seq=$(find $(dirname $seq) -name $bname* -executable |
751                                 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
752                                      END { print shortest }')
753                         if [ -f $full_seq ] && \
754                            [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
755                                 seq=$full_seq
756                         fi
757                 fi
758
759                 # the filename for the test and the name output are different.
760                 # we don't include the tests/ directory in the name output.
761                 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
762
763                 # Similarly, the result directory needs to replace the tests/
764                 # part of the test location.
765                 group=`dirname $seq`
766                 if $OPTIONS_HAVE_SECTIONS; then
767                         export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
768                         REPORT_DIR="$RESULT_BASE/$section"
769                 else
770                         export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
771                         REPORT_DIR="$RESULT_BASE"
772                 fi
773                 seqres="$REPORT_DIR/$seqnum"
774
775                 mkdir -p $RESULT_DIR
776                 rm -f ${RESULT_DIR}/require_scratch*
777                 rm -f ${RESULT_DIR}/require_test*
778                 echo -n "$seqnum"
779
780                 if $showme; then
781                         _expunge_test $seqnum
782                         if [ $? -eq 1 ]; then
783                             tc_status="expunge"
784                             continue
785                         fi
786                         echo
787                         start=0
788                         stop=0
789                         tc_status="list"
790                         n_notrun=`expr $n_notrun + 1`
791                         continue
792                 fi
793
794                 tc_status="pass"
795                 if [ ! -f $seq ]; then
796                         echo " - no such test?"
797                         continue
798                 fi
799
800                 # really going to try and run this one
801                 rm -f $seqres.out.bad
802
803                 # check if we really should run it
804                 _expunge_test $seqnum
805                 if [ $? -eq 1 ]; then
806                         tc_status="expunge"
807                         continue
808                 fi
809
810                 # record that we really tried to run this test.
811                 try="$try $seqnum"
812                 n_try=`expr $n_try + 1`
813
814                 # slashes now in names, sed barfs on them so use grep
815                 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
816                 if [ "X$lasttime" != X ]; then
817                         echo -n " ${lasttime}s ... "
818                 else
819                         echo -n "       " # prettier output with timestamps.
820                 fi
821                 rm -f core $seqres.notrun
822
823                 start=`_wallclock`
824                 $timestamp && echo -n " ["`date "+%T"`"]"
825                 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
826                 $LOGGER_PROG "run xfstest $seqnum"
827                 if [ -w /dev/kmsg ]; then
828                         export date_time=`date +"%F %T"`
829                         echo "run fstests $seqnum at $date_time" > /dev/kmsg
830                         # _check_dmesg depends on this log in dmesg
831                         touch ${RESULT_DIR}/check_dmesg
832                 fi
833                 _try_wipe_scratch_devs > /dev/null 2>&1
834
835                 # clear the WARN_ONCE state to allow a potential problem
836                 # to be reported for each test
837                 (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
838
839                 if [ "$DUMP_OUTPUT" = true ]; then
840                         _run_seq 2>&1 | tee $tmp.out
841                         # Because $? would get tee's return code
842                         sts=${PIPESTATUS[0]}
843                 else
844                         _run_seq >$tmp.out 2>&1
845                         sts=$?
846                 fi
847
848                 if [ -f core ]; then
849                         _dump_err_cont "[dumped core]"
850                         mv core $RESULT_BASE/$seqnum.core
851                         err=true
852                 fi
853
854                 if [ -f $seqres.notrun ]; then
855                         $timestamp && _timestamp
856                         stop=`_wallclock`
857                         $timestamp || echo -n "[not run] "
858                         $timestamp && echo " [not run]" && \
859                                       echo -n " $seqnum -- "
860                         cat $seqres.notrun
861                         notrun="$notrun $seqnum"
862                         n_notrun=`expr $n_notrun + 1`
863                         tc_status="notrun"
864                         continue;
865                 fi
866
867                 if [ $sts -ne 0 ]; then
868                         _dump_err_cont "[failed, exit status $sts]"
869                         _test_unmount 2> /dev/null
870                         _scratch_unmount 2> /dev/null
871                         rm -f ${RESULT_DIR}/require_test*
872                         rm -f ${RESULT_DIR}/require_scratch*
873                         err=true
874                 else
875                         # the test apparently passed, so check for corruption
876                         # and log messages that shouldn't be there.
877                         _check_filesystems
878                         _check_dmesg || err=true
879                 fi
880
881                 # Reload the module after each test to check for leaks or
882                 # other problems.
883                 if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
884                         _test_unmount 2> /dev/null
885                         _scratch_unmount 2> /dev/null
886                         modprobe -r fs-$FSTYP
887                         modprobe fs-$FSTYP
888                 fi
889
890                 # Scan for memory leaks after every test so that associating
891                 # a leak to a particular test will be as accurate as possible.
892                 _check_kmemleak || err=true
893
894                 # test ends after all checks are done.
895                 $timestamp && _timestamp
896                 stop=`_wallclock`
897
898                 if [ ! -f $seq.out ]; then
899                         _dump_err "no qualified output"
900                         err=true
901                         continue;
902                 fi
903
904                 # coreutils 8.16+ changed quote formats in error messages
905                 # from `foo' to 'foo'. Filter old versions to match the new
906                 # version.
907                 sed -i "s/\`/\'/g" $tmp.out
908                 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
909                         if ! $err ; then
910                                 echo "$seqnum `expr $stop - $start`" >>$tmp.time
911                                 echo -n " `expr $stop - $start`s"
912                         fi
913                         echo ""
914                 else
915                         _dump_err "- output mismatch (see $seqres.out.bad)"
916                         mv $tmp.out $seqres.out.bad
917                         $diff $seq.out $seqres.out.bad | {
918                         if test "$DIFF_LENGTH" -le 0; then
919                                 cat
920                         else
921                                 head -n "$DIFF_LENGTH"
922                                 echo "..."
923                                 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
924                                         " to see the entire diff)"
925                         fi; } | sed -e 's/^\(.\)/    \1/'
926                         err=true
927                 fi
928         done
929
930         # make sure we record the status of the last test we ran.
931         if $err ; then
932                 bad="$bad $seqnum"
933                 n_bad=`expr $n_bad + 1`
934                 tc_status="fail"
935         fi
936         if $do_report && ! $first_test ; then
937                 if [ $tc_status != "expunge" ] ; then
938                         _make_testcase_report "$prev_seq" "$tc_status"
939                 fi
940         fi
941
942         sect_stop=`_wallclock`
943         interrupt=false
944         _wrapup
945         interrupt=true
946         echo
947
948         _test_unmount 2> /dev/null
949         _scratch_unmount 2> /dev/null
950 }
951
# Run every configured section, repeating the whole sweep $iterations
# times (-i option).  HOST_OPTIONS_SECTIONS is intentionally unquoted so
# that it word-splits into individual section names.
for ((iters = 0; iters < $iterations; iters++)); do
	for section in $HOST_OPTIONS_SECTIONS; do
		run_section "$section"
	done
done

interrupt=false
# Overall result: 1 if any test failed in any section, else 0.  The bare
# `exit` fires the exit trap, which re-exits with $status; arithmetic
# expansion replaces the old `expr` backtick form (whose exit code was 1
# whenever sum_bad == 0, silently relying on the trap to mask it).
status=$((sum_bad != 0))
exit