#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
#
# Control script for QA
#
tmp=/tmp/$$
status=0
needwrap=true
needsum=true
try=()
sum_bad=0
bad=()
notrun=()
interrupt=true
diff="diff -u"
showme=false
have_test_arg=false
randomize=false
exact_order=false
export here=`pwd`
xfile=""
subdir_xfile=""
brief_test_summary=false
do_report=false
DUMP_OUTPUT=false
iterations=1
istop=false

# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""

# start the initialisation work now
iam=check

export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}

# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}

# by default don't output timestamps
timestamp=${TIMESTAMP:=false}

rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*

SRC_GROUPS="generic shared"
export SRC_DIR="tests"

usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -glusterfs          test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -virtiofs           test virtiofs
    -overlay            test overlay
    -pvfs2              test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    --exact-order       run tests in the exact order specified
    -i <n>              iterate the test list <n> times
    -I <n>              iterate the test list <n> times, but stop iterating on any test failure
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in the formats specified. Supported format: [xunit]
    --large-fs          optimise scratch device for large filesystems
    -s section          run only the specified section from the config file
    -S section          exclude the specified section from the config file

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -e testlist         exclude a specific list of tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

The testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

The group argument is either the name of a test group to collect from all
the test dirs (e.g. quick) or the name of a test group to collect from
a specific test dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

The exclude_file argument is the name of a file inside each test directory.
For every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

The external_file argument is a path to a single file containing a list of
tests to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
            exit 1
}

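# Given a test dir $1 and a group name $2, emit the tests in that dir which
# belong to the group. Each line of group.list is expected to look roughly
# like this (illustrative example):
#
#	001 auto quick rw
#
# i.e. a test name followed by the groups it belongs to. The sed below strips
# comments, pads the line with a trailing space so the " $grp " match also
# works at end of line, and prints "$SRC_DIR/<dir>/<name>" for every line
# that mentions $2 as a whole word.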
get_sub_group_list()
{
        local d=$1
        local grp=$2

        test -s "$SRC_DIR/$d/group.list" || return 1

        local grpl=$(sed -n < $SRC_DIR/$d/group.list \
                -e 's/#.*//' \
                -e 's/$/ /' \
                -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
        echo $grpl
}

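# Resolve a group argument to a list of tests. For example (hypothetical
# invocations):
#
#	get_group_list xfs/quick    -> quick tests from tests/xfs only
#	get_group_list quick        -> quick tests from generic, shared and
#	                               the current $FSTYP test dir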
get_group_list()
{
        local grp=$1
        local grpl=""
        local sub=$(dirname $grp)
        local fsgroup="$FSTYP"

        if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
                # group is given as <subdir>/<group> (e.g. xfs/quick)
                grp=$(basename $grp)
                get_sub_group_list $sub $grp
                return
        fi

        if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
            fsgroup=ext4
        fi
        for d in $SRC_GROUPS $fsgroup; do
                if ! test -d "$SRC_DIR/$d" ; then
                        continue
                fi
                grpl="$grpl $(get_sub_group_list $d $grp)"
        done
        echo $grpl
}

# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
get_all_tests()
{
        touch $tmp.list
        for d in $SRC_GROUPS $FSTYP; do
                if ! test -d "$SRC_DIR/$d" ; then
                        continue
                fi
                ls $SRC_DIR/$d/* | \
                        grep -v "\..*" | \
                        grep "^$SRC_DIR/$d/$VALID_TEST_NAME" | \
                        grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
        done
}

# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
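# The exclusions are applied with "grep -v -f", and the pattern file is
# flushed roughly every 100 entries so a huge exclude list does not turn
# into one enormous grep invocation.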
trim_test_list()
{
        test_list="$*"

        rm -f $tmp.grep
        numsed=0
        for t in $test_list
        do
            if [ $numsed -gt 100 ]; then
                grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
                mv $tmp.tmp $tmp.list
                numsed=0
                rm -f $tmp.grep
            fi
            echo "^$t\$" >>$tmp.grep
            numsed=`expr $numsed + 1`
        done
        grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
        mv $tmp.tmp $tmp.list
        rm -f $tmp.grep
}


_wallclock()
{
    date "+%s"
}

_timestamp()
{
    now=`date "+%T"`
    echo -n " [$now]"
}

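# Build the global $list of tests to run from the command line arguments and
# the include/exclude groups. The end result is a whitespace-separated list
# of test paths, e.g. (illustrative):
#
#	tests/generic/001 tests/generic/002 tests/xfs/001 ...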
_prepare_test_list()
{
        unset list
        # Tests specified on the command line
        if [ -s $tmp.arglist ]; then
                cat $tmp.arglist > $tmp.list
        else
                touch $tmp.list
        fi

        # Specified groups to include
        # Note that the CLI processing adds a leading space to the first group
        # parameter, so we have to catch that here when checking for "all"
        if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
                # no test numbers, do everything
                get_all_tests
        else
                for group in $GROUP_LIST; do
                        list=$(get_group_list $group)
                        if [ -z "$list" ]; then
                                echo "Group \"$group\" is empty or not defined?"
                                exit 1
                        fi

                        for t in $list; do
                                grep -s "^$t\$" $tmp.list >/dev/null || \
                                                        echo "$t" >>$tmp.list
                        done
                done
        fi

        # Specified groups to exclude
        for xgroup in $XGROUP_LIST; do
                list=$(get_group_list $xgroup)
                if [ -z "$list" ]; then
                        echo "Group \"$xgroup\" is empty or not defined?"
                        continue
                fi

                trim_test_list $list
        done

        # sort the list of tests into numeric order unless we're running tests
        # in the exact order specified
        if ! $exact_order; then
                if $randomize; then
                        if type shuf >& /dev/null; then
                                sorter="shuf"
                        else
                                sorter="awk -v seed=$RANDOM -f randomize.awk"
                        fi
                else
                        sorter="cat"
                fi
                list=`sort -n $tmp.list | uniq | $sorter`
        else
                list=`cat $tmp.list`
        fi
        rm -f $tmp.list
}

# Process command arguments first.
while [ $# -gt 0 ]; do
        case "$1" in
        -\? | -h | --help) usage ;;

        -nfs|-glusterfs|-cifs|-9p|-virtiofs|-pvfs2|-tmpfs|-ubifs)
                FSTYP="${1:1}"
                ;;
        -overlay)
                FSTYP=overlay
                export OVERLAY=true
                ;;

        -g)     group=$2 ; shift ;
                GROUP_LIST="$GROUP_LIST ${group//,/ }"
                ;;

        -x)     xgroup=$2 ; shift ;
                XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
                ;;

        -X)     subdir_xfile=$2; shift ;
                ;;
        -e)
                xfile=$2; shift ;
                echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
                ;;

        -E)     xfile=$2; shift ;
                if [ -f $xfile ]; then
                        sed "s/#.*$//" "$xfile" >> $tmp.xlist
                fi
                ;;
        -s)     RUN_SECTION="$RUN_SECTION $2"; shift ;;
        -S)     EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
        -l)     diff="diff" ;;
        -udiff) diff="$diff -u" ;;

        -n)     showme=true ;;
        -r)
                if $exact_order; then
                        echo "Cannot specify -r and --exact-order."
                        exit 1
                fi
                randomize=true
                ;;
        --exact-order)
                if $randomize; then
                        echo "Cannot specify --exact-order and -r."
                        exit 1
                fi
                exact_order=true
                ;;
        -i)     iterations=$2; shift ;;
        -I)     iterations=$2; istop=true; shift ;;
        -T)     timestamp=true ;;
        -d)     DUMP_OUTPUT=true ;;
        -b)     brief_test_summary=true;;
        -R)     report_fmt=$2 ; shift ;
                REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
                do_report=true
                ;;
        --large-fs) export LARGE_SCRATCH_DEV=yes ;;
        --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

        -*)     usage ;;
        *)      # not an argument, we've got tests now.
                have_test_arg=true ;;
        esac

        # if we've found a test specification, then break out of the processing
        # loop before we shift the arguments so that this is the first argument
        # that we process in the test arg loop below.
        if $have_test_arg; then
                break;
        fi

        shift
done

# We need common/rc, which also sources common/config. We have to source it
# after processing args, because overlay needs FSTYP set before sourcing
# common/config.
if ! . ./common/rc; then
        echo "check: failed to source common/rc"
        exit 1
fi

if [ -n "$subdir_xfile" ]; then
        for d in $SRC_GROUPS $FSTYP; do
                [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
                for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
                        echo $d/$f >> $tmp.xlist
                done
        done
fi

# Process tests from command line now.
if $have_test_arg; then
        while [ $# -gt 0 ]; do
                case "$1" in
                -*)     echo "Arguments before tests, please!"
                        status=1
                        exit $status
                        ;;
                *)      # Expand test pattern (e.g. xfs/???, *fs/001)
                        list=$(cd $SRC_DIR; echo $1)
                        for t in $list; do
                                test_dir=`dirname $t`
                                test_dir=${test_dir#$SRC_DIR/*}
                                test_name=`basename $t`
                                group_file=$SRC_DIR/$test_dir/group.list

                                if egrep -q "^$test_name" $group_file; then
                                        # in group file ... OK
                                        echo $SRC_DIR/$test_dir/$test_name \
                                                >>$tmp.arglist
                                else
                                        # oops
                                        echo "$t - unknown test, ignored"
                                fi
                        done
                        ;;
                esac

                shift
        done
elif [ -z "$GROUP_LIST" ]; then
        # The default group list is the auto group. If any other group or
        # test is specified, we use that instead.
        GROUP_LIST="auto"
fi

if [ `id -u` -ne 0 ]
then
    echo "check: QA must be run as root"
    exit 1
fi

_wipe_counters()
{
        try=()
        notrun=()
        bad=()
}

_global_log() {
        echo "$1" >> $check.log
        if $OPTIONS_HAVE_SECTIONS; then
                echo "$1" >> ${REPORT_DIR}/check.log
        fi
}

_wrapup()
{
        seq="check"
        check="$RESULT_BASE/check"

        if $showme && $needwrap; then
                if $do_report; then
                        # $showme = all selected tests are notrun (no tries)
                        _make_section_report "${#notrun[*]}" "0" "${#notrun[*]}"
                fi
                needwrap=false
        elif $needwrap; then
                if [ -f $check.time -a -f $tmp.time ]; then
                        cat $check.time $tmp.time  \
                                | $AWK_PROG '
                                { t[$1] = $2 }
                                END {
                                        if (NR > 0) {
                                                for (i in t) print i " " t[i]
                                        }
                                }' \
                                | sort -n >$tmp.out
                        mv $tmp.out $check.time
                        if $OPTIONS_HAVE_SECTIONS; then
                                cp $check.time ${REPORT_DIR}/check.time
                        fi
                fi

                _global_log ""
                _global_log "$(date)"

                echo "SECTION       -- $section" >>$tmp.summary
                echo "=========================" >>$tmp.summary
                if ((${#try[*]} > 0)); then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Ran: ${try[*]}"
                                echo "Ran: ${try[*]}" >>$tmp.summary
                        fi
                        _global_log "Ran: ${try[*]}"
                fi

                $interrupt && echo "Interrupted!" | tee -a $check.log
                if $OPTIONS_HAVE_SECTIONS; then
                        $interrupt && echo "Interrupted!" | tee -a \
                                ${REPORT_DIR}/check.log
                fi

                if ((${#notrun[*]} > 0)); then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Not run: ${notrun[*]}"
                                echo "Not run: ${notrun[*]}" >>$tmp.summary
                        fi
                        _global_log "Not run: ${notrun[*]}"
                fi

                if ((${#bad[*]} > 0)); then
                        echo "Failures: ${bad[*]}"
                        echo "Failed ${#bad[*]} of ${#try[*]} tests"
                        _global_log "Failures: ${bad[*]}"
                        _global_log "Failed ${#bad[*]} of ${#try[*]} tests"
                        echo "Failures: ${bad[*]}" >>$tmp.summary
                        echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary
                else
                        echo "Passed all ${#try[*]} tests"
                        _global_log "Passed all ${#try[*]} tests"
                        echo "Passed all ${#try[*]} tests" >>$tmp.summary
                fi
                echo "" >>$tmp.summary
                if $do_report; then
                        _make_section_report "${#try[*]}" "${#bad[*]}" "${#notrun[*]}"
                fi
                needwrap=false
        fi

        sum_bad=`expr $sum_bad + ${#bad[*]}`
        _wipe_counters
        rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
        if ! $OPTIONS_HAVE_SECTIONS; then
                rm -f $tmp.*
        fi
}

_summary()
{
        _wrapup
        if $showme; then
                :
        elif $needsum; then
                count=`wc -L $tmp.summary | cut -f1 -d" "`
                cat $tmp.summary
                needsum=false
        fi
        rm -f $tmp.*
}

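# Check the test and scratch filesystems, but only if the test actually used
# them. The require_test/require_scratch sentinel files in $RESULT_DIR are
# assumed to be created by the corresponding _require_* helpers in common/rc,
# so a test that never touched a device doesn't pay for a full check of it.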
_check_filesystems()
{
        local ret=0

        if [ -f ${RESULT_DIR}/require_test ]; then
                _check_test_fs || ret=1
                rm -f ${RESULT_DIR}/require_test*
        else
                _test_unmount 2> /dev/null
        fi
        if [ -f ${RESULT_DIR}/require_scratch ]; then
                _check_scratch_fs || ret=1
                rm -f ${RESULT_DIR}/require_scratch*
        fi
        _scratch_unmount 2> /dev/null
        return $ret
}

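# Decide whether a test has been expunged via -e/-E/-X. $tmp.xlist holds one
# entry per line, e.g. (illustrative):
#
#	generic/001
#	xfs/242
#
# Note that the grep below matches $TEST_ID as an unanchored pattern, so any
# xlist entry containing the test name as a substring will also expunge it.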
_expunge_test()
{
        local TEST_ID="$1"
        if [ -s $tmp.xlist ]; then
                if grep -q $TEST_ID $tmp.xlist; then
                        echo "       [expunged]"
                        return 1
                fi
        fi
        return 0
}

# Can we run systemd scopes?
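# The probe below runs "exit 77" inside a transient scope; systemd-run --scope
# executes the command synchronously and propagates its exit status, so seeing
# 77 here means scopes are usable.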
HAVE_SYSTEMD_SCOPES=
systemctl reset-failed "fstests-check" &>/dev/null
systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes

# Make the check script unattractive to the OOM killer...
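# oom_score_adj ranges from -1000 (exempt from the OOM killer) to 1000
# (preferred victim); -500 makes check much less likely to be picked, and
# the 250 used for tests below makes them more likely.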
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
function _adjust_oom_score() {
        test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
}
_adjust_oom_score -500

# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
# test framework.  The test is run in a separate process without any of our
# functions, so we open-code adjusting the OOM score.
#
# If systemd is available, run the entire test script in a scope so that we can
# kill all subprocesses of the test if it fails to clean up after itself.  This
# is essential for ensuring that the post-test unmount succeeds.  Note that
# systemd doesn't automatically remove transient scopes that fail to terminate
# when systemd tells them to terminate (e.g. programs stuck in D state when
# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
_run_seq() {
        local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")

        if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
                local unit="$(systemd-escape "fs$seq").scope"
                systemctl reset-failed "${unit}" &> /dev/null
                systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
                res=$?
                systemctl stop "${unit}" &> /dev/null
                return "${res}"
        else
                "${cmd[@]}"
        fi
}

_detect_kmemleak
_prepare_test_list

if $OPTIONS_HAVE_SECTIONS; then
        trap "_summary; exit \$status" 0 1 2 3 15
else
        trap "_wrapup; exit \$status" 0 1 2 3 15
fi

function run_section()
{
        local section=$1

        OLD_FSTYP=$FSTYP
        OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
        get_next_config $section

        # Do we need to run only some sections?
        if [ ! -z "$RUN_SECTION" ]; then
                skip=true
                for s in $RUN_SECTION; do
                        if [ $section == $s ]; then
                                skip=false
                                break;
                        fi
                done
                if $skip; then
                        return
                fi
        fi

        # Did this section get excluded?
        if [ ! -z "$EXCLUDE_SECTION" ]; then
                skip=false
                for s in $EXCLUDE_SECTION; do
                        if [ $section == $s ]; then
                                skip=true
                                break;
                        fi
                done
                if $skip; then
                        return
                fi
        fi

        mkdir -p $RESULT_BASE
        if [ ! -d $RESULT_BASE ]; then
                echo "failed to create results directory $RESULT_BASE"
                status=1
                exit
        fi

        if $OPTIONS_HAVE_SECTIONS; then
                echo "SECTION       -- $section"
        fi

        sect_start=`_wallclock`
        if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
                echo "RECREATING    -- $FSTYP on $TEST_DEV"
                _test_unmount 2> /dev/null
                if ! _test_mkfs >$tmp.err 2>&1
                then
                        echo "our local _test_mkfs routine ..."
                        cat $tmp.err
                        echo "check: failed to mkfs \$TEST_DEV using specified options"
                        status=1
                        exit
                fi
                if ! _test_mount
                then
                        echo "check: failed to mount $TEST_DEV on $TEST_DIR"
                        status=1
                        exit
                fi
                # TEST_DEV has been recreated, so the FSTYP previously derived
                # from TEST_DEV could have changed. Source common/rc again with
                # the correct FSTYP to pick up FSTYP-specific configs, e.g.
                # common/xfs
                . common/rc
                _prepare_test_list
        elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
                _test_unmount 2> /dev/null
                if ! _test_mount
                then
                        echo "check: failed to mount $TEST_DEV on $TEST_DIR"
                        status=1
                        exit
                fi
        fi

        init_rc

        seq="check"
        check="$RESULT_BASE/check"

        # don't leave old full output behind on a clean run
        rm -f $check.full

        [ -f $check.time ] || touch $check.time

        # print out our test configuration
        echo "FSTYP         -- `_full_fstyp_details`"
        echo "PLATFORM      -- `_full_platform_details`"
        if [ ! -z "$SCRATCH_DEV" ]; then
          echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
          echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
        fi
        echo
        needwrap=true

        if [ ! -z "$SCRATCH_DEV" ]; then
          _scratch_unmount 2> /dev/null
          # call the overridden mkfs - make sure the FS is built
          # the same as we'll create it later.

          if ! _scratch_mkfs >$tmp.err 2>&1
          then
              echo "our local _scratch_mkfs routine ..."
              cat $tmp.err
              echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
              status=1
              exit
          fi

          # call the overridden mount - make sure the FS mounts with
          # the same options that we'll mount with later.
          if ! _try_scratch_mount >$tmp.err 2>&1
          then
              echo "our local mount routine ..."
              cat $tmp.err
              echo "check: failed to mount \$SCRATCH_DEV using specified options"
              status=1
              exit
          else
              _scratch_unmount
          fi
        fi

        seqres="$check"
        _check_test_fs

        local tc_status="init"
        prev_seq=""
        for seq in $list ; do
                # Run report for previous test!
                if [ "$tc_status" == "fail" ]; then
                        bad+=("$seqnum")
                fi
                if $do_report && [[ ! $tc_status =~ ^(init|expunge)$ ]]; then
                        _make_testcase_report "$prev_seq" "$tc_status"
                fi

                prev_seq="$seq"
                if [ ! -f $seq ]; then
                        # Try to get the full name in case the user supplied
                        # only a seq id and the test has a name. It's a bit of
                        # a hassle to find the test itself rather than its
                        # sample output or helper files.
                        bname=$(basename $seq)
                        full_seq=$(find $(dirname $seq) -name $bname* -executable |
                                awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
                                     END { print shortest }')
                        if [ -f $full_seq ] && \
                           [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
                                seq=$full_seq
                        fi
                fi

                # the filename for the test and the name output are different.
                # we don't include the tests/ directory in the name output.
                export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

                # Similarly, the result directory needs to replace the tests/
                # part of the test location.
                group=`dirname $seq`
                if $OPTIONS_HAVE_SECTIONS; then
                        export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
                        REPORT_DIR="$RESULT_BASE/$section"
                else
                        export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
                        REPORT_DIR="$RESULT_BASE"
                fi
                seqres="$REPORT_DIR/$seqnum"

                mkdir -p $RESULT_DIR
                rm -f ${RESULT_DIR}/require_scratch*
                rm -f ${RESULT_DIR}/require_test*
                echo -n "$seqnum"

                if $showme; then
                        _expunge_test $seqnum
                        if [ $? -eq 1 ]; then
                            tc_status="expunge"
                            continue
                        fi
                        echo
                        start=0
                        stop=0
                        tc_status="list"
                        notrun+=("$seqnum")
                        continue
                fi

                tc_status="pass"
                if [ ! -f $seq ]; then
                        echo " - no such test?"
                        continue
                fi

                # really going to try and run this one
                rm -f $seqres.out.bad $seqres.hints

                # check if we really should run it
                _expunge_test $seqnum
                if [ $? -eq 1 ]; then
                        tc_status="expunge"
                        continue
                fi

                # record that we really tried to run this test.
                try+=("$seqnum")

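                # $check.time holds one "<test name> <seconds>" pair per line
                # from previous runs (illustrative entry: "generic/001 3"),
                # so print this test's previous runtime as a hint, if any.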
                awk 'BEGIN {lasttime="       "} \
                     $1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \
                     END {printf "%s", lasttime}' "$check.time"
                rm -f core $seqres.notrun

                start=`_wallclock`
                $timestamp && echo -n " ["`date "+%T"`"]"
                [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
                $LOGGER_PROG "run xfstest $seqnum"
                if [ -w /dev/kmsg ]; then
                        export date_time=`date +"%F %T"`
                        echo "run fstests $seqnum at $date_time" > /dev/kmsg
                        # _check_dmesg depends on this log in dmesg
                        touch ${RESULT_DIR}/check_dmesg
                fi
                _try_wipe_scratch_devs > /dev/null 2>&1

                # clear the WARN_ONCE state to allow a potential problem
                # to be reported for each test
                (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1

                if [ "$DUMP_OUTPUT" = true ]; then
                        _run_seq 2>&1 | tee $tmp.out
                        # Because $? would get tee's return code
                        sts=${PIPESTATUS[0]}
                else
                        _run_seq >$tmp.out 2>&1
                        sts=$?
                fi

                if [ -f core ]; then
                        _dump_err_cont "[dumped core]"
                        mv core $RESULT_BASE/$seqnum.core
                        tc_status="fail"
                fi

                if [ -f $seqres.notrun ]; then
                        $timestamp && _timestamp
                        stop=`_wallclock`
                        $timestamp || echo -n "[not run] "
                        $timestamp && echo " [not run]" && \
                                      echo -n " $seqnum -- "
                        cat $seqres.notrun
                        notrun+=("$seqnum")
                        tc_status="notrun"

                        # Unmount the scratch fs so that we can wipe the scratch
                        # dev state prior to the next test run.
                        _scratch_unmount 2> /dev/null
                        continue;
                fi

                if [ $sts -ne 0 ]; then
                        _dump_err_cont "[failed, exit status $sts]"
                        _test_unmount 2> /dev/null
                        _scratch_unmount 2> /dev/null
                        rm -f ${RESULT_DIR}/require_test*
                        rm -f ${RESULT_DIR}/require_scratch*
                        tc_status="fail"
                else
                        # The test apparently passed, so check for corruption
                        # and log messages that shouldn't be there.  Run the
                        # checking tools from a subshell with adjusted OOM
                        # score so that the OOM killer will target them instead
                        # of the check script itself.
                        (_adjust_oom_score 250; _check_filesystems) || tc_status="fail"
                        _check_dmesg || tc_status="fail"
                fi

                # Reload the module after each test to check for leaks or
                # other problems.
                if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
                        _test_unmount 2> /dev/null
                        _scratch_unmount 2> /dev/null
                        modprobe -r fs-$FSTYP
                        modprobe fs-$FSTYP
                fi

                # Scan for memory leaks after every test so that associating
                # a leak to a particular test will be as accurate as possible.
                _check_kmemleak || tc_status="fail"

                # test ends after all checks are done.
                $timestamp && _timestamp
                stop=`_wallclock`

                if [ ! -f $seq.out ]; then
                        _dump_err "no qualified output"
                        tc_status="fail"
                        continue;
                fi

                # coreutils 8.16+ changed quote formats in error messages
                # from `foo' to 'foo'. Filter old versions to match the new
                # version.
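                # (e.g. an old-coreutils "cannot remove `foo'" in the
                # captured output becomes "cannot remove 'foo'", matching
                # golden .out files recorded with newer coreutils.)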
                sed -i "s/\`/\'/g" $tmp.out
                if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
                        if [ "$tc_status" != "fail" ]; then
                                echo "$seqnum `expr $stop - $start`" >>$tmp.time
                                echo -n " `expr $stop - $start`s"
                        fi
                        echo ""
                else
                        _dump_err "- output mismatch (see $seqres.out.bad)"
                        mv $tmp.out $seqres.out.bad
                        $diff $seq.out $seqres.out.bad | {
                        if test "$DIFF_LENGTH" -le 0; then
                                cat
                        else
                                head -n "$DIFF_LENGTH"
                                echo "..."
                                echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
                                        " to see the entire diff)"
                        fi; } | sed -e 's/^\(.\)/    \1/'
                        tc_status="fail"
                fi
                if [ -f $seqres.hints ]; then
                        if [ "$tc_status" == "fail" ]; then
                                echo
                                cat $seqres.hints
                        else
                                rm -f $seqres.hints
                        fi
                fi
        done

        # make sure we record the status of the last test we ran.
        if [ "$tc_status" == "fail" ]; then
                bad+=("$seqnum")
        fi
        if $do_report && [[ ! $tc_status =~ ^(init|expunge)$ ]]; then
                _make_testcase_report "$prev_seq" "$tc_status"
        fi

        sect_stop=`_wallclock`
        interrupt=false
        _wrapup
        interrupt=true
        echo

        _test_unmount 2> /dev/null
        _scratch_unmount 2> /dev/null
}

for ((iters = 0; iters < $iterations; iters++)) do
        for section in $HOST_OPTIONS_SECTIONS; do
                run_section $section
                if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
                        interrupt=false
                        status=`expr $sum_bad != 0`
                        exit
                fi
        done
done

interrupt=false
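# expr prints 1 if any test failed and 0 otherwise; the bare exit below then
# fires the EXIT trap installed above, which re-exits with this $status.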
status=`expr $sum_bad != 0`
exit