xfs: test fixes for new 5.17 behaviors
[xfstests-dev.git] / check
1 #!/bin/bash
2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
4 #
5 # Control script for QA
6 #
7 tmp=/tmp/$$
8 status=0
9 needwrap=true
10 needsum=true
11 n_try=0
12 try=""
13 n_bad=0
14 sum_bad=0
15 bad=""
16 n_notrun=0
17 notrun=""
18 interrupt=true
19 diff="diff -u"
20 showme=false
21 have_test_arg=false
22 randomize=false
23 exact_order=false
24 export here=`pwd`
25 xfile=""
26 subdir_xfile=""
27 brief_test_summary=false
28 do_report=false
29 DUMP_OUTPUT=false
30 iterations=1
31 istop=false
32
33 # This is a global variable used to pass test failure text to reporting gunk
34 _err_msg=""
35
36 # start the initialisation work now
37 iam=check
38
39 export MSGVERB="text:action"
40 export QA_CHECK_FS=${QA_CHECK_FS:=true}
41
42 # number of diff lines from a failed test, 0 for whole output
43 export DIFF_LENGTH=${DIFF_LENGTH:=10}
44
45 # by default don't output timestamps
46 timestamp=${TIMESTAMP:=false}
47
48 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
49
50 SRC_GROUPS="generic shared"
51 export SRC_DIR="tests"
52
usage()
{
    # Emit the complete help text on stdout and leave with a non-zero
    # status; reached via -h/-? and any unrecognised option.
    cat <<EOF
Usage: $0 [options] [testlist]

check options
    -nfs                test NFS
    -glusterfs          test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -virtiofs           test virtiofs
    -overlay            test overlay
    -pvfs2              test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    --exact-order       run tests in the exact order specified
    -i <n>              iterate the test list <n> times
    -I <n>              iterate the test list <n> times, but stops iterating further in case of any test failure
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -e testlist         exclude a specific list of tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
for every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude

EOF
    exit 1
}
121
# Print the full paths of every test in tests/$1 that belongs to group $2,
# space separated on a single line.  Returns 1 if the dir has no usable
# group.list file.
get_sub_group_list()
{
	local dir=$1
	local group=$2

	test -s "$SRC_DIR/$dir/group.list" || return 1

	# Strip comments, pad each line with a trailing space so the group
	# name can always be matched as " $group ", then rewrite matching
	# lines into full test paths.
	local members=$(sed -n < $SRC_DIR/$dir/group.list \
		-e 's/#.*//' \
		-e 's/$/ /' \
		-e "s;^\($VALID_TEST_NAME\).* $group .*;$SRC_DIR/$dir/\1;p")
	# unquoted on purpose: collapses the newline-separated matches
	# into a single space-separated line
	echo $members
}

# Resolve a group argument into a list of test paths.  Accepts either a
# plain group name (searched in all shared dirs plus $FSTYP's dir) or the
# <subdir>/<group> form (e.g. xfs/quick) naming a single test dir.
get_group_list()
{
	local group=$1
	local found=""
	local subdir=$(dirname $group)
	local fsdir="$FSTYP"

	if [ -n "$subdir" -a "$subdir" != "." -a -d "$SRC_DIR/$subdir" ]; then
		# group was given as <subdir>/<group> (e.g. xfs/quick)
		group=$(basename $group)
		get_sub_group_list $subdir $group
		return
	fi

	# ext2/ext3 are exercised by the tests in the ext4 directory
	case "$FSTYP" in
	ext2|ext3)
		fsdir=ext4 ;;
	esac

	for d in $SRC_GROUPS $fsdir; do
		test -d "$SRC_DIR/$d" || continue
		found="$found $(get_sub_group_list $d $group)"
	done
	echo $found
}
161
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc.
# Candidate names are appended to $tmp.list.
get_all_tests()
{
	touch $tmp.list
	for d in $SRC_GROUPS $FSTYP; do
		test -d "$SRC_DIR/$d" || continue
		# keep only dot-free names that look like tests, and drop
		# group files / Makefiles
		ls $SRC_DIR/$d/* \
			| grep -v "\..*" \
			| grep "^$SRC_DIR/$d/$VALID_TEST_NAME" \
			| grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
	done
}
177
# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
#
# Exclusions are applied with grep -v -f, batching the patterns in groups of
# ~100 so the pattern file stays small.
trim_test_list()
{
	test_list="$*"

	rm -f $tmp.grep
	numsed=0
	for t in $test_list
	do
	    # flush a full batch of patterns before adding more
	    if [ $numsed -gt 100 ]; then
		grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
		mv $tmp.tmp $tmp.list
		numsed=0
		rm -f $tmp.grep
	    fi
	    # anchor both ends so e.g. xfs/001 can't match xfs/0011
	    echo "^$t\$" >>$tmp.grep
	    # shell arithmetic instead of forking expr(1) per test
	    numsed=$((numsed + 1))
	done
	grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
	mv $tmp.tmp $tmp.list
	rm -f $tmp.grep
}
201
202
_wallclock()
{
    # wallclock time as integer seconds since the Unix epoch
    date +%s
}
207
_timestamp()
{
    # print " [HH:MM:SS]" without a trailing newline
    # NOTE: 'now' is deliberately left as a global, matching historic
    # behaviour of this helper
    now=$(date "+%T")
    printf ' [%s]' "$now"
}
213
# Build the global $list of tests to run: seed it from the command-line
# tests saved in $tmp.arglist, add the -g include groups (or everything for
# "-g all"), strip the -x exclude groups, then order the result (numeric
# sort by default, shuffled for -r, untouched for --exact-order).
_prepare_test_list()
{
	unset list
	# Tests specified on the command line
	if [ -s $tmp.arglist ]; then
		cat $tmp.arglist > $tmp.list
	else
		touch $tmp.list
	fi

	# Specified groups to include
	# Note that the CLI processing adds a leading space to the first group
	# parameter, so we have to catch that here checking for "all"
	if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
		# no test numbers, do everything
		get_all_tests
	else
		for group in $GROUP_LIST; do
			list=$(get_group_list $group)
			if [ -z "$list" ]; then
				echo "Group \"$group\" is empty or not defined?"
				exit 1
			fi

			# append each group member unless it is already listed
			for t in $list; do
				grep -s "^$t\$" $tmp.list >/dev/null || \
							echo "$t" >>$tmp.list
			done
		done
	fi

	# Specified groups to exclude
	for xgroup in $XGROUP_LIST; do
		list=$(get_group_list $xgroup)
		if [ -z "$list" ]; then
			echo "Group \"$xgroup\" is empty or not defined?"
			continue
		fi

		trim_test_list $list
	done

	# sort the list of tests into numeric order unless we're running tests
	# in the exact order specified
	if ! $exact_order; then
		if $randomize; then
			# prefer shuf(1); fall back to the awk shuffler
			# shipped with xfstests
			if type shuf >& /dev/null; then
				sorter="shuf"
			else
				sorter="awk -v seed=$RANDOM -f randomize.awk"
			fi
		else
			sorter="cat"
		fi
		list=`sort -n $tmp.list | uniq | $sorter`
	else
		list=`cat $tmp.list`
	fi
	rm -f $tmp.list
}
274
# Process command arguments first.
# Options are consumed until the first non-option word, which is taken to be
# the start of the test list and handled by the loop further below.
while [ $# -gt 0 ]; do
	case "$1" in
	-\? | -h | --help) usage ;;

	-nfs)		FSTYP=nfs ;;
	-glusterfs)	FSTYP=glusterfs ;;
	-cifs)		FSTYP=cifs ;;
	-9p)		FSTYP=9p ;;
	-virtiofs)	FSTYP=virtiofs ;;
	-overlay)	FSTYP=overlay; export OVERLAY=true ;;
	-pvfs2)		FSTYP=pvfs2 ;;
	-tmpfs)		FSTYP=tmpfs ;;
	-ubifs)		FSTYP=ubifs ;;

	-g)	group=$2 ; shift ;
		GROUP_LIST="$GROUP_LIST ${group//,/ }"
		;;

	-x)	xgroup=$2 ; shift ;
		XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
		;;

	-X)	subdir_xfile=$2; shift ;
		;;
	-e)
		xfile=$2; shift ;
		echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
		;;

	-E)	xfile=$2; shift ;
		if [ -f $xfile ]; then
			sed "s/#.*$//" "$xfile" >> $tmp.xlist
		fi
		;;
	-s)	RUN_SECTION="$RUN_SECTION $2"; shift ;;
	-S)	EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
	-l)	diff="diff" ;;
	-udiff)	diff="$diff -u" ;;

	-n)	showme=true ;;
	-r)
		if $exact_order; then
			echo "Cannot specify -r and --exact-order."
			exit 1
		fi
		randomize=true
		;;
	--exact-order)
		if $randomize; then
			echo "Cannot specify --exact-order and -r."
			exit 1
		fi
		exact_order=true
		;;
	-i)	iterations=$2; shift ;;
	-I)	iterations=$2; istop=true; shift ;;
	-T)	timestamp=true ;;
	-d)	DUMP_OUTPUT=true ;;
	-b)	brief_test_summary=true;;
	-R)	report_fmt=$2 ; shift ;
		REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
		do_report=true
		;;
	--large-fs) export LARGE_SCRATCH_DEV=yes ;;
	# fix: this previously expanded ${r#*=}, but no variable $r is ever
	# set; the value has to come from the option word itself, i.e. $1.
	--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

	-*)	usage ;;
	*)	# not an argument, we've got tests now.
		have_test_arg=true ;;
	esac

	# if we've found a test specification, the break out of the processing
	# loop before we shift the arguments so that this is the first argument
	# that we process in the test arg loop below.
	if $have_test_arg; then
		break;
	fi

	shift
done
356
357 # we need common/rc, that also sources common/config. We need to source it
358 # after processing args, overlay needs FSTYP set before sourcing common/config
359 if ! . ./common/rc; then
360         echo "check: failed to source common/rc"
361         exit 1
362 fi
363
364 if [ -n "$subdir_xfile" ]; then
365         for d in $SRC_GROUPS $FSTYP; do
366                 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
367                 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
368                         echo $d/$f >> $tmp.xlist
369                 done
370         done
371 fi
372
# Process tests from command line now.
# Each remaining argument is a test name or glob; valid names are appended
# to $tmp.arglist for _prepare_test_list to pick up.
if $have_test_arg; then
	while [ $# -gt 0 ]; do
		case "$1" in
		-*)	echo "Arguments before tests, please!"
			status=1
			exit $status
			;;
		*)	# Expand test pattern (e.g. xfs/???, *fs/001)
			list=$(cd $SRC_DIR; echo $1)
			for t in $list; do
				test_dir=`dirname $t`
				test_dir=${test_dir#$SRC_DIR/*}
				test_name=`basename $t`
				group_file=$SRC_DIR/$test_dir/group.list

				# only names listed in the dir's group file
				# are real tests; anything else (sample
				# output, helpers) is ignored
				if egrep -q "^$test_name" $group_file; then
					# in group file ... OK
					echo $SRC_DIR/$test_dir/$test_name \
						>>$tmp.arglist
				else
					# oops
					echo "$t - unknown test, ignored"
				fi
			done
			;;
		esac

		shift
	done
elif [ -z "$GROUP_LIST" ]; then
	# default group list is the auto group. If any other group or test is
	# specified, we use that instead.
	GROUP_LIST="auto"
fi
408
409 if [ `id -u` -ne 0 ]
410 then
411     echo "check: QA must be run as root"
412     exit 1
413 fi
414
# Reset the per-section pass/fail/notrun counters and forget the
# corresponding test-name lists.
_wipe_counters()
{
	n_try=0
	n_bad=0
	n_notrun=0
	unset try notrun bad
}
422
# Append a line to the main check log, and also to the per-section report
# log when config sections are in use.
_global_log() {
	local msg="$1"

	echo "$msg" >> $check.log
	if $OPTIONS_HAVE_SECTIONS; then
		echo "$msg" >> ${REPORT_DIR}/check.log
	fi
}
429
# End-of-section reporting: merge this run's per-test runtimes into
# $check.time, and append the Ran / Not run / Failures summary to the logs
# and to $tmp.summary.  $needwrap guards against reporting twice.
_wrapup()
{
	seq="check"
	check="$RESULT_BASE/check"

	if $showme; then
		# -n: nothing ran, just emit the section report if asked
		if $needwrap; then
			if $do_report; then
				_make_section_report
			fi
			needwrap=false
		fi
	elif $needwrap; then
		# Merge old and new timing data; the awk array keeps the
		# last (newest) entry per test, then sort back into order.
		if [ -f $check.time -a -f $tmp.time ]; then
			cat $check.time $tmp.time  \
				| $AWK_PROG '
				{ t[$1] = $2 }
				END {
					if (NR > 0) {
						for (i in t) print i " " t[i]
					}
				}' \
				| sort -n >$tmp.out
			mv $tmp.out $check.time
			if $OPTIONS_HAVE_SECTIONS; then
				cp $check.time ${REPORT_DIR}/check.time
			fi
		fi

		_global_log ""
		_global_log "$(date)"

		echo "SECTION       -- $section" >>$tmp.summary
		echo "=========================" >>$tmp.summary
		if [ ! -z "$n_try" -a $n_try != 0 ]; then
			if [ $brief_test_summary == "false" ]; then
				echo "Ran:$try"
				echo "Ran:$try" >>$tmp.summary
			fi
			_global_log "Ran:$try"
		fi

		# NOTE(review): $interrupt is initialised to true at the top
		# of the script and presumably cleared on the normal exit
		# path (not visible here), so this flags premature exits.
		$interrupt && echo "Interrupted!" | tee -a $check.log
		if $OPTIONS_HAVE_SECTIONS; then
			$interrupt && echo "Interrupted!" | tee -a \
				${REPORT_DIR}/check.log
		fi

		if [ ! -z "$notrun" ]; then
			if [ $brief_test_summary == "false" ]; then
				echo "Not run:$notrun"
				echo "Not run:$notrun" >>$tmp.summary
			fi
			_global_log "Not run:$notrun"
		fi

		if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
			echo "Failures:$bad"
			echo "Failed $n_bad of $n_try tests"
			_global_log "Failures:$bad"
			_global_log "Failed $n_bad of $n_try tests"
			echo "Failures:$bad" >>$tmp.summary
			echo "Failed $n_bad of $n_try tests" >>$tmp.summary
		else
			echo "Passed all $n_try tests"
			_global_log "Passed all $n_try tests"
			echo "Passed all $n_try tests" >>$tmp.summary
		fi
		echo "" >>$tmp.summary
		if $do_report; then
			_make_section_report
		fi
		needwrap=false
	fi

	# carry this section's failures into the cross-section total and
	# reset the per-section state
	sum_bad=`expr $sum_bad + $n_bad`
	_wipe_counters
	rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
	if ! $OPTIONS_HAVE_SECTIONS; then
		rm -f $tmp.*
	fi
}
512
# Wrap up the current section, then print the accumulated summary exactly
# once — unless we were only listing tests (-n).
_summary()
{
	_wrapup
	if ! $showme && $needsum; then
		count=$(wc -L $tmp.summary | cut -f1 -d" ")
		cat $tmp.summary
		needsum=false
	fi
	rm -f $tmp.*
}
525
# Check whichever filesystems the previous test declared it used (via the
# require_test / require_scratch marker files); returns non-zero when any
# check finds problems.
_check_filesystems()
{
	local rc=0

	if [ -f ${RESULT_DIR}/require_test ]; then
		_check_test_fs || rc=1
		rm -f ${RESULT_DIR}/require_test*
	else
		# test fs wasn't used; just make sure it is unmounted
		_test_unmount 2> /dev/null
	fi
	if [ -f ${RESULT_DIR}/require_scratch ]; then
		_check_scratch_fs || rc=1
		rm -f ${RESULT_DIR}/require_scratch*
	fi
	# the scratch fs is always left unmounted between tests
	_scratch_unmount 2> /dev/null
	return $rc
}
543
# Return 1 (and print "[expunged]") when test $1 appears in the exclude
# list accumulated in $tmp.xlist; return 0 otherwise.
_expunge_test()
{
	local TEST_ID="$1"

	# unquoted $TEST_ID matches historic behaviour (pattern match)
	if [ -s $tmp.xlist ] && grep -q $TEST_ID $tmp.xlist; then
		echo "       [expunged]"
		return 1
	fi
	return 0
}
555
556 # Can we run systemd scopes?
557 HAVE_SYSTEMD_SCOPES=
558 systemctl reset-failed "fstests-check" &>/dev/null
559 systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
560 test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
561
# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"

# Write $1 to our oom_score_adj, if the kernel lets us.
function _adjust_oom_score() {
	[ -w "${OOM_SCORE_ADJ}" ] && printf '%s\n' "$1" > "${OOM_SCORE_ADJ}"
}
_adjust_oom_score -500
568
569 # ...and make the tests themselves somewhat more attractive to it, so that if
570 # the system runs out of memory it'll be the test that gets killed and not the
571 # test framework.  The test is run in a separate process without any of our
572 # functions, so we open-code adjusting the OOM score.
573 #
574 # If systemd is available, run the entire test script in a scope so that we can
575 # kill all subprocesses of the test if it fails to clean up after itself.  This
576 # is essential for ensuring that the post-test unmount succeeds.  Note that
577 # systemd doesn't automatically remove transient scopes that fail to terminate
578 # when systemd tells them to terminate (e.g. programs stuck in D state when
579 # systemd sends SIGKILL), so we use reset-failed to tear down the scope.
_run_seq() {
	# Run ./$seq with a raised OOM score so that under memory pressure
	# the kernel kills the test, not this harness.
	local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")

	if [ -z "${HAVE_SYSTEMD_SCOPES}" ]; then
		# no systemd scopes available: run the test directly
		"${cmd[@]}"
		return
	fi

	# Run the test inside a transient scope so all of its subprocesses
	# can be torn down if it fails to clean up after itself.
	local unit="$(systemd-escape "fs$seq").scope"
	systemctl reset-failed "${unit}" &> /dev/null
	systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
	res=$?
	systemctl stop "${unit}" &> /dev/null
	return "${res}"
}
594
595 _detect_kmemleak
596 _prepare_test_list
597
598 if $OPTIONS_HAVE_SECTIONS; then
599         trap "_summary; exit \$status" 0 1 2 3 15
600 else
601         trap "_wrapup; exit \$status" 0 1 2 3 15
602 fi
603
604 function run_section()
605 {
606         local section=$1
607
608         OLD_FSTYP=$FSTYP
609         OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
610         get_next_config $section
611
612         # Do we need to run only some sections ?
613         if [ ! -z "$RUN_SECTION" ]; then
614                 skip=true
615                 for s in $RUN_SECTION; do
616                         if [ $section == $s ]; then
617                                 skip=false
618                                 break;
619                         fi
620                 done
621                 if $skip; then
622                         return
623                 fi
624         fi
625
626         # Did this section get excluded?
627         if [ ! -z "$EXCLUDE_SECTION" ]; then
628                 skip=false
629                 for s in $EXCLUDE_SECTION; do
630                         if [ $section == $s ]; then
631                                 skip=true
632                                 break;
633                         fi
634                 done
635                 if $skip; then
636                         return
637                 fi
638         fi
639
640         mkdir -p $RESULT_BASE
641         if [ ! -d $RESULT_BASE ]; then
642                 echo "failed to create results directory $RESULT_BASE"
643                 status=1
644                 exit
645         fi
646
647         if $OPTIONS_HAVE_SECTIONS; then
648                 echo "SECTION       -- $section"
649         fi
650
651         sect_start=`_wallclock`
652         if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
653                 echo "RECREATING    -- $FSTYP on $TEST_DEV"
654                 _test_unmount 2> /dev/null
655                 if ! _test_mkfs >$tmp.err 2>&1
656                 then
657                         echo "our local _test_mkfs routine ..."
658                         cat $tmp.err
659                         echo "check: failed to mkfs \$TEST_DEV using specified options"
660                         status=1
661                         exit
662                 fi
663                 if ! _test_mount
664                 then
665                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
666                         status=1
667                         exit
668                 fi
669                 # TEST_DEV has been recreated, previous FSTYP derived from
670                 # TEST_DEV could be changed, source common/rc again with
671                 # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
672                 . common/rc
673                 _prepare_test_list
674         elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
675                 _test_unmount 2> /dev/null
676                 if ! _test_mount
677                 then
678                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
679                         status=1
680                         exit
681                 fi
682         fi
683
684         init_rc
685
686         seq="check"
687         check="$RESULT_BASE/check"
688
689         # don't leave old full output behind on a clean run
690         rm -f $check.full
691
692         [ -f $check.time ] || touch $check.time
693
694         # print out our test configuration
695         echo "FSTYP         -- `_full_fstyp_details`"
696         echo "PLATFORM      -- `_full_platform_details`"
697         if [ ! -z "$SCRATCH_DEV" ]; then
698           echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
699           echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
700         fi
701         echo
702         needwrap=true
703
704         if [ ! -z "$SCRATCH_DEV" ]; then
705           _scratch_unmount 2> /dev/null
706           # call the overridden mkfs - make sure the FS is built
707           # the same as we'll create it later.
708
709           if ! _scratch_mkfs >$tmp.err 2>&1
710           then
711               echo "our local _scratch_mkfs routine ..."
712               cat $tmp.err
713               echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
714               status=1
715               exit
716           fi
717
718           # call the overridden mount - make sure the FS mounts with
719           # the same options that we'll mount with later.
720           if ! _try_scratch_mount >$tmp.err 2>&1
721           then
722               echo "our local mount routine ..."
723               cat $tmp.err
724               echo "check: failed to mount \$SCRATCH_DEV using specified options"
725               status=1
726               exit
727           else
728               _scratch_unmount
729           fi
730         fi
731
732         seqres="$check"
733         _check_test_fs
734
735         err=false
736         first_test=true
737         prev_seq=""
738         for seq in $list ; do
739                 # Run report for previous test!
740                 if $err ; then
741                         bad="$bad $seqnum"
742                         n_bad=`expr $n_bad + 1`
743                         tc_status="fail"
744                 fi
745                 if $do_report && ! $first_test ; then
746                         if [ $tc_status != "expunge" ] ; then
747                                 _make_testcase_report "$prev_seq" "$tc_status"
748                         fi
749                 fi
750                 first_test=false
751
752                 err=false
753                 prev_seq="$seq"
754                 if [ ! -f $seq ]; then
755                         # Try to get full name in case the user supplied only
756                         # seq id and the test has a name. A bit of hassle to
757                         # find really the test and not its sample output or
758                         # helping files.
759                         bname=$(basename $seq)
760                         full_seq=$(find $(dirname $seq) -name $bname* -executable |
761                                 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
762                                      END { print shortest }')
763                         if [ -f $full_seq ] && \
764                            [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
765                                 seq=$full_seq
766                         fi
767                 fi
768
769                 # the filename for the test and the name output are different.
770                 # we don't include the tests/ directory in the name output.
771                 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
772
773                 # Similarly, the result directory needs to replace the tests/
774                 # part of the test location.
775                 group=`dirname $seq`
776                 if $OPTIONS_HAVE_SECTIONS; then
777                         export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
778                         REPORT_DIR="$RESULT_BASE/$section"
779                 else
780                         export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
781                         REPORT_DIR="$RESULT_BASE"
782                 fi
783                 seqres="$REPORT_DIR/$seqnum"
784
785                 mkdir -p $RESULT_DIR
786                 rm -f ${RESULT_DIR}/require_scratch*
787                 rm -f ${RESULT_DIR}/require_test*
788                 echo -n "$seqnum"
789
790                 if $showme; then
791                         _expunge_test $seqnum
792                         if [ $? -eq 1 ]; then
793                             tc_status="expunge"
794                             continue
795                         fi
796                         echo
797                         start=0
798                         stop=0
799                         tc_status="list"
800                         n_notrun=`expr $n_notrun + 1`
801                         continue
802                 fi
803
804                 tc_status="pass"
805                 if [ ! -f $seq ]; then
806                         echo " - no such test?"
807                         continue
808                 fi
809
810                 # really going to try and run this one
811                 rm -f $seqres.out.bad
812
813                 # check if we really should run it
814                 _expunge_test $seqnum
815                 if [ $? -eq 1 ]; then
816                         tc_status="expunge"
817                         continue
818                 fi
819
820                 # record that we really tried to run this test.
821                 try="$try $seqnum"
822                 n_try=`expr $n_try + 1`
823
824                 # slashes now in names, sed barfs on them so use grep
825                 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
826                 if [ "X$lasttime" != X ]; then
827                         echo -n " ${lasttime}s ... "
828                 else
829                         echo -n "       " # prettier output with timestamps.
830                 fi
831                 rm -f core $seqres.notrun
832
833                 start=`_wallclock`
834                 $timestamp && echo -n " ["`date "+%T"`"]"
835                 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
836                 $LOGGER_PROG "run xfstest $seqnum"
837                 if [ -w /dev/kmsg ]; then
838                         export date_time=`date +"%F %T"`
839                         echo "run fstests $seqnum at $date_time" > /dev/kmsg
840                         # _check_dmesg depends on this log in dmesg
841                         touch ${RESULT_DIR}/check_dmesg
842                 fi
843                 _try_wipe_scratch_devs > /dev/null 2>&1
844
845                 # clear the WARN_ONCE state to allow a potential problem
846                 # to be reported for each test
847                 (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
848
849                 if [ "$DUMP_OUTPUT" = true ]; then
850                         _run_seq 2>&1 | tee $tmp.out
851                         # Because $? would get tee's return code
852                         sts=${PIPESTATUS[0]}
853                 else
854                         _run_seq >$tmp.out 2>&1
855                         sts=$?
856                 fi
857
858                 if [ -f core ]; then
859                         _dump_err_cont "[dumped core]"
860                         mv core $RESULT_BASE/$seqnum.core
861                         err=true
862                 fi
863
864                 if [ -f $seqres.notrun ]; then
865                         $timestamp && _timestamp
866                         stop=`_wallclock`
867                         $timestamp || echo -n "[not run] "
868                         $timestamp && echo " [not run]" && \
869                                       echo -n " $seqnum -- "
870                         cat $seqres.notrun
871                         notrun="$notrun $seqnum"
872                         n_notrun=`expr $n_notrun + 1`
873                         tc_status="notrun"
874
875                         # Unmount the scratch fs so that we can wipe the scratch
876                         # dev state prior to the next test run.
877                         _scratch_unmount 2> /dev/null
878                         continue;
879                 fi
880
881                 if [ $sts -ne 0 ]; then
882                         _dump_err_cont "[failed, exit status $sts]"
883                         _test_unmount 2> /dev/null
884                         _scratch_unmount 2> /dev/null
885                         rm -f ${RESULT_DIR}/require_test*
886                         rm -f ${RESULT_DIR}/require_scratch*
887                         err=true
888                 else
889                         # The test apparently passed, so check for corruption
890                         # and log messages that shouldn't be there.  Run the
891                         # checking tools from a subshell with adjusted OOM
892                         # score so that the OOM killer will target them instead
893                         # of the check script itself.
894                         (_adjust_oom_score 250; _check_filesystems) || err=true
895                         _check_dmesg || err=true
896                 fi
897
898                 # Reload the module after each test to check for leaks or
899                 # other problems.
900                 if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
901                         _test_unmount 2> /dev/null
902                         _scratch_unmount 2> /dev/null
903                         modprobe -r fs-$FSTYP
904                         modprobe fs-$FSTYP
905                 fi
906
907                 # Scan for memory leaks after every test so that associating
908                 # a leak to a particular test will be as accurate as possible.
909                 _check_kmemleak || err=true
910
911                 # test ends after all checks are done.
912                 $timestamp && _timestamp
913                 stop=`_wallclock`
914
915                 if [ ! -f $seq.out ]; then
916                         _dump_err "no qualified output"
917                         err=true
918                         continue;
919                 fi
920
921                 # coreutils 8.16+ changed quote formats in error messages
922                 # from `foo' to 'foo'. Filter old versions to match the new
923                 # version.
924                 sed -i "s/\`/\'/g" $tmp.out
925                 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
926                         if ! $err ; then
927                                 echo "$seqnum `expr $stop - $start`" >>$tmp.time
928                                 echo -n " `expr $stop - $start`s"
929                         fi
930                         echo ""
931                 else
932                         _dump_err "- output mismatch (see $seqres.out.bad)"
933                         mv $tmp.out $seqres.out.bad
934                         $diff $seq.out $seqres.out.bad | {
935                         if test "$DIFF_LENGTH" -le 0; then
936                                 cat
937                         else
938                                 head -n "$DIFF_LENGTH"
939                                 echo "..."
940                                 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
941                                         " to see the entire diff)"
942                         fi; } | sed -e 's/^\(.\)/    \1/'
943                         err=true
944                 fi
945         done
946
947         # make sure we record the status of the last test we ran.
948         if $err ; then
949                 bad="$bad $seqnum"
950                 n_bad=`expr $n_bad + 1`
951                 tc_status="fail"
952         fi
953         if $do_report && ! $first_test ; then
954                 if [ $tc_status != "expunge" ] ; then
955                         _make_testcase_report "$prev_seq" "$tc_status"
956                 fi
957         fi
958
959         sect_stop=`_wallclock`
960         interrupt=false
961         _wrapup
962         interrupt=true
963         echo
964
965         _test_unmount 2> /dev/null
966         _scratch_unmount 2> /dev/null
967 }
968
# Run every configured section, and repeat the whole set $iterations
# times (-i/-I options).  With -I (istop=true), stop iterating as soon
# as any section has recorded a test failure.
iters=0
while [ "$iters" -lt "$iterations" ]; do
	for section in $HOST_OPTIONS_SECTIONS; do
		run_section $section
		if [ "$istop" = true ] && [ "$sum_bad" != 0 ]; then
			# Deliberate stop, not an interrupt; set the
			# overall status before the bare "exit" so the
			# exit handler can report it.
			# NOTE(review): assumes an EXIT trap elsewhere
			# propagates $status -- confirm against file head.
			interrupt=false
			status=$((sum_bad != 0))
			exit
		fi
	done
	iters=$((iters + 1))
done
979
# Normal completion: mark the run as not interrupted and compute the
# overall exit status -- non-zero iff any test failed in any section.
# NOTE(review): the bare "exit" presumably fires an EXIT trap that
# reads $status to set the real exit code -- confirm against file head.
interrupt=false
status=$((sum_bad != 0))
exit