common/xfs: Add a helper to get an inode fork's extent count
[xfstests-dev.git] / check
1 #!/bin/bash
2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
4 #
5 # Control script for QA
6 #
7 tmp=/tmp/$$
8 status=0
9 needwrap=true
10 needsum=true
11 n_try=0
12 try=""
13 n_bad=0
14 sum_bad=0
15 bad=""
16 n_notrun=0
17 notrun=""
18 interrupt=true
19 diff="diff -u"
20 showme=false
21 have_test_arg=false
22 randomize=false
23 exact_order=false
24 export here=`pwd`
25 xfile=""
26 subdir_xfile=""
27 brief_test_summary=false
28 do_report=false
29 DUMP_OUTPUT=false
30 iterations=1
31 istop=false
32
33 # This is a global variable used to pass test failure text to reporting gunk
34 _err_msg=""
35
36 # start the initialisation work now
37 iam=check
38
39 export MSGVERB="text:action"
40 export QA_CHECK_FS=${QA_CHECK_FS:=true}
41
42 # number of diff lines from a failed test, 0 for whole output
43 export DIFF_LENGTH=${DIFF_LENGTH:=10}
44
45 # by default don't output timestamps
46 timestamp=${TIMESTAMP:=false}
47
48 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
49
50 SRC_GROUPS="generic shared"
51 export SRC_DIR="tests"
52
# Print the command-line help text and exit successfully.  The body is a
# single quoted literal; its wording and spacing are user-visible output.
usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -glusterfs          test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -virtiofs           test virtiofs
    -overlay            test overlay
    -pvfs2              test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    --exact-order       run tests in the exact order specified
    -i <n>              iterate the test list <n> times
    -I <n>              iterate the test list <n> times, but stops iterating further in case of any test failure
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported format: [xunit]
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -e testlist         exclude a specific list of tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
for every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
            exit 0
}
121
# Print the paths of the tests in $SRC_DIR/<dir> that belong to <group>,
# by scanning that directory's "group" file.  Returns 1 when the group
# file is missing or empty; prints nothing when no test matches.
get_sub_group_list()
{
        local dir=$1
        local group=$2
        local grpfile=$SRC_DIR/$dir/group

        test -s "$grpfile" || return 1

        # Strip comments, append a trailing blank so " $group " also matches
        # the last group on a line, then map matching test names to paths.
        local matches
        matches=$(sed -n < "$grpfile" \
                -e 's/#.*//' \
                -e 's/$/ /' \
                -e "s;^\($VALID_TEST_NAME\).* $group .*;$SRC_DIR/$dir/\1;p")
        # Unquoted on purpose: collapse sed's newlines into a single
        # space-separated line.
        echo $matches
}
135
# Expand a group specification into a list of test paths.  A qualified
# name like "xfs/quick" is resolved against that single test directory;
# a bare name like "quick" is resolved against every $SRC_GROUPS
# directory plus the one for the current $FSTYP.
get_group_list()
{
        local spec=$1
        local result=""
        local subdir=$(dirname $spec)
        local fsdir="$FSTYP"

        if [ -n "$subdir" ] && [ "$subdir" != "." ] && [ -d "$SRC_DIR/$subdir" ]; then
                # group is given as <subdir>/<group> (e.g. xfs/quick)
                get_sub_group_list $subdir $(basename $spec)
                return
        fi

        # ext2/ext3 share the ext4 test directory.
        case "$FSTYP" in
        ext2|ext3)
                fsdir=ext4
                ;;
        esac

        for d in $SRC_GROUPS $fsdir; do
                [ -d "$SRC_DIR/$d" ] || continue
                result="$result $(get_sub_group_list $d $spec)"
        done
        echo $result
}
161
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
# Appends the resulting paths to $tmp.list.
get_all_tests()
{
        touch $tmp.list
        for d in $SRC_GROUPS $FSTYP; do
                if ! test -d "$SRC_DIR/$d" ; then
                        continue
                fi
                # Filter the directory listing:
                #  - drop any path containing a dot (sample output etc.)
                #  - keep only names matching the valid test name pattern
                #  - drop group files and Makefiles
                ls $SRC_DIR/$d/* | \
                        grep -v "\..*" | \
                        grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
                        grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
        done
}
177
# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
trim_test_list()
{
        test_list="$*"

        # Nothing to trim?  Bail out now: running "grep -v -f" below with a
        # missing/empty pattern file would fail and empty $tmp.list entirely.
        if [ -z "$test_list" ]; then
                return
        fi

        rm -f $tmp.grep
        numsed=0
        for t in $test_list
        do
            # Apply the exclusion patterns in batches of 100 to keep the
            # grep pattern file (and its running time) bounded.
            if [ $numsed -gt 100 ]; then
                grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
                mv $tmp.tmp $tmp.list
                numsed=0
                rm -f $tmp.grep
            fi
            # Anchor each test path so it only matches a whole line.
            echo "^$t\$" >>$tmp.grep
            numsed=$((numsed + 1))
        done
        grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
        mv $tmp.tmp $tmp.list
        rm -f $tmp.grep
}
201
202
# Current time as seconds since the Unix epoch (used for test timing).
_wallclock()
{
    # bash builtin time formatting; -1 means "now".
    printf '%(%s)T\n' -1
}
207
# Emit " [HH:MM:SS]" without a trailing newline, for inline progress output.
_timestamp()
{
    now=$(date "+%T")
    printf ' [%s]' "$now"
}
213
# Build the global $list of tests to run from the command-line tests in
# $tmp.arglist, the include groups in $GROUP_LIST and the exclude groups
# in $XGROUP_LIST, then optionally randomize the order.  Exits the script
# when an include group resolves to no tests.
_prepare_test_list()
{
        unset list
        # Tests specified on the command line
        if [ -s $tmp.arglist ]; then
                cat $tmp.arglist > $tmp.list
        else
                touch $tmp.list
        fi

        # Specified groups to include
        # Note that the CLI processing adds a leading space to the first group
        # parameter, so we have to catch that here checking for "all"
        if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
                # no test numbers, do everything
                get_all_tests
        else
                for group in $GROUP_LIST; do
                        list=$(get_group_list $group)
                        if [ -z "$list" ]; then
                                echo "Group \"$group\" is empty or not defined?"
                                exit 1
                        fi

                        # De-duplicate: only append tests not already listed.
                        for t in $list; do
                                grep -s "^$t\$" $tmp.list >/dev/null || \
                                                        echo "$t" >>$tmp.list
                        done
                done
        fi

        # Specified groups to exclude
        for xgroup in $XGROUP_LIST; do
                list=$(get_group_list $xgroup)
                if [ -z "$list" ]; then
                        echo "Group \"$xgroup\" is empty or not defined?"
                        continue
                fi

                trim_test_list $list
        done

        # sort the list of tests into numeric order unless we're running tests
        # in the exact order specified
        if ! $exact_order; then
                if $randomize; then
                        # Prefer shuf(1); fall back to the awk shuffle script.
                        if type shuf >& /dev/null; then
                                sorter="shuf"
                        else
                                sorter="awk -v seed=$RANDOM -f randomize.awk"
                        fi
                else
                        sorter="cat"
                fi
                list=`sort -n $tmp.list | uniq | $sorter`
        else
                list=`cat $tmp.list`
        fi
        rm -f $tmp.list
}
274
# Process command arguments first.  Option processing stops at the first
# non-option word, which is taken to be the start of the test list and is
# handled by the test-argument loop further below.
while [ $# -gt 0 ]; do
        case "$1" in
        -\? | -h | --help) usage ;;

        -nfs)           FSTYP=nfs ;;
        -glusterfs)     FSTYP=glusterfs ;;
        -cifs)          FSTYP=cifs ;;
        -9p)            FSTYP=9p ;;
        -virtiofs)      FSTYP=virtiofs ;;
        -overlay)       FSTYP=overlay; export OVERLAY=true ;;
        -pvfs2)         FSTYP=pvfs2 ;;
        -tmpfs)         FSTYP=tmpfs ;;
        -ubifs)         FSTYP=ubifs ;;

        -g)     group=$2 ; shift ;
                GROUP_LIST="$GROUP_LIST ${group//,/ }"
                ;;

        -x)     xgroup=$2 ; shift ;
                XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
                ;;

        -X)     subdir_xfile=$2; shift ;
                ;;
        -e)
                xfile=$2; shift ;
                echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
                ;;

        -E)     xfile=$2; shift ;
                if [ -f $xfile ]; then
                        sed "s/#.*$//" "$xfile" >> $tmp.xlist
                fi
                ;;
        -s)     RUN_SECTION="$RUN_SECTION $2"; shift ;;
        -S)     EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
        -l)     diff="diff" ;;
        -udiff) diff="$diff -u" ;;

        -n)     showme=true ;;
        -r)
                if $exact_order; then
                        echo "Cannot specify -r and --exact-order."
                        exit 1
                fi
                randomize=true
                ;;
        --exact-order)
                if $randomize; then
                        echo "Cannot specify --exact-order and -r."
                        exit 1
                fi
                exact_order=true
                ;;
        -i)     iterations=$2; shift ;;
        -I)     iterations=$2; istop=true; shift ;;
        -T)     timestamp=true ;;
        -d)     DUMP_OUTPUT=true ;;
        -b)     brief_test_summary=true;;
        -R)     report_fmt=$2 ; shift ;
                REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
                do_report=true
                ;;
        --large-fs) export LARGE_SCRATCH_DEV=yes ;;
        # The value is embedded in "$1" itself (--extra-space=<n>): strip
        # everything up to and including the "=".  This previously expanded
        # the never-set variable $r, silently losing the setting.
        --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;

        -*)     usage ;;
        *)      # not an argument, we've got tests now.
                have_test_arg=true ;;
        esac

        # if we've found a test specification, the break out of the processing
        # loop before we shift the arguments so that this is the first argument
        # that we process in the test arg loop below.
        if $have_test_arg; then
                break;
        fi

        shift
done
356
357 # we need common/rc, that also sources common/config. We need to source it
358 # after processing args, overlay needs FSTYP set before sourcing common/config
359 if ! . ./common/rc; then
360         echo "check: failed to source common/rc"
361         exit 1
362 fi
363
364 if [ -n "$subdir_xfile" ]; then
365         for d in $SRC_GROUPS $FSTYP; do
366                 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
367                 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
368                         echo $d/$f >> $tmp.xlist
369                 done
370         done
371 fi
372
# Process tests from command line now.  Each argument is expanded as a
# glob relative to $SRC_DIR; names found in their directory's group file
# are appended to $tmp.arglist, unknown names are reported and skipped.
if $have_test_arg; then
        while [ $# -gt 0 ]; do
                case "$1" in
                -*)     echo "Arguments before tests, please!"
                        status=1
                        exit $status
                        ;;
                *)      # Expand test pattern (e.g. xfs/???, *fs/001)
                        list=$(cd $SRC_DIR; echo $1)
                        for t in $list; do
                                test_dir=`dirname $t`
                                test_dir=${test_dir#$SRC_DIR/*}
                                test_name=`basename $t`
                                group_file=$SRC_DIR/$test_dir/group

                                # egrep is obsolescent and newer GNU grep
                                # warns about it; grep -E is equivalent.
                                if grep -E -q "^$test_name" $group_file; then
                                        # in group file ... OK
                                        echo $SRC_DIR/$test_dir/$test_name \
                                                >>$tmp.arglist
                                else
                                        # oops
                                        echo "$t - unknown test, ignored"
                                fi
                        done
                        ;;
                esac

                shift
        done
elif [ -z "$GROUP_LIST" ]; then
        # default group list is the auto group. If any other group or test is
        # specified, we use that instead.
        GROUP_LIST="auto"
fi
408
# The tests create filesystems and (un)mount devices; refuse to run unless
# we are root.
if [ "$(id -u)" -ne 0 ]; then
    echo "check: QA must be run as root"
    exit 1
fi
414
# Reset the per-section counters and forget the accumulated test name
# lists, ready for the next section.
_wipe_counters()
{
        unset try notrun bad
        n_try=0
        n_bad=0
        n_notrun=0
}
422
# Append one line to the main $check.log and, when config sections are in
# use, to the per-section copy under $REPORT_DIR as well.
_global_log() {
        local line="$1"

        echo "$line" >> $check.log
        if $OPTIONS_HAVE_SECTIONS; then
                echo "$line" >> ${REPORT_DIR}/check.log
        fi
}
429
# End-of-section bookkeeping: merge this section's per-test runtimes into
# $check.time, emit the Ran/Not run/Failures summary to stdout, the global
# log and $tmp.summary, generate the section report when requested, then
# reset the counters.  Guarded by $needwrap so it only runs once per section.
_wrapup()
{
        seq="check"
        check="$RESULT_BASE/check"

        if $showme; then
                # -n: nothing ran, only produce the report if asked for.
                if $needwrap; then
                        if $do_report; then
                                _make_section_report
                        fi
                        needwrap=false
                fi
        elif $needwrap; then
                # Merge new timings into check.time; later entries for the
                # same test overwrite earlier ones.
                if [ -f $check.time -a -f $tmp.time ]; then
                        cat $check.time $tmp.time  \
                                | $AWK_PROG '
                                { t[$1] = $2 }
                                END {
                                        if (NR > 0) {
                                                for (i in t) print i " " t[i]
                                        }
                                }' \
                                | sort -n >$tmp.out
                        mv $tmp.out $check.time
                        if $OPTIONS_HAVE_SECTIONS; then
                                cp $check.time ${REPORT_DIR}/check.time
                        fi
                fi

                _global_log ""
                _global_log "$(date)"

                echo "SECTION       -- $section" >>$tmp.summary
                echo "=========================" >>$tmp.summary
                if [ ! -z "$n_try" -a $n_try != 0 ]; then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Ran:$try"
                                echo "Ran:$try" >>$tmp.summary
                        fi
                        _global_log "Ran:$try"
                fi

                # $interrupt is still true unless the run completed normally.
                $interrupt && echo "Interrupted!" | tee -a $check.log
                if $OPTIONS_HAVE_SECTIONS; then
                        $interrupt && echo "Interrupted!" | tee -a \
                                ${REPORT_DIR}/check.log
                fi

                if [ ! -z "$notrun" ]; then
                        if [ $brief_test_summary == "false" ]; then
                                echo "Not run:$notrun"
                                echo "Not run:$notrun" >>$tmp.summary
                        fi
                        _global_log "Not run:$notrun"
                fi

                if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
                        echo "Failures:$bad"
                        echo "Failed $n_bad of $n_try tests"
                        _global_log "Failures:$bad"
                        _global_log "Failed $n_bad of $n_try tests"
                        echo "Failures:$bad" >>$tmp.summary
                        echo "Failed $n_bad of $n_try tests" >>$tmp.summary
                else
                        echo "Passed all $n_try tests"
                        _global_log "Passed all $n_try tests"
                        echo "Passed all $n_try tests" >>$tmp.summary
                fi
                echo "" >>$tmp.summary
                if $do_report; then
                        _make_section_report
                fi
                needwrap=false
        fi

        # Accumulate failures across sections and reset per-section state.
        sum_bad=`expr $sum_bad + $n_bad`
        _wipe_counters
        rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
        if ! $OPTIONS_HAVE_SECTIONS; then
                rm -f $tmp.*
        fi
}
512
# Final wrap-up (run from the exit trap when config sections are in use):
# close out the current section, then print the accumulated per-section
# summaries exactly once.
_summary()
{
        _wrapup
        if $showme; then
                :
        elif $needsum; then
                # NOTE(review): 'count' (longest-line length, GNU wc -L) is
                # computed but never used in this function -- possibly a
                # leftover; confirm before removing.
                count=`wc -L $tmp.summary | cut -f1 -d" "`
                cat $tmp.summary
                needsum=false
        fi
        rm -f $tmp.*
}
525
# Post-test filesystem sanity: fsck the test and/or scratch filesystems
# when the test flagged that it used them (require_* marker files), and
# leave both unmounted for the next test.  Sets the global err=true when
# a check fails.
_check_filesystems()
{
        if [ ! -f ${RESULT_DIR}/require_test ]; then
                # Test fs wasn't used; just make sure it is unmounted.
                _test_unmount 2> /dev/null
        else
                _check_test_fs || err=true
                rm -f ${RESULT_DIR}/require_test*
        fi

        if [ -f ${RESULT_DIR}/require_scratch ]; then
                _check_scratch_fs || err=true
                rm -f ${RESULT_DIR}/require_scratch*
        fi
        # The scratch fs is always left unmounted.
        _scratch_unmount 2> /dev/null
}
540
# Decide whether test $1 (e.g. generic/001) is listed in the exclude list
# $tmp.xlist.  Prints "[expunged]" and returns 1 when it is; returns 0
# when the test should run.
_expunge_test()
{
	local TEST_ID="$1"
	if [ -s $tmp.xlist ]; then
		# Anchor the id at line start and require a word boundary so
		# that expunging generic/001 does not also expunge
		# generic/0011, while trailing whitespace left by comment
		# stripping is still tolerated.
		if grep -qw "^$TEST_ID" $tmp.xlist; then
			echo "       [expunged]"
			return 1
		fi
	fi
	return 0
}
552
553 # Can we run systemd scopes?
554 HAVE_SYSTEMD_SCOPES=
555 systemctl reset-failed "fstests-check" &>/dev/null
556 systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
557 test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
558
559 # Make the check script unattractive to the OOM killer...
560 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
561 test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
562
563 # ...and make the tests themselves somewhat more attractive to it, so that if
564 # the system runs out of memory it'll be the test that gets killed and not the
565 # test framework.
566 #
567 # If systemd is available, run the entire test script in a scope so that we can
568 # kill all subprocesses of the test if it fails to clean up after itself.  This
569 # is essential for ensuring that the post-test unmount succeeds.  Note that
570 # systemd doesn't automatically remove transient scopes that fail to terminate
571 # when systemd tells them to terminate (e.g. programs stuck in D state when
572 # systemd sends SIGKILL), so we use reset-failed to tear down the scope.
# Run the current test ($seq), inside a transient systemd scope when
# available (see the comment block above).  The wrapper first raises the
# test's own OOM score so memory pressure kills the test, not the harness.
_run_seq() {
        local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")

        if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
                local unit="$(systemd-escape "fs$seq").scope"
                # Clear any failed instance of this scope left by a previous
                # run; systemd does not remove failed transient scopes itself.
                systemctl reset-failed "${unit}" &> /dev/null
                systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
                res=$?
                # Kill anything the test left running, then propagate the
                # test's own exit status.
                systemctl stop "${unit}" &> /dev/null
                return "${res}"
        else
                "${cmd[@]}"
        fi
}
587
588 _detect_kmemleak
589 _prepare_test_list
590
591 if $OPTIONS_HAVE_SECTIONS; then
592         trap "_summary; exit \$status" 0 1 2 3 15
593 else
594         trap "_wrapup; exit \$status" 0 1 2 3 15
595 fi
596
597 function run_section()
598 {
599         local section=$1
600
601         OLD_FSTYP=$FSTYP
602         OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
603         get_next_config $section
604
605         # Do we need to run only some sections ?
606         if [ ! -z "$RUN_SECTION" ]; then
607                 skip=true
608                 for s in $RUN_SECTION; do
609                         if [ $section == $s ]; then
610                                 skip=false
611                                 break;
612                         fi
613                 done
614                 if $skip; then
615                         return
616                 fi
617         fi
618
619         # Did this section get excluded?
620         if [ ! -z "$EXCLUDE_SECTION" ]; then
621                 skip=false
622                 for s in $EXCLUDE_SECTION; do
623                         if [ $section == $s ]; then
624                                 skip=true
625                                 break;
626                         fi
627                 done
628                 if $skip; then
629                         return
630                 fi
631         fi
632
633         mkdir -p $RESULT_BASE
634         if [ ! -d $RESULT_BASE ]; then
635                 echo "failed to create results directory $RESULT_BASE"
636                 status=1
637                 exit
638         fi
639
640         if $OPTIONS_HAVE_SECTIONS; then
641                 echo "SECTION       -- $section"
642         fi
643
644         sect_start=`_wallclock`
645         if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
646                 echo "RECREATING    -- $FSTYP on $TEST_DEV"
647                 _test_unmount 2> /dev/null
648                 if ! _test_mkfs >$tmp.err 2>&1
649                 then
650                         echo "our local _test_mkfs routine ..."
651                         cat $tmp.err
652                         echo "check: failed to mkfs \$TEST_DEV using specified options"
653                         status=1
654                         exit
655                 fi
656                 if ! _test_mount
657                 then
658                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
659                         status=1
660                         exit
661                 fi
662                 # TEST_DEV has been recreated, previous FSTYP derived from
663                 # TEST_DEV could be changed, source common/rc again with
664                 # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
665                 . common/rc
666                 _prepare_test_list
667         elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
668                 _test_unmount 2> /dev/null
669                 if ! _test_mount
670                 then
671                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
672                         status=1
673                         exit
674                 fi
675         fi
676
677         init_rc
678
679         seq="check"
680         check="$RESULT_BASE/check"
681
682         # don't leave old full output behind on a clean run
683         rm -f $check.full
684
685         [ -f $check.time ] || touch $check.time
686
687         # print out our test configuration
688         echo "FSTYP         -- `_full_fstyp_details`"
689         echo "PLATFORM      -- `_full_platform_details`"
690         if [ ! -z "$SCRATCH_DEV" ]; then
691           echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
692           echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
693         fi
694         echo
695         needwrap=true
696
697         if [ ! -z "$SCRATCH_DEV" ]; then
698           _scratch_unmount 2> /dev/null
699           # call the overridden mkfs - make sure the FS is built
700           # the same as we'll create it later.
701
702           if ! _scratch_mkfs >$tmp.err 2>&1
703           then
704               echo "our local _scratch_mkfs routine ..."
705               cat $tmp.err
706               echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
707               status=1
708               exit
709           fi
710
711           # call the overridden mount - make sure the FS mounts with
712           # the same options that we'll mount with later.
713           if ! _try_scratch_mount >$tmp.err 2>&1
714           then
715               echo "our local mount routine ..."
716               cat $tmp.err
717               echo "check: failed to mount \$SCRATCH_DEV using specified options"
718               status=1
719               exit
720           else
721               _scratch_unmount
722           fi
723         fi
724
725         seqres="$check"
726         _check_test_fs
727
728         err=false
729         first_test=true
730         prev_seq=""
731         for seq in $list ; do
732                 # Run report for previous test!
733                 if $err ; then
734                         bad="$bad $seqnum"
735                         n_bad=`expr $n_bad + 1`
736                         tc_status="fail"
737                 fi
738                 if $do_report && ! $first_test ; then
739                         if [ $tc_status != "expunge" ] ; then
740                                 _make_testcase_report "$prev_seq" "$tc_status"
741                         fi
742                 fi
743                 first_test=false
744
745                 err=false
746                 prev_seq="$seq"
747                 if [ ! -f $seq ]; then
748                         # Try to get full name in case the user supplied only
749                         # seq id and the test has a name. A bit of hassle to
750                         # find really the test and not its sample output or
751                         # helping files.
752                         bname=$(basename $seq)
753                         full_seq=$(find $(dirname $seq) -name $bname* -executable |
754                                 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
755                                      END { print shortest }')
756                         if [ -f $full_seq ] && \
757                            [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
758                                 seq=$full_seq
759                         fi
760                 fi
761
762                 # the filename for the test and the name output are different.
763                 # we don't include the tests/ directory in the name output.
764                 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
765
766                 # Similarly, the result directory needs to replace the tests/
767                 # part of the test location.
768                 group=`dirname $seq`
769                 if $OPTIONS_HAVE_SECTIONS; then
770                         export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
771                         REPORT_DIR="$RESULT_BASE/$section"
772                 else
773                         export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
774                         REPORT_DIR="$RESULT_BASE"
775                 fi
776                 seqres="$REPORT_DIR/$seqnum"
777
778                 mkdir -p $RESULT_DIR
779                 rm -f ${RESULT_DIR}/require_scratch*
780                 rm -f ${RESULT_DIR}/require_test*
781                 echo -n "$seqnum"
782
783                 if $showme; then
784                         _expunge_test $seqnum
785                         if [ $? -eq 1 ]; then
786                             tc_status="expunge"
787                             continue
788                         fi
789                         echo
790                         start=0
791                         stop=0
792                         tc_status="list"
793                         n_notrun=`expr $n_notrun + 1`
794                         continue
795                 fi
796
797                 tc_status="pass"
798                 if [ ! -f $seq ]; then
799                         echo " - no such test?"
800                         continue
801                 fi
802
803                 # really going to try and run this one
804                 rm -f $seqres.out.bad
805
806                 # check if we really should run it
807                 _expunge_test $seqnum
808                 if [ $? -eq 1 ]; then
809                         tc_status="expunge"
810                         continue
811                 fi
812
813                 # record that we really tried to run this test.
814                 try="$try $seqnum"
815                 n_try=`expr $n_try + 1`
816
817                 # slashes now in names, sed barfs on them so use grep
818                 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
819                 if [ "X$lasttime" != X ]; then
820                         echo -n " ${lasttime}s ... "
821                 else
822                         echo -n "       " # prettier output with timestamps.
823                 fi
824                 rm -f core $seqres.notrun
825
826                 start=`_wallclock`
827                 $timestamp && echo -n " ["`date "+%T"`"]"
828                 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
829                 $LOGGER_PROG "run xfstest $seqnum"
830                 if [ -w /dev/kmsg ]; then
831                         export date_time=`date +"%F %T"`
832                         echo "run fstests $seqnum at $date_time" > /dev/kmsg
833                         # _check_dmesg depends on this log in dmesg
834                         touch ${RESULT_DIR}/check_dmesg
835                 fi
836                 _try_wipe_scratch_devs > /dev/null 2>&1
837
838                 # clear the WARN_ONCE state to allow a potential problem
839                 # to be reported for each test
840                 (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
841
842                 if [ "$DUMP_OUTPUT" = true ]; then
843                         _run_seq 2>&1 | tee $tmp.out
844                         # Because $? would get tee's return code
845                         sts=${PIPESTATUS[0]}
846                 else
847                         _run_seq >$tmp.out 2>&1
848                         sts=$?
849                 fi
850
851                 if [ -f core ]; then
852                         _dump_err_cont "[dumped core]"
853                         mv core $RESULT_BASE/$seqnum.core
854                         err=true
855                 fi
856
857                 if [ -f $seqres.notrun ]; then
858                         $timestamp && _timestamp
859                         stop=`_wallclock`
860                         $timestamp || echo -n "[not run] "
861                         $timestamp && echo " [not run]" && \
862                                       echo -n " $seqnum -- "
863                         cat $seqres.notrun
864                         notrun="$notrun $seqnum"
865                         n_notrun=`expr $n_notrun + 1`
866                         tc_status="notrun"
867                         continue;
868                 fi
869
870                 if [ $sts -ne 0 ]; then
871                         _dump_err_cont "[failed, exit status $sts]"
872                         _test_unmount 2> /dev/null
873                         _scratch_unmount 2> /dev/null
874                         rm -f ${RESULT_DIR}/require_test*
875                         rm -f ${RESULT_DIR}/require_scratch*
876                         err=true
877                 else
878                         # the test apparently passed, so check for corruption
879                         # and log messages that shouldn't be there.
880                         _check_filesystems
881                         _check_dmesg || err=true
882                 fi
883
884                 # Reload the module after each test to check for leaks or
885                 # other problems.
886                 if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
887                         _test_unmount 2> /dev/null
888                         _scratch_unmount 2> /dev/null
889                         modprobe -r fs-$FSTYP
890                         modprobe fs-$FSTYP
891                 fi
892
893                 # Scan for memory leaks after every test so that associating
894                 # a leak to a particular test will be as accurate as possible.
895                 _check_kmemleak || err=true
896
897                 # test ends after all checks are done.
898                 $timestamp && _timestamp
899                 stop=`_wallclock`
900
901                 if [ ! -f $seq.out ]; then
902                         _dump_err "no qualified output"
903                         err=true
904                         continue;
905                 fi
906
907                 # coreutils 8.16+ changed quote formats in error messages
908                 # from `foo' to 'foo'. Filter old versions to match the new
909                 # version.
910                 sed -i "s/\`/\'/g" $tmp.out
911                 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
912                         if ! $err ; then
913                                 echo "$seqnum `expr $stop - $start`" >>$tmp.time
914                                 echo -n " `expr $stop - $start`s"
915                         fi
916                         echo ""
917                 else
918                         _dump_err "- output mismatch (see $seqres.out.bad)"
919                         mv $tmp.out $seqres.out.bad
920                         $diff $seq.out $seqres.out.bad | {
921                         if test "$DIFF_LENGTH" -le 0; then
922                                 cat
923                         else
924                                 head -n "$DIFF_LENGTH"
925                                 echo "..."
926                                 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
927                                         " to see the entire diff)"
928                         fi; } | sed -e 's/^\(.\)/    \1/'
929                         err=true
930                 fi
931         done
932
933         # make sure we record the status of the last test we ran.
934         if $err ; then
935                 bad="$bad $seqnum"
936                 n_bad=`expr $n_bad + 1`
937                 tc_status="fail"
938         fi
939         if $do_report && ! $first_test ; then
940                 if [ $tc_status != "expunge" ] ; then
941                         _make_testcase_report "$prev_seq" "$tc_status"
942                 fi
943         fi
944
945         sect_stop=`_wallclock`
946         interrupt=false
947         _wrapup
948         interrupt=true
949         echo
950
951         _test_unmount 2> /dev/null
952         _scratch_unmount 2> /dev/null
953 }
954
955 for ((iters = 0; iters < $iterations; iters++)) do
956         for section in $HOST_OPTIONS_SECTIONS; do
957                 run_section $section
958                 if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
959                         interrupt=false
960                         status=`expr $sum_bad != 0`
961                         exit
962                 fi
963         done
964 done
965
966 interrupt=false
967 status=`expr $sum_bad != 0`
968 exit