commit: generic: test MADV_POPULATE_READ with IO errors
[xfstests-dev.git] / check — top-level test-runner control script
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
#
# Control script for QA
#
# Global state for the whole run; most flags below are toggled by the
# command-line processing further down.
tmp=/tmp/$$
status=0
needwrap=true
needsum=true
try=()
sum_bad=0
bad=()
notrun=()
interrupt=true
diff="diff -u"
showme=false
have_test_arg=false
randomize=false
exact_order=false
export here=`pwd`
xfile=""
subdir_xfile=""
brief_test_summary=false
do_report=false
DUMP_OUTPUT=false
iterations=1
istop=false
loop_on_fail=0

# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""

# start the initialisation work now
iam=check

# mkfs.xfs uses the presence of both of these variables to enable formerly
# supported tiny filesystem configurations that fstests use for fuzz testing
# in a controlled environment
export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}

# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}

# by default don't output timestamps
timestamp=${TIMESTAMP:=false}

# remove stale working files from any earlier run that used this PID
rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*

# test directories shared between all filesystems, plus the exported
# top-level tests directory
SRC_GROUPS="generic shared"
export SRC_DIR="tests"
53
# Print command-line usage and exit non-zero.  Everything after the first
# expanded "Usage:" line is a single quoted literal, emitted verbatim.
usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -glusterfs          test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -virtiofs           test virtiofs
    -overlay            test overlay
    -pvfs2              test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    --exact-order       run tests in the exact order specified
    -i <n>              iterate the test list <n> times
    -I <n>              iterate the test list <n> times, but stops iterating further in case of any test failure
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported formats: xunit, xunit-quiet
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file
    -L <n>              loop tests <n> times following a failure, measuring aggregate pass/fail metrics

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -e testlist         exclude a specific list of tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
for every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
            exit 1
}
123
# Print the tests in tests/$1 that belong to group $2, each prefixed with
# $SRC_DIR/$1/.  Reads $SRC_DIR/$1/group.list; returns 1 when that file is
# missing or empty.
get_sub_group_list()
{
	local dir=$1
	local group=$2

	local listfile="$SRC_DIR/$dir/group.list"
	test -s "$listfile" || return 1

	# Strip comments, pad every line with a trailing space so that
	# " $group " matches a group name at end-of-line too, then print
	# the matching test names with the test-dir prefix attached.
	local matched=$(sed -n < "$listfile" \
		-e 's/#.*//' \
		-e 's/$/ /' \
		-e "s;^\($VALID_TEST_NAME\).* $group .*;$SRC_DIR/$dir/\1;p")
	echo $matched
}
137
# Expand a group name into the list of tests belonging to that group.
# $1 is either a plain group name (searched in all shared test dirs plus the
# current filesystem's dir) or <subdir>/<group> to restrict the search to one
# test directory.
get_group_list()
{
	local group=$1
	local result=""
	local subdir=$(dirname $group)
	local fsgroup="$FSTYP"

	if [ -n "$subdir" -a "$subdir" != "." -a -d "$SRC_DIR/$subdir" ]; then
		# group is given as <subdir>/<group> (e.g. xfs/quick)
		group=$(basename $group)
		get_sub_group_list $subdir $group
		return
	fi

	# ext2/ext3 share the ext4 test directory
	if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
	    fsgroup=ext4
	fi
	for dir in $SRC_GROUPS $fsgroup; do
		test -d "$SRC_DIR/$dir" || continue
		result="$result $(get_sub_group_list $dir $group)"
	done
	echo $result
}
163
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
get_all_tests()
{
	touch $tmp.list
	for d in $SRC_GROUPS $FSTYP; do
		if ! test -d "$SRC_DIR/$d" ; then
			continue
		fi
		# list every entry, drop anything with a dot (sample .out
		# files etc.), keep only valid test names, and drop the
		# group/Makefile metadata files
		ls $SRC_DIR/$d/* | \
			grep -v "\..*" | \
			grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
			grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
	done
}
179
# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
trim_test_list()
{
	local test_list="$*"

	rm -f $tmp.grep
	local numsed=0
	for t in $test_list
	do
		# flush the accumulated patterns periodically so a huge
		# exclude list doesn't feed grep one enormous pattern file
		if [ $numsed -gt 100 ]; then
			grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
			mv $tmp.tmp $tmp.list
			numsed=0
			rm -f $tmp.grep
		fi
		# anchor both ends so "xfs/001" doesn't also drop "xfs/0011"
		echo "^$t\$" >>$tmp.grep
		numsed=$((numsed + 1))
	done
	grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
	mv $tmp.tmp $tmp.list
	rm -f $tmp.grep
}
203
204
# Current wall-clock time as seconds since the Unix epoch; used for the
# per-test and per-section timing arithmetic.
_wallclock()
{
    date +%s
}
209
# Emit " [HH:MM:SS]" with no trailing newline, appended to a test's status
# line when -T is given.
_timestamp()
{
    local now
    now=$(date "+%T")
    printf '%s' " [$now]"
}
215
# Build the global $list of tests to run from the command-line test args
# ($tmp.arglist), the include groups ($GROUP_LIST) and the exclude groups
# ($XGROUP_LIST), optionally sorting or shuffling the result.
_prepare_test_list()
{
	unset list
	# Tests specified on the command line
	if [ -s $tmp.arglist ]; then
		cat $tmp.arglist > $tmp.list
	else
		touch $tmp.list
	fi

	# Specified groups to include
	# Note that the CLI processing adds a leading space to the first group
	# parameter, so we have to catch that here checking for "all"
	if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
		# no test numbers, do everything
		get_all_tests
	else
		for group in $GROUP_LIST; do
			list=$(get_group_list $group)
			if [ -z "$list" ]; then
				echo "Group \"$group\" is empty or not defined?"
				exit 1
			fi

			# de-duplicate against tests already in the list
			for t in $list; do
				grep -s "^$t\$" $tmp.list >/dev/null || \
							echo "$t" >>$tmp.list
			done
		done
	fi

	# Specified groups to exclude
	for xgroup in $XGROUP_LIST; do
		list=$(get_group_list $xgroup)
		if [ -z "$list" ]; then
			echo "Group \"$xgroup\" is empty or not defined?"
			continue
		fi

		trim_test_list $list
	done

	# sort the list of tests into numeric order unless we're running tests
	# in the exact order specified
	if ! $exact_order; then
		if $randomize; then
			# prefer shuf; fall back to the bundled awk shuffler
			if type shuf >& /dev/null; then
				sorter="shuf"
			else
				sorter="awk -v seed=$RANDOM -f randomize.awk"
			fi
		else
			sorter="cat"
		fi
		list=`sort -n $tmp.list | uniq | $sorter`
	else
		list=`cat $tmp.list`
	fi
	rm -f $tmp.list
}
276
# Process command arguments first.  Stops (without shifting) at the first
# non-option argument so the test-list loop below sees it.
while [ $# -gt 0 ]; do
	case "$1" in
	-\? | -h | --help) usage ;;

	-nfs|-glusterfs|-cifs|-9p|-virtiofs|-pvfs2|-tmpfs|-ubifs)
		# strip the leading dash to get the filesystem type
		FSTYP="${1:1}"
		;;
	-overlay)
		FSTYP=overlay
		export OVERLAY=true
		;;

	-g)	group=$2 ; shift ;
		GROUP_LIST="$GROUP_LIST ${group//,/ }"
		;;

	-x)	xgroup=$2 ; shift ;
		XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
		;;

	-X)	subdir_xfile=$2; shift ;
		;;
	-e)
		xfile=$2; shift ;
		echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
		;;

	-E)	xfile=$2; shift ;
		if [ -f $xfile ]; then
			sed "s/#.*$//" "$xfile" >> $tmp.xlist
		fi
		;;
	-s)	RUN_SECTION="$RUN_SECTION $2"; shift ;;
	-S)	EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
	-l)	diff="diff" ;;
	-udiff)	diff="$diff -u" ;;

	-n)	showme=true ;;
	-r)
		if $exact_order; then
			echo "Cannot specify -r and --exact-order."
			exit 1
		fi
		randomize=true
		;;
	--exact-order)
		if $randomize; then
			echo "Cannot specify --exact-order and -r."
			exit 1
		fi
		exact_order=true
		;;
	-i)	iterations=$2; shift ;;
	-I)	iterations=$2; istop=true; shift ;;
	-T)	timestamp=true ;;
	-d)	DUMP_OUTPUT=true ;;
	-b)	brief_test_summary=true;;
	-R)	report_fmt=$2 ; shift ;
		REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
		do_report=true
		;;
	--large-fs) export LARGE_SCRATCH_DEV=yes ;;
	# fix: the option value lives in $1 - "$r" was never set anywhere
	--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
	-L)	[[ $2 =~ ^[0-9]+$ ]] || usage
		loop_on_fail=$2; shift
		;;

	-*)	usage ;;
	*)	# not an argument, we've got tests now.
		have_test_arg=true ;;
	esac

	# if we've found a test specification, the break out of the processing
	# loop before we shift the arguments so that this is the first argument
	# that we process in the test arg loop below.
	if $have_test_arg; then
		break;
	fi

	shift
done
359
360 # we need common/rc, that also sources common/config. We need to source it
361 # after processing args, overlay needs FSTYP set before sourcing common/config
362 if ! . ./common/rc; then
363         echo "check: failed to source common/rc"
364         exit 1
365 fi
366
367 if [ -n "$subdir_xfile" ]; then
368         for d in $SRC_GROUPS $FSTYP; do
369                 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
370                 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
371                         echo $d/$f >> $tmp.xlist
372                 done
373         done
374 fi
375
# Process tests from command line now.  Each pattern is expanded relative to
# $SRC_DIR and validated against the directory's group.list before being
# added to $tmp.arglist.
if $have_test_arg; then
	while [ $# -gt 0 ]; do
		case "$1" in
		-*)	echo "Arguments before tests, please!"
			status=1
			exit $status
			;;
		*)	# Expand test pattern (e.g. xfs/???, *fs/001)
			list=$(cd $SRC_DIR; echo $1)
			for t in $list; do
				test_dir=`dirname $t`
				test_dir=${test_dir#$SRC_DIR/*}
				test_name=`basename $t`
				group_file=$SRC_DIR/$test_dir/group.list

				# egrep is deprecated, use grep -E instead
				if grep -Eq "^$test_name" $group_file; then
					# in group file ... OK
					echo $SRC_DIR/$test_dir/$test_name \
						>>$tmp.arglist
				else
					# oops
					echo "$t - unknown test, ignored"
				fi
			done
			;;
		esac

		shift
	done
elif [ -z "$GROUP_LIST" ]; then
	# default group list is the auto group. If any other group or test is
	# specified, we use that instead.
	GROUP_LIST="auto"
fi
411
# fstests needs root for mkfs/mount/device access; refuse to run otherwise.
if [ "$(id -u)" -ne 0 ]; then
    echo "check: QA must be run as root"
    exit 1
fi
417
# Reset the per-section result arrays before the next section runs.
_wipe_counters()
{
	bad=()
	notrun=()
	try=()
}
424
# Append $1 to the main check log and, when config sections are in use,
# to the per-section copy under $REPORT_DIR as well.
_global_log() {
	local msg="$1"
	echo "$msg" >> $check.log
	if $OPTIONS_HAVE_SECTIONS; then
		echo "$msg" >> ${REPORT_DIR}/check.log
	fi
}
431
# End-of-section bookkeeping: merge timing data into $check.time, write the
# Ran/Not run/Failures summaries to stdout, $check.log and $tmp.summary, and
# emit the section report when -R was given.  Guarded by $needwrap so it only
# runs once per section.
_wrapup()
{
	seq="check"
	check="$RESULT_BASE/check"

	if $showme && $needwrap; then
		if $do_report; then
			# $showme = all selected tests are notrun (no tries)
			_make_section_report "$section" "${#notrun[*]}" "0" \
					     "${#notrun[*]}" \
					     "$((sect_stop - sect_start))"
		fi
		needwrap=false
	elif $needwrap; then
		# merge this run's timings into the historical file; the awk
		# map keeps only the newest entry per test, sorted numerically
		if [ -f $check.time -a -f $tmp.time ]; then
			cat $check.time $tmp.time  \
				| $AWK_PROG '
				{ t[$1] = $2 }
				END {
					if (NR > 0) {
						for (i in t) print i " " t[i]
					}
				}' \
				| sort -n >$tmp.out
			mv $tmp.out $check.time
			if $OPTIONS_HAVE_SECTIONS; then
				cp $check.time ${REPORT_DIR}/check.time
			fi
		fi

		_global_log ""
		_global_log "$(date)"

		echo "SECTION       -- $section" >>$tmp.summary
		echo "=========================" >>$tmp.summary
		if ((${#try[*]} > 0)); then
			if [ $brief_test_summary == "false" ]; then
				echo "Ran: ${try[*]}"
				echo "Ran: ${try[*]}" >>$tmp.summary
			fi
			_global_log "Ran: ${try[*]}"
		fi

		# $interrupt stays true unless we reached a clean exit path
		$interrupt && echo "Interrupted!" | tee -a $check.log
		if $OPTIONS_HAVE_SECTIONS; then
			$interrupt && echo "Interrupted!" | tee -a \
				${REPORT_DIR}/check.log
		fi

		if ((${#notrun[*]} > 0)); then
			if [ $brief_test_summary == "false" ]; then
				echo "Not run: ${notrun[*]}"
				echo "Not run: ${notrun[*]}" >>$tmp.summary
			fi
			_global_log "Not run: ${notrun[*]}"
		fi

		if ((${#bad[*]} > 0)); then
			echo "Failures: ${bad[*]}"
			echo "Failed ${#bad[*]} of ${#try[*]} tests"
			_global_log "Failures: ${bad[*]}"
			_global_log "Failed ${#bad[*]} of ${#try[*]} tests"
			echo "Failures: ${bad[*]}" >>$tmp.summary
			echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary
		else
			echo "Passed all ${#try[*]} tests"
			_global_log "Passed all ${#try[*]} tests"
			echo "Passed all ${#try[*]} tests" >>$tmp.summary
		fi
		echo "" >>$tmp.summary
		if $do_report; then
			_make_section_report "$section" "${#try[*]}" \
					     "${#bad[*]}" "${#notrun[*]}" \
					     "$((sect_stop - sect_start))"
		fi
		needwrap=false
	fi

	# accumulate the failure count across sections, then reset state
	sum_bad=`expr $sum_bad + ${#bad[*]}`
	_wipe_counters
	rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
	if ! $OPTIONS_HAVE_SECTIONS; then
		rm -f $tmp.*
	fi
}
517
# Final wrap-up at process exit: run the section wrapup, then print the
# accumulated multi-section summary exactly once.
_summary()
{
	_wrapup
	if $showme; then
		:
	elif $needsum; then
		# line count of the summary; 'wc -L' (longest line length,
		# GNU-only) was wrong here - we want the number of lines
		count=`wc -l $tmp.summary | cut -f1 -d" "`
		cat $tmp.summary
		needsum=false
	fi
	rm -f $tmp.*
}
530
# Post-test filesystem consistency check.  Only checks the filesystems the
# test declared it used (via the require_test / require_scratch marker files
# dropped in $RESULT_DIR); otherwise just unmounts.  Returns non-zero if any
# check fails.
_check_filesystems()
{
	local ret=0

	if [ -f ${RESULT_DIR}/require_test ]; then
		_check_test_fs || ret=1
		rm -f ${RESULT_DIR}/require_test*
	else
		_test_unmount 2> /dev/null
	fi
	if [ -f ${RESULT_DIR}/require_scratch ]; then
		_check_scratch_fs || ret=1
		rm -f ${RESULT_DIR}/require_scratch*
	fi
	# scratch is always left unmounted for the next test
	_scratch_unmount 2> /dev/null
	return $ret
}
548
# Check test $1 against the accumulated exclude list in $tmp.xlist.
# Prints "[expunged]" and returns 1 when the test is excluded; returns 0
# when it should run.
_expunge_test()
{
	local TEST_ID="$1"
	if [ -s $tmp.xlist ]; then
		# quote the id so it is never word-split or glob-expanded
		if grep -q "$TEST_ID" $tmp.xlist; then
			echo "       [expunged]"
			return 1
		fi
	fi
	return 0
}
560
# Preserve a failed test's output files before a rerun overwrites them.
# $1 is the test name (relative to $REPORT_DIR), $2 the suffix (".rerunN")
# appended to each stashed copy.
_stash_fail_loop_files() {
	local prefix="${REPORT_DIR}/${1}"
	local suffix="$2"
	local ext

	for ext in ".full" ".dmesg" ".out.bad" ".notrun" ".core" ".hints"; do
		# drop any stale stash from an earlier loop, then copy the
		# current file if the test produced one
		rm -f "${prefix}${ext}${suffix}"
		if [ -f "${prefix}${ext}" ]; then
			cp "${prefix}${ext}" "${prefix}${ext}${suffix}"
		fi
	done
}
573
# Retain in @bad / @notrun the result of the just-run @test_seq. @try array
# entries are added prior to execution.
# Also drives the -L rerun-on-failure loop: while @loop_status is non-empty
# we are mid-loop, and when it exceeds $loop_on_fail entries we print the
# aggregate pass/fail percentages and end the loop.
_stash_test_status() {
	local test_seq="$1"
	local test_status="$2"

	if $do_report && [[ $test_status != "expunge" ]]; then
		_make_testcase_report "$section" "$test_seq" \
				      "$test_status" "$((stop - start))"
	fi

	if ((${#loop_status[*]} > 0)); then
		# continuing or completing rerun-on-failure loop
		_stash_fail_loop_files "$test_seq" ".rerun${#loop_status[*]}"
		loop_status+=("$test_status")
		if ((${#loop_status[*]} > loop_on_fail)); then
			printf "%s aggregate results across %d runs: " \
				"$test_seq" "${#loop_status[*]}"
			# the double-quoted prefix splices the status array
			# into the awk program; the single-quoted remainder
			# tallies per-status counts and percentages
			awk "BEGIN {
				n=split(\"${loop_status[*]}\", arr);"'
				for (i = 1; i <= n; i++)
					stats[arr[i]]++;
				for (x in stats)
					printf("%s=%d (%.1f%%)",
					       (i-- > n ? x : ", " x),
					       stats[x], 100 * stats[x] / n);
				}'
			echo
			loop_status=()
		fi
		return  # only stash @bad result for initial failure in loop
	fi

	case "$test_status" in
	fail)
		if ((loop_on_fail > 0)); then
			# initial failure, start rerun-on-failure loop
			_stash_fail_loop_files "$test_seq" ".rerun0"
			loop_status+=("$test_status")
		fi
		bad+=("$test_seq")
		;;
	list|notrun)
		notrun+=("$test_seq")
		;;
	pass|expunge)
		;;
	*)
		echo "Unexpected test $test_seq status: $test_status"
		;;
	esac
}
626
627 # Can we run systemd scopes?
628 HAVE_SYSTEMD_SCOPES=
629 systemctl reset-failed "fstests-check" &>/dev/null
630 systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
631 test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
632
# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"

# Write score $1 to our oom_score_adj, silently doing nothing when the
# file isn't writable (non-Linux, insufficient privilege).
_adjust_oom_score() {
	[ -w "${OOM_SCORE_ADJ}" ] && echo "$1" > "${OOM_SCORE_ADJ}"
}
_adjust_oom_score -500
639
640 # ...and make the tests themselves somewhat more attractive to it, so that if
641 # the system runs out of memory it'll be the test that gets killed and not the
642 # test framework.  The test is run in a separate process without any of our
643 # functions, so we open-code adjusting the OOM score.
644 #
645 # If systemd is available, run the entire test script in a scope so that we can
646 # kill all subprocesses of the test if it fails to clean up after itself.  This
647 # is essential for ensuring that the post-test unmount succeeds.  Note that
648 # systemd doesn't automatically remove transient scopes that fail to terminate
649 # when systemd tells them to terminate (e.g. programs stuck in D state when
650 # systemd sends SIGKILL), so we use reset-failed to tear down the scope.
# Run the current test ($seq).  The wrapper raises the test's own OOM score
# so that under memory pressure the kernel kills the test, not this harness.
# When systemd scopes are available the test runs inside a transient scope
# so all of its stray children can be stopped afterwards.
_run_seq() {
	local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")

	if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
		local unit="$(systemd-escape "fs$seq").scope"
		# clear any failed instance left over from a previous run
		systemctl reset-failed "${unit}" &> /dev/null
		systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
		res=$?
		# tear the scope down; systemd keeps failed transient scopes
		systemctl stop "${unit}" &> /dev/null
		return "${res}"
	else
		"${cmd[@]}"
	fi
}
665
666 _detect_kmemleak
667 _prepare_test_list
668
669 if $OPTIONS_HAVE_SECTIONS; then
670         trap "_summary; exit \$status" 0 1 2 3 15
671 else
672         trap "_wrapup; exit \$status" 0 1 2 3 15
673 fi
674
675 function run_section()
676 {
677         local section=$1 skip
678
679         OLD_FSTYP=$FSTYP
680         OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
681         get_next_config $section
682
683         # Do we need to run only some sections ?
684         if [ ! -z "$RUN_SECTION" ]; then
685                 skip=true
686                 for s in $RUN_SECTION; do
687                         if [ $section == $s ]; then
688                                 skip=false
689                                 break;
690                         fi
691                 done
692                 if $skip; then
693                         return
694                 fi
695         fi
696
697         # Did this section get excluded?
698         if [ ! -z "$EXCLUDE_SECTION" ]; then
699                 skip=false
700                 for s in $EXCLUDE_SECTION; do
701                         if [ $section == $s ]; then
702                                 skip=true
703                                 break;
704                         fi
705                 done
706                 if $skip; then
707                         return
708                 fi
709         fi
710
711         mkdir -p $RESULT_BASE
712         if [ ! -d $RESULT_BASE ]; then
713                 echo "failed to create results directory $RESULT_BASE"
714                 status=1
715                 exit
716         fi
717
718         if $OPTIONS_HAVE_SECTIONS; then
719                 echo "SECTION       -- $section"
720         fi
721
722         sect_start=`_wallclock`
723         if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
724                 echo "RECREATING    -- $FSTYP on $TEST_DEV"
725                 _test_unmount 2> /dev/null
726                 if ! _test_mkfs >$tmp.err 2>&1
727                 then
728                         echo "our local _test_mkfs routine ..."
729                         cat $tmp.err
730                         echo "check: failed to mkfs \$TEST_DEV using specified options"
731                         status=1
732                         exit
733                 fi
734                 if ! _test_mount
735                 then
736                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
737                         status=1
738                         exit
739                 fi
740                 # TEST_DEV has been recreated, previous FSTYP derived from
741                 # TEST_DEV could be changed, source common/rc again with
742                 # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
743                 . common/rc
744                 _prepare_test_list
745         elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
746                 _test_unmount 2> /dev/null
747                 if ! _test_mount
748                 then
749                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
750                         status=1
751                         exit
752                 fi
753         fi
754
755         init_rc
756
757         seq="check"
758         check="$RESULT_BASE/check"
759
760         # don't leave old full output behind on a clean run
761         rm -f $check.full
762
763         [ -f $check.time ] || touch $check.time
764
765         # print out our test configuration
766         echo "FSTYP         -- `_full_fstyp_details`"
767         echo "PLATFORM      -- `_full_platform_details`"
768         if [ ! -z "$SCRATCH_DEV" ]; then
769           echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
770           echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
771         fi
772         echo
773         needwrap=true
774
775         if [ ! -z "$SCRATCH_DEV" ]; then
776           _scratch_unmount 2> /dev/null
777           # call the overridden mkfs - make sure the FS is built
778           # the same as we'll create it later.
779
780           if ! _scratch_mkfs >$tmp.err 2>&1
781           then
782               echo "our local _scratch_mkfs routine ..."
783               cat $tmp.err
784               echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
785               status=1
786               exit
787           fi
788
789           # call the overridden mount - make sure the FS mounts with
790           # the same options that we'll mount with later.
791           if ! _try_scratch_mount >$tmp.err 2>&1
792           then
793               echo "our local mount routine ..."
794               cat $tmp.err
795               echo "check: failed to mount \$SCRATCH_DEV using specified options"
796               status=1
797               exit
798           else
799               _scratch_unmount
800           fi
801         fi
802
803         seqres="$check"
804         _check_test_fs
805
806         loop_status=()  # track rerun-on-failure state
807         local tc_status ix
	# Walk the section's test list.  The unusual third clause of the
	# for(()) only advances ix while loop_status is empty — presumably
	# so a failed test is re-run in place when loop-on-fail mode is
	# active (loop_status is maintained outside this hunk; confirm).
	local -a _list=( $list )
	for ((ix = 0; ix < ${#_list[*]}; !${#loop_status[*]} && ix++)); do
		seq="${_list[$ix]}"

		if [ ! -f $seq ]; then
			# Try to get full name in case the user supplied only
			# seq id and the test has a name. A bit of hassle to
			# find really the test and not its sample output or
			# helping files.  Of all executable matches, the awk
			# keeps the shortest path, i.e. the test itself rather
			# than longer-named auxiliary files.
			bname=$(basename $seq)
			full_seq=$(find $(dirname $seq) -name $bname* -executable |
				awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
				     END { print shortest }')
			# Accept the expansion only if the file exists and the
			# supplied id matches the valid-test-id pattern.
			if [ -f $full_seq ] && \
			   [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
				seq=$full_seq
			fi
		fi

		# the filename for the test and the name output are different.
		# we don't include the tests/ directory in the name output.
		export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

		# Similarly, the result directory needs to replace the tests/
		# part of the test location.
		group=`dirname $seq`
		if $OPTIONS_HAVE_SECTIONS; then
			# Multiple config sections: each section gets its own
			# result subdirectory under RESULT_BASE.
			export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
			REPORT_DIR="$RESULT_BASE/$section"
		else
			export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
			REPORT_DIR="$RESULT_BASE"
		fi
		seqres="$REPORT_DIR/$seqnum"

		# Start from a clean result dir; stale require_* marker files
		# from a previous run must not leak into this test.
		mkdir -p $RESULT_DIR
		rm -f ${RESULT_DIR}/require_scratch*
		rm -f ${RESULT_DIR}/require_test*
		echo -n "$seqnum"

		if $showme; then
			# -n mode: report what would run (or be expunged)
			# without executing anything.
			_expunge_test $seqnum
			if [ $? -eq 1 ]; then
				tc_status="expunge"
			else
				echo
				start=0
				stop=0
				tc_status="list"
			fi
			_stash_test_status "$seqnum" "$tc_status"
			continue
		fi

		tc_status="pass"
		if [ ! -f $seq ]; then
			echo " - no such test?"
			_stash_test_status "$seqnum" "$tc_status"
			continue
		fi

		# really going to try and run this one
		rm -f $seqres.out.bad $seqres.hints

		# check if we really should run it
		_expunge_test $seqnum
		if [ $? -eq 1 ]; then
			tc_status="expunge"
			_stash_test_status "$seqnum" "$tc_status"
			continue
		fi

		# record that we really tried to run this test.  Guarded so
		# that repeat executions of the same test (loop_status
		# non-empty) are not counted twice.
		if ((!${#loop_status[*]})); then
			try+=("$seqnum")
		fi

		# Print the test's duration from a previous run (looked up in
		# $check.time), or blank padding if it has never run.
		awk 'BEGIN {lasttime="       "} \
		     $1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \
		     END {printf "%s", lasttime}' "$check.time"
		rm -f core $seqres.notrun

		start=`_wallclock`
		$timestamp && _timestamp
		[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
		$LOGGER_PROG "run xfstest $seqnum"
		if [ -w /dev/kmsg ]; then
			# Mark the test boundary in the kernel log so dmesg
			# output can be attributed to a single test.
			export date_time=`date +"%F %T"`
			echo "run fstests $seqnum at $date_time" > /dev/kmsg
			# _check_dmesg depends on this log in dmesg
			touch ${RESULT_DIR}/check_dmesg
		fi
		_try_wipe_scratch_devs > /dev/null 2>&1

		# clear the WARN_ONCE state to allow a potential problem
		# to be reported for each test
		(echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1

		if [ "$DUMP_OUTPUT" = true ]; then
			_run_seq 2>&1 | tee $tmp.out
			# Because $? would get tee's return code
			sts=${PIPESTATUS[0]}
		else
			_run_seq >$tmp.out 2>&1
			sts=$?
		fi

		if [ -f core ]; then
			# NOTE(review): the core is stashed under $RESULT_BASE,
			# not $RESULT_DIR — with config sections that target
			# directory ($RESULT_BASE/<group>) may not exist, so
			# this mv could fail.  Confirm whether it should use
			# $RESULT_DIR instead.
			_dump_err_cont "[dumped core]"
			mv core $RESULT_BASE/$seqnum.core
			tc_status="fail"
		fi

		if [ -f $seqres.notrun ]; then
			# The test declined to run here (missing feature,
			# device, etc.) and left its reason in $seqres.notrun.
			$timestamp && _timestamp
			stop=`_wallclock`
			$timestamp || echo -n "[not run] "
			$timestamp && echo " [not run]" && \
				      echo -n " $seqnum -- "
			cat $seqres.notrun
			tc_status="notrun"
			_stash_test_status "$seqnum" "$tc_status"

			# Unmount the scratch fs so that we can wipe the scratch
			# dev state prior to the next test run.
			_scratch_unmount 2> /dev/null
			continue;
		fi

		if [ $sts -ne 0 ]; then
			# Non-zero exit from the test itself: clean up mounts
			# and require_* state, mark it failed.
			_dump_err_cont "[failed, exit status $sts]"
			_test_unmount 2> /dev/null
			_scratch_unmount 2> /dev/null
			rm -f ${RESULT_DIR}/require_test*
			rm -f ${RESULT_DIR}/require_scratch*
			tc_status="fail"
		else
			# The test apparently passed, so check for corruption
			# and log messages that shouldn't be there.  Run the
			# checking tools from a subshell with adjusted OOM
			# score so that the OOM killer will target them instead
			# of the check script itself.
			(_adjust_oom_score 250; _check_filesystems) || tc_status="fail"
			_check_dmesg || tc_status="fail"
		fi

		# Reload the module after each test to check for leaks or
		# other problems.
		if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
			_test_unmount 2> /dev/null
			_scratch_unmount 2> /dev/null
			modprobe -r fs-$FSTYP
			modprobe fs-$FSTYP
		fi

		# Scan for memory leaks after every test so that associating
		# a leak to a particular test will be as accurate as possible.
		_check_kmemleak || tc_status="fail"

		# test ends after all checks are done.
		$timestamp && _timestamp
		stop=`_wallclock`

		if [ ! -f $seq.out ]; then
			# No golden output exists to compare against.
			_dump_err "no qualified output"
			tc_status="fail"
			_stash_test_status "$seqnum" "$tc_status"
			continue;
		fi

		# coreutils 8.16+ changed quote formats in error messages
		# from `foo' to 'foo'. Filter old versions to match the new
		# version.
		sed -i "s/\`/\'/g" $tmp.out
		if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
			# Output matched the golden image; only report the
			# runtime when the post-run checks also passed.
			if [ "$tc_status" != "fail" ]; then
				echo "$seqnum `expr $stop - $start`" >>$tmp.time
				echo -n " `expr $stop - $start`s"
			fi
			echo ""
		else
			# Output mismatch: keep the bad output and show at most
			# DIFF_LENGTH lines of the diff (0 means all of it),
			# indented by four spaces for readability.
			_dump_err "- output mismatch (see $seqres.out.bad)"
			mv $tmp.out $seqres.out.bad
			$diff $seq.out $seqres.out.bad | {
			if test "$DIFF_LENGTH" -le 0; then
				cat
			else
				head -n "$DIFF_LENGTH"
				echo "..."
				echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
					" to see the entire diff)"
			fi; } | sed -e 's/^\(.\)/    \1/'
			tc_status="fail"
		fi
		# A test may leave hints about likely failure causes; show
		# them only when it actually failed, otherwise discard.
		if [ -f $seqres.hints ]; then
			if [ "$tc_status" == "fail" ]; then
				echo
				cat $seqres.hints
			else
				rm -f $seqres.hints
			fi
		fi
		_stash_test_status "$seqnum" "$tc_status"
	done

	# Per-section teardown: run reporting with interrupt disabled
	# (presumably consulted by a signal trap set elsewhere — confirm),
	# then make sure nothing is left mounted for the next section.
	sect_stop=`_wallclock`
	interrupt=false
	_wrapup
	interrupt=true
	echo

	_test_unmount 2> /dev/null
	_scratch_unmount 2> /dev/null
}
1022
# Top-level driver: sweep every configured section, and repeat the whole
# sweep $iterations times (-i option).  If early-stop was requested
# (istop) and any test in the section just run has failed (sum_bad),
# record a failing exit status and bail out immediately; the bare
# "exit" defers to the script's exit handling.
iters=0
while ((iters < iterations)); do
	for section in $HOST_OPTIONS_SECTIONS; do
		run_section $section
		if [ "$istop" = true ] && [ "$sum_bad" != 0 ]; then
			interrupt=false
			status=`expr $sum_bad != 0`
			exit
		fi
	done
	iters=$((iters + 1))
done
1033
1034 interrupt=false
1035 status=`expr $sum_bad != 0`
1036 exit