generic: test MADV_POPULATE_READ with IO errors
[xfstests-dev.git] / check
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc.  All Rights Reserved.
#
# Control script for QA
#
# Scratch files for this run live under /tmp/<pid>.*
tmp=/tmp/$$
status=0		# overall exit status reported via the exit traps
needwrap=true		# _wrapup still has per-section results to flush
needsum=true		# _summary still has the final summary to print
try=()			# tests attempted in the current section
sum_bad=0		# running count of failures across all sections
bad=()			# tests that failed in the current section
notrun=()		# tests skipped in the current section
interrupt=true		# assume ^C until the run completes normally
diff="diff -u"
showme=false		# -n: list tests without running them
have_test_arg=false	# set once a non-option (test name) argument is seen
randomize=false		# -r: shuffle the test list
exact_order=false	# --exact-order: run tests exactly as specified
export here=`pwd`
xfile=""		# -e/-E exclusion source (inline list or file)
subdir_xfile=""		# -X: per-test-dir exclusion file name
brief_test_summary=false
do_report=false		# -R: emit xunit-style reports
DUMP_OUTPUT=false	# -d: copy test output to stdout
iterations=1		# -i/-I: how many times to iterate the test list
istop=false		# -I: stop iterating on the first failure
loop_on_fail=0		# -L: rerun count after a failure
exclude_tests=()

# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""

# start the initialisation work now
iam=check

# mkfs.xfs uses the presence of both of these variables to enable formerly
# supported tiny filesystem configurations that fstests use for fuzz testing
# in a controlled environment
export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}

# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}

# by default don't output timestamps
timestamp=${TIMESTAMP:=false}

# remove stale state left behind by a previous run with this pid
rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.report.* $tmp.arglist

# test dirs shared by all filesystems; the FSTYP-specific dir is added later
SRC_GROUPS="generic shared"
export SRC_DIR="tests"
# Print the command line help text and exit with a usage error.
usage()
{
    echo "Usage: $0 [options] [testlist]"'

check options
    -nfs                test NFS
    -afs                test AFS
    -glusterfs          test GlusterFS
    -cifs               test CIFS
    -9p                 test 9p
    -fuse               test fuse
    -virtiofs           test virtiofs
    -overlay            test overlay
    -pvfs2              test PVFS2
    -tmpfs              test TMPFS
    -ubifs              test ubifs
    -l                  line mode diff
    -udiff              show unified diff (default)
    -n                  show me, do not run tests
    -T                  output timestamps
    -r                  randomize test order
    --exact-order       run tests in the exact order specified
    -i <n>              iterate the test list <n> times
    -I <n>              iterate the test list <n> times, but stops iterating further in case of any test failure
    -d                  dump test output to stdout
    -b                  brief test summary
    -R fmt[,fmt]        generate report in formats specified. Supported formats: xunit, xunit-quiet
    --large-fs          optimise scratch device for large filesystems
    -s section          run only specified section from config file
    -S section          exclude the specified section from the config file
    -L <n>              loop tests <n> times following a failure, measuring aggregate pass/fail metrics

testlist options
    -g group[,group...] include tests from these groups
    -x group[,group...] exclude tests from these groups
    -X exclude_file     exclude individual tests
    -e testlist         exclude a specific list of tests
    -E external_file    exclude individual tests
    [testlist]          include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
for every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

examples:
 check xfs/001
 check -g quick
 check -g xfs/quick
 check -x stress xfs/*
 check -X .exclude -g auto
 check -E ~/.xfstests.exclude
'
	    exit 1
}
126
# Print the tests in test dir $1 that belong to group $2, as
# "tests/<dir>/<name>" entries.  Returns non-zero when the dir has no
# usable group.list file.
get_sub_group_list()
{
	local dir=$1
	local group=$2
	local listfile="$SRC_DIR/$dir/group.list"

	if ! test -s "$listfile"; then
		return 1
	fi

	# Strip comments, pad every line with a trailing space so the group
	# name can always be matched as " <group> ", then print each matching
	# test name prefixed with its directory.
	local matches=$(sed -n \
		-e 's/#.*//' \
		-e 's/$/ /' \
		-e "s;^\($VALID_TEST_NAME\).* $group .*;$SRC_DIR/$dir/\1;p" \
		< "$listfile")
	echo $matches
}
140
# Expand group name $1 into the list of matching tests.  A name of the
# form <dir>/<group> is looked up only in that test dir; a bare group
# name is collected from the shared dirs plus the FSTYP-specific dir.
get_group_list()
{
	local group=$1
	local result=""
	local subdir=$(dirname $group)
	local fsdir="$FSTYP"

	# Group scoped to a single test dir, e.g. "xfs/quick"?
	if [ -n "$subdir" ] && [ "$subdir" != "." ] && [ -d "$SRC_DIR/$subdir" ]; then
		get_sub_group_list $subdir $(basename $group)
		return
	fi

	# ext2/ext3 tests live in the ext4 directory
	case "$FSTYP" in
	ext2|ext3)
		fsdir=ext4
		;;
	esac

	for dir in $SRC_GROUPS $fsdir; do
		test -d "$SRC_DIR/$dir" || continue
		result="$result $(get_sub_group_list $dir $group)"
	done
	echo $result
}
166
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
get_all_tests()
{
	touch $tmp.list
	for d in $SRC_GROUPS $FSTYP; do
		test -d "$SRC_DIR/$d" || continue
		# keep valid test names, then drop dotted files (.out etc)
		# and build/metadata files
		ls $SRC_DIR/$d/* | \
			grep "^$SRC_DIR/$d/$VALID_TEST_NAME" | \
			grep -v "\..*" | \
			grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
	done
}
182
# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
trim_test_list()
{
	local remove_list="$*"
	local batched=0

	rm -f $tmp.grep
	for t in $remove_list
	do
		# flush the accumulated patterns periodically so a huge
		# exclude list never produces one enormous grep invocation
		if [ $batched -gt 100 ]; then
			grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
			mv $tmp.tmp $tmp.list
			batched=0
			rm -f $tmp.grep
		fi
		echo "^$t\$" >>$tmp.grep
		batched=$((batched + 1))
	done
	grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
	mv $tmp.tmp $tmp.list
	rm -f $tmp.grep
}
206
# Emit the current wallclock time as " [HH:MM:SS]" with no newline.
_timestamp()
{
    local now
    now=$(date "+%T")
    printf ' [%s]' "$now"
}
212
# Build the global $list of tests for this run.  Sources, in order:
# tests named on the command line ($tmp.arglist), then the groups
# requested via -g (or every test for "-g all"), minus any groups
# excluded via -x.  Finally the list is sorted/shuffled unless
# --exact-order was given.  Consumes and removes $tmp.list.
_prepare_test_list()
{
	unset list
	# Tests specified on the command line
	if [ -s $tmp.arglist ]; then
		cat $tmp.arglist > $tmp.list
	else
		touch $tmp.list
	fi

	# Specified groups to include
	# Note that the CLI processing adds a leading space to the first group
	# parameter, so we have to catch that here checking for "all"
	if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
		# no test numbers, do everything
		get_all_tests
	else
		for group in $GROUP_LIST; do
			list=$(get_group_list $group)
			if [ -z "$list" ]; then
				echo "Group \"$group\" is empty or not defined?"
				exit 1
			fi

			# skip duplicates from overlapping groups or tests
			# already named on the command line
			for t in $list; do
				grep -s "^$t\$" $tmp.list >/dev/null || \
							echo "$t" >>$tmp.list
			done
		done
	fi

	# Specified groups to exclude
	for xgroup in $XGROUP_LIST; do
		list=$(get_group_list $xgroup)
		if [ -z "$list" ]; then
			echo "Group \"$xgroup\" is empty or not defined?"
			continue
		fi

		trim_test_list $list
	done

	# sort the list of tests into numeric order unless we're running tests
	# in the exact order specified
	if ! $exact_order; then
		if $randomize; then
			# prefer shuf; fall back to the bundled awk shuffler
			if type shuf >& /dev/null; then
				sorter="shuf"
			else
				sorter="awk -v seed=$RANDOM -f randomize.awk"
			fi
		else
			sorter="cat"
		fi
		list=`sort -n $tmp.list | uniq | $sorter`
	else
		list=`cat $tmp.list`
	fi
	rm -f $tmp.list
}
273
# Process command arguments first.
while [ $# -gt 0 ]; do
	case "$1" in
	-\? | -h | --help) usage ;;

	-nfs|-afs|-glusterfs|-cifs|-9p|-fuse|-virtiofs|-pvfs2|-tmpfs|-ubifs)
		# strip the leading "-" to get the filesystem type
		FSTYP="${1:1}"
		;;
	-overlay)
		[ "$FSTYP" == overlay ] || export OVL_BASE_FSTYP="$FSTYP"
		FSTYP=overlay
		export OVERLAY=true
		;;

	-g)	group=$2 ; shift ;
		GROUP_LIST="$GROUP_LIST ${group//,/ }"
		;;

	-x)	xgroup=$2 ; shift ;
		XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
		;;

	-X)	subdir_xfile=$2; shift ;
		;;
	-e)
		xfile=$2; shift ;
		# accept both comma and space separated test lists
		readarray -t -O "${#exclude_tests[@]}" exclude_tests < \
			<(echo "$xfile" | tr ', ' '\n\n')
		;;

	-E)	xfile=$2; shift ;
		if [ -f $xfile ]; then
			# strip comments from the exclude file
			readarray -t -O ${#exclude_tests[@]} exclude_tests < \
				<(sed "s/#.*$//" $xfile)
		fi
		;;
	-s)	RUN_SECTION="$RUN_SECTION $2"; shift ;;
	-S)	EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
	-l)	diff="diff" ;;
	-udiff)	diff="$diff -u" ;;

	-n)	showme=true ;;
	-r)
		if $exact_order; then
			echo "Cannot specify -r and --exact-order."
			exit 1
		fi
		randomize=true
		;;
	--exact-order)
		if $randomize; then
			echo "Cannot specify --exact-order and -r."
			exit 1
		fi
		exact_order=true
		;;
	-i)	iterations=$2; shift ;;
	-I)	iterations=$2; istop=true; shift ;;
	-T)	timestamp=true ;;
	-d)	DUMP_OUTPUT=true ;;
	-b)	brief_test_summary=true;;
	-R)	report_fmt=$2 ; shift ;
		REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
		do_report=true
		;;
	--large-fs) export LARGE_SCRATCH_DEV=yes ;;
	# fix: this previously expanded the undefined variable $r, so the
	# requested extra space was silently lost; take it from "$1"
	--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
	-L)	[[ $2 =~ ^[0-9]+$ ]] || usage
		loop_on_fail=$2; shift
		;;

	-*)	usage ;;
	*)	# not an argument, we've got tests now.
		have_test_arg=true ;;
	esac

	# if we've found a test specification, then break out of the processing
	# loop before we shift the arguments so that this is the first argument
	# that we process in the test arg loop below.
	if $have_test_arg; then
		break;
	fi

	shift
done
359
# we need common/rc, that also sources common/config. We need to source it
# after processing args, overlay needs FSTYP set before sourcing common/config
if ! . ./common/rc; then
	echo "check: failed to source common/rc"
	exit 1
fi

# If the test config specified a soak test duration, see if there are any
# unit suffixes that need converting to an integer seconds count.
if [ -n "$SOAK_DURATION" ]; then
	# split "<number><unit>" into "<number> <unit>" for the awk converter
	SOAK_DURATION="$(echo "$SOAK_DURATION" | \
		sed -e 's/^\([.0-9]*\)\([a-z]\)*/\1 \2/g' | \
		$AWK_PROG -f $here/src/soak_duration.awk)"
	if [ $? -ne 0 ]; then
		status=1
		exit 1
	fi
fi

# -X: gather per-test-dir exclusion files into exclude_tests, ignoring
# comment text after "#"
if [ -n "$subdir_xfile" ]; then
	for d in $SRC_GROUPS $FSTYP; do
		[ -f $SRC_DIR/$d/$subdir_xfile ] || continue
		for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
			exclude_tests+=($d/$f)
		done
	done
fi

# Process tests from command line now.
if $have_test_arg; then
	while [ $# -gt 0 ]; do
		case "$1" in
		-*)	echo "Arguments before tests, please!"
			status=1
			exit $status
			;;
		*)	# Expand test pattern (e.g. xfs/???, *fs/001)
			list=$(cd $SRC_DIR; echo $1)
			for t in $list; do
				t=${t#$SRC_DIR/}
				test_dir=${t%%/*}
				test_name=${t##*/}
				group_file=$SRC_DIR/$test_dir/group.list

				# only accept names listed in the dir's
				# group file
				if grep -Eq "^$test_name" $group_file; then
					# in group file ... OK
					echo $SRC_DIR/$test_dir/$test_name \
						>>$tmp.arglist
				else
					# oops
					echo "$t - unknown test, ignored"
				fi
			done
			;;
		esac

		shift
	done
elif [ -z "$GROUP_LIST" ]; then
	# default group list is the auto group. If any other group or test is
	# specified, we use that instead.
	GROUP_LIST="auto"
fi

# fstests scribbles on devices and mounts filesystems; refuse to run
# without root privileges
if [ `id -u` -ne 0 ]
then
    echo "check: QA must be run as root"
    exit 1
fi
429
# Reset the per-section result arrays before the next section runs.
_wipe_counters()
{
	try=()
	bad=()
	notrun=()
}
436
# Append a line to the main check.log, mirroring it into the per-section
# report directory when config sections are in use.
_global_log() {
	local line="$1"

	echo "$line" >> $check.log
	if $OPTIONS_HAVE_SECTIONS; then
		echo "$line" >> ${REPORT_DIR}/check.log
	fi
}
443
444 if [ -n "$REPORT_GCOV" ]; then
445         . ./common/gcov
446         _gcov_check_report_gcov
447 fi
448
# Flush the results of the just-finished section: fold this run's per-test
# times into check.time, write Ran/Not run/Failures lines to the console,
# check.log and the summary file, emit the optional xunit section report
# and gcov report, then reset the per-section counters.  Guarded by
# $needwrap so a ^C arriving during the wrap doesn't repeat the work.
_wrapup()
{
	seq="check"
	check="$RESULT_BASE/check"
	# $interrupt is still true unless the test loop completed normally
	$interrupt && sect_stop=`_wallclock`

	if $showme && $needwrap; then
		if $do_report; then
			# $showme = all selected tests are notrun (no tries)
			_make_section_report "$section" "${#notrun[*]}" "0" \
					     "${#notrun[*]}" \
					     "$((sect_stop - sect_start))"
		fi
		needwrap=false
	elif $needwrap; then
		# merge this run's timings into check.time, keeping the most
		# recent entry for each test (later lines win in the awk map)
		if [ -f $check.time -a -f $tmp.time ]; then
			cat $check.time $tmp.time  \
				| $AWK_PROG '
				{ t[$1] = $2 }
				END {
					if (NR > 0) {
						for (i in t) print i " " t[i]
					}
				}' \
				| sort -n >$tmp.out
			mv $tmp.out $check.time
			if $OPTIONS_HAVE_SECTIONS; then
				cp $check.time ${REPORT_DIR}/check.time
			fi
		fi

		_global_log ""
		_global_log "$(date)"

		echo "SECTION       -- $section" >>$tmp.summary
		echo "=========================" >>$tmp.summary
		if ((${#try[*]} > 0)); then
			if [ $brief_test_summary == "false" ]; then
				echo "Ran: ${try[*]}"
				echo "Ran: ${try[*]}" >>$tmp.summary
			fi
			_global_log "Ran: ${try[*]}"
		fi

		$interrupt && echo "Interrupted!" | tee -a $check.log
		if $OPTIONS_HAVE_SECTIONS; then
			$interrupt && echo "Interrupted!" | tee -a \
				${REPORT_DIR}/check.log
		fi

		if ((${#notrun[*]} > 0)); then
			if [ $brief_test_summary == "false" ]; then
				echo "Not run: ${notrun[*]}"
				echo "Not run: ${notrun[*]}" >>$tmp.summary
			fi
			_global_log "Not run: ${notrun[*]}"
		fi

		if ((${#bad[*]} > 0)); then
			echo "Failures: ${bad[*]}"
			echo "Failed ${#bad[*]} of ${#try[*]} tests"
			_global_log "Failures: ${bad[*]}"
			_global_log "Failed ${#bad[*]} of ${#try[*]} tests"
			echo "Failures: ${bad[*]}" >>$tmp.summary
			echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary
		else
			echo "Passed all ${#try[*]} tests"
			_global_log "Passed all ${#try[*]} tests"
			echo "Passed all ${#try[*]} tests" >>$tmp.summary
		fi
		echo "" >>$tmp.summary
		if $do_report; then
			_make_section_report "$section" "${#try[*]}" \
					     "${#bad[*]}" "${#notrun[*]}" \
					     "$((sect_stop - sect_start))"
		fi

		# Generate code coverage report
		if [ -n "$REPORT_GCOV" ]; then
			# don't trigger multiple times if caller hits ^C
			local gcov_report_dir="$REPORT_GCOV"
			# REPORT_GCOV=1 means "use the default location"
			test "$gcov_report_dir" = "1" && \
				gcov_report_dir="$REPORT_DIR/gcov"
			unset REPORT_GCOV

			_gcov_generate_report "$gcov_report_dir"
		fi

		needwrap=false
	fi

	sum_bad=`expr $sum_bad + ${#bad[*]}`
	_wipe_counters
	rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
	if ! $OPTIONS_HAVE_SECTIONS; then
		rm -f $tmp.*
	fi
}
547
# Print the accumulated summary of all sections once, after the final
# _wrapup, then remove the remaining scratch files for this run.
_summary()
{
	_wrapup
	if $showme; then
		:
	elif $needsum; then
		# fix: count lines with -l; -L reports the longest line length
		count=`wc -l $tmp.summary | cut -f1 -d" "`
		cat $tmp.summary
		needsum=false
	fi
	rm -f $tmp.*
}
560
# Post-test filesystem consistency checking.  When the just-run test
# flagged the test and/or scratch devices as used, check them (trying a
# repair + remount of a broken test device), then leave everything
# unmounted.  Returns non-zero if any check failed.
_check_filesystems()
{
	local rc=0

	if [ ! -f ${RESULT_DIR}/require_test ]; then
		_test_unmount 2> /dev/null
	else
		if ! _check_test_fs ; then
			rc=1
			echo "Trying to repair broken TEST_DEV file system"
			_repair_test_fs
			_test_mount
		fi
		rm -f ${RESULT_DIR}/require_test*
	fi

	if [ -f ${RESULT_DIR}/require_scratch ]; then
		_check_scratch_fs || rc=1
		rm -f ${RESULT_DIR}/require_scratch*
	fi
	_scratch_unmount 2> /dev/null
	return $rc
}
583
# Check whether test $1 appears in the exclude list.  Prints the
# "[expunged]" marker and succeeds when it does; fails otherwise.
_expunge_test()
{
	local test_id="$1"
	# entries may carry trailing spaces and comments, so only anchor
	# the match at the start of the entry
	local matcher="^${test_id}\b"
	local entry

	for entry in "${exclude_tests[@]}"; do
		if [[ "$entry" =~ ${matcher} ]]; then
			echo "       [expunged]"
			return 0
		fi
	done
	return 1
}
598
# retain files which would be overwritten in subsequent reruns of the same test
_stash_fail_loop_files() {
	local prefix="${REPORT_DIR}/${1}"
	local suffix="$2"
	local ext

	for ext in ".full" ".dmesg" ".out.bad" ".notrun" ".core" ".hints"; do
		# drop any stale stash first, then copy the current file
		rm -f "${prefix}${ext}${suffix}"
		if [ -f "${prefix}${ext}" ]; then
			cp "${prefix}${ext}" "${prefix}${ext}${suffix}"
		fi
	done
}
611
# Retain in @bad / @notrun the result of the just-run @test_seq. @try array
# entries are added prior to execution.
_stash_test_status() {
	local test_seq="$1"
	local test_status="$2"

	# per-testcase xunit reporting; expunged tests are not reported
	if $do_report && [[ $test_status != "expunge" ]]; then
		_make_testcase_report "$section" "$test_seq" \
				      "$test_status" "$((stop - start))"
	fi

	if ((${#loop_status[*]} > 0)); then
		# continuing or completing rerun-on-failure loop
		_stash_fail_loop_files "$test_seq" ".rerun${#loop_status[*]}"
		loop_status+=("$test_status")
		if ((${#loop_status[*]} > loop_on_fail)); then
			# loop finished: print aggregate per-status counts,
			# e.g. "fail=2 (33.3%), pass=4 (66.7%)"
			printf "%s aggregate results across %d runs: " \
				"$test_seq" "${#loop_status[*]}"
			# the first (shell-expanded) awk fragment injects the
			# status list; the single-quoted remainder tallies it
			awk "BEGIN {
				n=split(\"${loop_status[*]}\", arr);"'
				for (i = 1; i <= n; i++)
					stats[arr[i]]++;
				for (x in stats)
					printf("%s=%d (%.1f%%)",
					       (i-- > n ? x : ", " x),
					       stats[x], 100 * stats[x] / n);
				}'
			echo
			loop_status=()
		fi
		return	# only stash @bad result for initial failure in loop
	fi

	case "$test_status" in
	fail)
		if ((loop_on_fail > 0)); then
			# initial failure, start rerun-on-failure loop
			_stash_fail_loop_files "$test_seq" ".rerun0"
			loop_status+=("$test_status")
		fi
		bad+=("$test_seq")
		;;
	list|notrun)
		notrun+=("$test_seq")
		;;
	pass|expunge)
		;;
	*)
		echo "Unexpected test $test_seq status: $test_status"
		;;
	esac
}
664
# Can we run systemd scopes?  Probe by running a trivial command in a
# transient scope and checking that its exit status (77) propagates back.
HAVE_SYSTEMD_SCOPES=
systemctl reset-failed "fstests-check" &>/dev/null
systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes

# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
# Write score $1 to our oom_score_adj, silently skipping when the file
# is not writable (e.g. insufficient privilege).
function _adjust_oom_score() {
	test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
}
_adjust_oom_score -500
677
# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
# test framework.  The test is run in a separate process without any of our
# functions, so we open-code adjusting the OOM score.
#
# If systemd is available, run the entire test script in a scope so that we can
# kill all subprocesses of the test if it fails to clean up after itself.  This
# is essential for ensuring that the post-test unmount succeeds.  Note that
# systemd doesn't automatically remove transient scopes that fail to terminate
# when systemd tells them to terminate (e.g. programs stuck in D state when
# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
_run_seq() {
	local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")

	# no scope support: run the test directly
	if [ -z "${HAVE_SYSTEMD_SCOPES}" ]; then
		"${cmd[@]}"
		return
	fi

	local unit="$(systemd-escape "fs$seq").scope"
	systemctl reset-failed "${unit}" &> /dev/null
	systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
	res=$?
	systemctl stop "${unit}" &> /dev/null
	return "${res}"
}
703
704 _detect_kmemleak
705 _prepare_test_list
706 fstests_start_time="$(date +"%F %T")"
707
708 if $OPTIONS_HAVE_SECTIONS; then
709         trap "_summary; exit \$status" 0 1 2 3 15
710 else
711         trap "_wrapup; exit \$status" 0 1 2 3 15
712 fi
713
714 function run_section()
715 {
716         local section=$1 skip
717
718         OLD_FSTYP=$FSTYP
719         OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
720
721         # Do we need to run only some sections ?
722         if [ ! -z "$RUN_SECTION" ]; then
723                 skip=true
724                 for s in $RUN_SECTION; do
725                         if [ $section == $s ]; then
726                                 skip=false
727                                 break;
728                         fi
729                 done
730                 if $skip; then
731                         return
732                 fi
733         fi
734
735         # Did this section get excluded?
736         if [ ! -z "$EXCLUDE_SECTION" ]; then
737                 skip=false
738                 for s in $EXCLUDE_SECTION; do
739                         if [ $section == $s ]; then
740                                 skip=true
741                                 break;
742                         fi
743                 done
744                 if $skip; then
745                         return
746                 fi
747         fi
748
749         get_next_config $section
750         _canonicalize_devices
751
752         mkdir -p $RESULT_BASE
753         if [ ! -d $RESULT_BASE ]; then
754                 echo "failed to create results directory $RESULT_BASE"
755                 status=1
756                 exit
757         fi
758
759         if $OPTIONS_HAVE_SECTIONS; then
760                 echo "SECTION       -- $section"
761         fi
762
763         sect_start=`_wallclock`
764         if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
765                 echo "RECREATING    -- $FSTYP on $TEST_DEV"
766                 _test_unmount 2> /dev/null
767                 if ! _test_mkfs >$tmp.err 2>&1
768                 then
769                         echo "our local _test_mkfs routine ..."
770                         cat $tmp.err
771                         echo "check: failed to mkfs \$TEST_DEV using specified options"
772                         status=1
773                         exit
774                 fi
775                 if ! _test_mount
776                 then
777                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
778                         status=1
779                         exit
780                 fi
781                 # TEST_DEV has been recreated, previous FSTYP derived from
782                 # TEST_DEV could be changed, source common/rc again with
783                 # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
784                 . common/rc
785                 _prepare_test_list
786         elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
787                 _test_unmount 2> /dev/null
788                 if ! _test_mount
789                 then
790                         echo "check: failed to mount $TEST_DEV on $TEST_DIR"
791                         status=1
792                         exit
793                 fi
794         fi
795
796         init_rc
797
798         seq="check"
799         check="$RESULT_BASE/check"
800
801         # don't leave old full output behind on a clean run
802         rm -f $check.full
803
804         [ -f $check.time ] || touch $check.time
805
806         # print out our test configuration
807         echo "FSTYP         -- `_full_fstyp_details`"
808         echo "PLATFORM      -- `_full_platform_details`"
809         if [ ! -z "$SCRATCH_DEV" ]; then
810           echo "MKFS_OPTIONS  -- `_scratch_mkfs_options`"
811           echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
812         fi
813         echo
814         test -n "$REPORT_GCOV" && _gcov_reset
815         needwrap=true
816
817         if [ ! -z "$SCRATCH_DEV" ]; then
818           _scratch_unmount 2> /dev/null
819           # call the overridden mkfs - make sure the FS is built
820           # the same as we'll create it later.
821
822           if ! _scratch_mkfs >$tmp.err 2>&1
823           then
824               echo "our local _scratch_mkfs routine ..."
825               cat $tmp.err
826               echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
827               status=1
828               exit
829           fi
830
831           # call the overridden mount - make sure the FS mounts with
832           # the same options that we'll mount with later.
833           if ! _try_scratch_mount >$tmp.err 2>&1
834           then
835               echo "our local mount routine ..."
836               cat $tmp.err
837               echo "check: failed to mount \$SCRATCH_DEV using specified options"
838               status=1
839               exit
840           else
841               _scratch_unmount
842           fi
843         fi
844
845         seqres="$check"
846         _check_test_fs
847
848         loop_status=()  # track rerun-on-failure state
849         local tc_status ix
850         local -a _list=( $list )
851         for ((ix = 0; ix < ${#_list[*]}; !${#loop_status[*]} && ix++)); do
852                 seq="${_list[$ix]}"
853
854                 if [ ! -f $seq ]; then
855                         # Try to get full name in case the user supplied only
856                         # seq id and the test has a name. A bit of hassle to
857                         # find really the test and not its sample output or
858                         # helping files.
859                         bname=$(basename $seq)
860                         full_seq=$(find $(dirname $seq) -name $bname* -executable |
861                                 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
862                                      END { print shortest }')
863                         if [ -f $full_seq ] && \
864                            [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
865                                 seq=$full_seq
866                         fi
867                 fi
868
869                 # the filename for the test and the name output are different.
870                 # we don't include the tests/ directory in the name output.
871                 export seqnum=${seq#$SRC_DIR/}
872                 group=${seqnum%%/*}
873                 if $OPTIONS_HAVE_SECTIONS; then
874                         REPORT_DIR="$RESULT_BASE/$section"
875                 else
876                         REPORT_DIR="$RESULT_BASE"
877                 fi
878                 export RESULT_DIR="$REPORT_DIR/$group"
879                 seqres="$REPORT_DIR/$seqnum"
880
881                 # Generate the entire section report with whatever test results
882                 # we have so far.  Leave the $sect_time parameter empty so that
883                 # it's a little more obvious that this test run is incomplete.
884                 if $do_report; then
885                         _make_section_report "$section" "${#try[*]}" \
886                                              "${#bad[*]}" "${#notrun[*]}" \
887                                              "" &> /dev/null
888                 fi
889
890                 echo -n "$seqnum"
891
892                 if $showme; then
893                         if _expunge_test $seqnum; then
894                                 tc_status="expunge"
895                         else
896                                 echo
897                                 start=0
898                                 stop=0
899                                 tc_status="list"
900                         fi
901                         _stash_test_status "$seqnum" "$tc_status"
902                         continue
903                 fi
904
905                 tc_status="pass"
906                 if [ ! -f $seq ]; then
907                         echo " - no such test?"
908                         _stash_test_status "$seqnum" "$tc_status"
909                         continue
910                 fi
911
912                 # really going to try and run this one
913                 mkdir -p $RESULT_DIR
914                 rm -f ${RESULT_DIR}/require_scratch*
915                 rm -f ${RESULT_DIR}/require_test*
916                 rm -f $seqres.out.bad $seqres.hints
917
918                 # check if we really should run it
919                 if _expunge_test $seqnum; then
920                         tc_status="expunge"
921                         _stash_test_status "$seqnum" "$tc_status"
922                         continue
923                 fi
924
925                 # record that we really tried to run this test.
926                 if ((!${#loop_status[*]})); then
927                         try+=("$seqnum")
928                 fi
929
930                 awk 'BEGIN {lasttime="       "} \
931                      $1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \
932                      END {printf "%s", lasttime}' "$check.time"
933                 rm -f core $seqres.notrun
934
935                 start=`_wallclock`
936                 $timestamp && _timestamp
937                 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
938                 $LOGGER_PROG "run xfstest $seqnum"
939                 if [ -w /dev/kmsg ]; then
940                         export date_time=`date +"%F %T"`
941                         echo "run fstests $seqnum at $date_time" > /dev/kmsg
942                         # _check_dmesg depends on this log in dmesg
943                         touch ${RESULT_DIR}/check_dmesg
944                         rm -f ${RESULT_DIR}/dmesg_filter
945                 fi
946                 _try_wipe_scratch_devs > /dev/null 2>&1
947
948                 # clear the WARN_ONCE state to allow a potential problem
949                 # to be reported for each test
950                 (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
951
952                 test_start_time="$(date +"%F %T")"
953                 if [ "$DUMP_OUTPUT" = true ]; then
954                         _run_seq 2>&1 | tee $tmp.out
955                         # Because $? would get tee's return code
956                         sts=${PIPESTATUS[0]}
957                 else
958                         _run_seq >$tmp.out 2>&1
959                         sts=$?
960                 fi
961
962                 # If someone sets kernel.core_pattern or kernel.core_uses_pid,
963                 # coredumps generated by fstests might have a longer name than
964                 # just "core".  Use globbing to find the most common patterns,
965                 # assuming there are no other coredump capture packages set up.
966                 local cores=0
967                 for i in core core.*; do
968                         test -f "$i" || continue
969                         if ((cores++ == 0)); then
970                                 _dump_err_cont "[dumped core]"
971                         fi
972                         (_adjust_oom_score 250; _save_coredump "$i")
973                         tc_status="fail"
974                 done
975
976                 if [ -f $seqres.notrun ]; then
977                         $timestamp && _timestamp
978                         stop=`_wallclock`
979                         $timestamp || echo -n "[not run] "
980                         $timestamp && echo " [not run]" && \
981                                       echo -n " $seqnum -- "
982                         cat $seqres.notrun
983                         tc_status="notrun"
984                         _stash_test_status "$seqnum" "$tc_status"
985
986                         # Unmount the scratch fs so that we can wipe the scratch
987                         # dev state prior to the next test run.
988                         _scratch_unmount 2> /dev/null
989                         continue;
990                 fi
991
992                 if [ $sts -ne 0 ]; then
993                         _dump_err_cont "[failed, exit status $sts]"
994                         _test_unmount 2> /dev/null
995                         _scratch_unmount 2> /dev/null
996                         rm -f ${RESULT_DIR}/require_test*
997                         rm -f ${RESULT_DIR}/require_scratch*
998                         # Even though we failed, there may be something interesting in
999                         # dmesg which can help debugging.
1000                         _check_dmesg
1001                         (_adjust_oom_score 250; _check_filesystems)
1002                         tc_status="fail"
1003                 else
1004                         # The test apparently passed, so check for corruption
1005                         # and log messages that shouldn't be there.  Run the
1006                         # checking tools from a subshell with adjusted OOM
1007                         # score so that the OOM killer will target them instead
1008                         # of the check script itself.
1009                         (_adjust_oom_score 250; _check_filesystems) || tc_status="fail"
1010                         _check_dmesg || tc_status="fail"
1011
1012                         # Save any coredumps from the post-test fs checks
1013                         for i in core core.*; do
1014                                 test -f "$i" || continue
1015                                 if ((cores++ == 0)); then
1016                                         _dump_err_cont "[dumped core]"
1017                                 fi
1018                                 (_adjust_oom_score 250; _save_coredump "$i")
1019                                 tc_status="fail"
1020                         done
1021                 fi
1022
1023                 # Reload the module after each test to check for leaks or
1024                 # other problems.
1025                 if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
1026                         _test_unmount 2> /dev/null
1027                         _scratch_unmount 2> /dev/null
1028                         modprobe -r fs-$FSTYP
1029                         modprobe fs-$FSTYP
1030                 fi
1031
1032                 # Scan for memory leaks after every test so that associating
1033                 # a leak to a particular test will be as accurate as possible.
1034                 _check_kmemleak || tc_status="fail"
1035
1036                 # test ends after all checks are done.
1037                 $timestamp && _timestamp
1038                 stop=`_wallclock`
1039
1040                 if [ ! -f $seq.out ]; then
1041                         _dump_err "no qualified output"
1042                         tc_status="fail"
1043                         _stash_test_status "$seqnum" "$tc_status"
1044                         continue;
1045                 fi
1046
1047                 # coreutils 8.16+ changed quote formats in error messages
1048                 # from `foo' to 'foo'. Filter old versions to match the new
1049                 # version.
1050                 sed -i "s/\`/\'/g" $tmp.out
1051                 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
1052                         if [ "$tc_status" != "fail" ]; then
1053                                 echo "$seqnum `expr $stop - $start`" >>$tmp.time
1054                                 echo -n " `expr $stop - $start`s"
1055                         fi
1056                         echo ""
1057                 else
1058                         _dump_err "- output mismatch (see $seqres.out.bad)"
1059                         mv $tmp.out $seqres.out.bad
1060                         $diff $seq.out $seqres.out.bad | {
1061                         if test "$DIFF_LENGTH" -le 0; then
1062                                 cat
1063                         else
1064                                 head -n "$DIFF_LENGTH"
1065                                 echo "..."
1066                                 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
1067                                         " to see the entire diff)"
1068                         fi; } | sed -e 's/^\(.\)/    \1/'
1069                         tc_status="fail"
1070                 fi
1071                 if [ -f $seqres.hints ]; then
1072                         if [ "$tc_status" == "fail" ]; then
1073                                 echo
1074                                 cat $seqres.hints
1075                         else
1076                                 rm -f $seqres.hints
1077                         fi
1078                 fi
1079                 _stash_test_status "$seqnum" "$tc_status"
1080         done
1081
1082         sect_stop=`_wallclock`
1083         interrupt=false
1084         _wrapup
1085         interrupt=true
1086         echo
1087
1088         _test_unmount 2> /dev/null
1089         _scratch_unmount 2> /dev/null
1090 }
1091
# Main driver: run every configured config section, repeating the whole
# sweep $iterations times (-I/-L options).  run_section() updates the
# global $sum_bad failure counter as it goes.
for ((iters = 0; iters < iterations; iters++)) do
	for section in $HOST_OPTIONS_SECTIONS; do
		run_section "$section"
		# -i ("stop on failure") mode: abort the entire run as soon
		# as any section reports at least one failed test.
		if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
			interrupt=false
			# Collapse the failure count to a 0/1 exit status;
			# the EXIT trap (defined earlier in this script)
			# reports $status when the bare `exit` below fires.
			status=$((sum_bad != 0))
			exit
		fi
	done
done

interrupt=false
# Exit 0 only if no test failed across all sections and iterations.
status=$((sum_bad != 0))
exit