# NOTE(review): this listing is a sampled extract of the fstests "check"
# driver with original line numbers fused into each line and intermediate
# lines missing; code lines below are kept byte-identical and only review
# comments are added.
2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
5 # Control script for QA
# Default to full per-section summaries; -b flips this to the brief form.
24 brief_test_summary=false
32 # This is a global variable used to pass test failure text to reporting gunk
35 # start the initialisation work now
38 # mkfs.xfs uses the presence of both of these variables to enable formerly
39 # supported tiny filesystem configurations that fstests use for fuzz testing
40 # in a controlled environment
41 export MSGVERB="text:action"
# Keep a caller-provided QA_CHECK_FS; otherwise default it to "true".
42 export QA_CHECK_FS=${QA_CHECK_FS:=true}
44 # number of diff lines from a failed test, 0 for whole output
45 export DIFF_LENGTH=${DIFF_LENGTH:=10}
47 # by default don't output timestamps
48 timestamp=${TIMESTAMP:=false}
# Remove stale scratch files possibly left behind by a previous run.
50 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.report.* $tmp.arglist
# Test directories shared by all filesystems; the FSTYP-specific directory
# is appended where these lists are consumed below.
52 SRC_GROUPS="generic shared"
53 export SRC_DIR="tests"
# usage() help text (fragment). Everything from the single quote at the end
# of the "Usage:" line onward is ONE quoted string whose closing quote lies
# past this extract, so no comments may be inserted among the lines below.
57 echo "Usage: $0 [options] [testlist]"'
62 -glusterfs test GlusterFS
66 -virtiofs test virtiofs
72 -udiff show unified diff (default)
73 -n show me, do not run tests
75 -r randomize test order
76 --exact-order run tests in the exact order specified
77 -i <n> iterate the test list <n> times
78 -I <n> iterate the test list <n> times, but stops iterating further in case of any test failure
79 -d dump test output to stdout
81 -R fmt[,fmt] generate report in formats specified. Supported formats: xunit, xunit-quiet
82 --large-fs optimise scratch device for large filesystems
83 -s section run only specified section from config file
84 -S section exclude the specified section from the config file
85 -L <n> loop tests <n> times following a failure, measuring aggregate pass/fail metrics
88 -g group[,group...] include tests from these groups
89 -x group[,group...] exclude tests from these groups
90 -X exclude_file exclude individual tests
91 -e testlist exclude a specific list of tests
92 -E external_file exclude individual tests
93 [testlist] include tests matching names in testlist
95 testlist argument is a list of tests in the form of <test dir>/<test name>.
97 <test dir> is a directory under tests that contains a group file,
98 with a list of the names of the tests in that directory.
100 <test name> may be either a specific test file name (e.g. xfs/001) or
101 a test file name match pattern (e.g. xfs/*).
103 group argument is either a name of a tests group to collect from all
104 the test dirs (e.g. quick) or a name of a tests group to collect from
105 a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
106 If you want to run all the tests in the test suite, use "-g all" to specify all
109 exclude_file argument refers to a name of a file inside each test directory.
110 for every test dir where this file is found, the listed test names are
111 excluded from the list of tests to run from that test dir.
113 external_file argument is a path to a single file containing a list of tests
114 to exclude in the form of <test dir>/<test name>.
120 check -x stress xfs/*
121 check -X .exclude -g auto
122 check -E ~/.xfstests.exclude
# Fragment of get_sub_group_list(): bail out when the directory has no
# group.list, then sed-extract the test names tagged with $grp and prefix
# them with their directory.
132 test -s "$SRC_DIR/$d/group.list" || return 1
134 local grpl=$(sed -n < $SRC_DIR/$d/group.list \
137 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
# Fragment of get_group_list(): a group may be "<subdir>/<group>"
# (e.g. xfs/quick) or a bare group name collected across all test dirs.
145 local sub=$(dirname $grp)
146 local fsgroup="$FSTYP"
148 if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
149 # group is given as <subdir>/<group> (e.g. xfs/quick)
151 get_sub_group_list $sub $grp
# ext2/ext3 share the ext4 test directory — presumably fsgroup is remapped
# here (the assignment itself is on a missing line); confirm against the
# full source.
155 if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
158 for d in $SRC_GROUPS $fsgroup; do
159 if ! test -d "$SRC_DIR/$d" ; then
162 grpl="$grpl $(get_sub_group_list $d $grp)"
167 # Find all tests, excluding files that are test metadata such as group files.
168 # It matches test names against $VALID_TEST_NAME defined in common/rc
172 for d in $SRC_GROUPS $FSTYP; do
173 if ! test -d "$SRC_DIR/$d" ; then
178 grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
179 grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
183 # takes the list of tests to run in $tmp.list, and removes the tests passed to
184 # the function from that list.
# Batch removals: once 100 patterns accumulate in $tmp.grep, apply them in
# one grep -v pass rather than one grep per test.
193 if [ $numsed -gt 100 ]; then
194 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
195 mv $tmp.tmp $tmp.list
199 echo "^$t\$" >>$tmp.grep
200 numsed=`expr $numsed + 1`
# Final flush of any patterns still pending after the loop.
202 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
203 mv $tmp.tmp $tmp.list
209 local now=`date "+%T"`
216 # Tests specified on the command line
217 if [ -s $tmp.arglist ]; then
218 cat $tmp.arglist > $tmp.list
223 # Specified groups to include
224 # Note that the CLI processing adds a leading space to the first group
225 # parameter, so we have to catch that here checking for "all"
226 if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
227 # no test numbers, do everything
# Expand each requested group into its member tests, de-duplicating
# against entries already present in $tmp.list.
230 for group in $GROUP_LIST; do
231 list=$(get_group_list $group)
232 if [ -z "$list" ]; then
233 echo "Group \"$group\" is empty or not defined?"
238 grep -s "^$t\$" $tmp.list >/dev/null || \
239 echo "$t" >>$tmp.list
244 # Specified groups to exclude
245 for xgroup in $XGROUP_LIST; do
246 list=$(get_group_list $xgroup)
247 if [ -z "$list" ]; then
248 echo "Group \"$xgroup\" is empty or not defined?"
255 # sort the list of tests into numeric order unless we're running tests
256 # in the exact order specified
# -r randomization prefers shuf when available, else falls back to an awk
# shuffler seeded from $RANDOM.
257 if ! $exact_order; then
259 if type shuf >& /dev/null; then
262 sorter="awk -v seed=$RANDOM -f randomize.awk"
267 list=`sort -n $tmp.list | uniq | $sorter`
274 # Process command arguments first.
275 while [ $# -gt 0 ]; do
277 -\? | -h | --help) usage ;;
# Filesystem-selecting flags: strip the leading dash to obtain FSTYP.
279 -nfs|-afs|-glusterfs|-cifs|-9p|-fuse|-virtiofs|-pvfs2|-tmpfs|-ubifs)
# For -overlay, remember the underlying fs type before FSTYP is replaced.
283 [ "$FSTYP" == overlay ] || export OVL_BASE_FSTYP="$FSTYP"
# -g / -x accumulate space-separated group lists; commas are accepted and
# converted to spaces (hence the leading-space quirk documented near the
# " all" check above).
288 -g) group=$2 ; shift ;
289 GROUP_LIST="$GROUP_LIST ${group//,/ }"
292 -x) xgroup=$2 ; shift ;
293 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
296 -X) subdir_xfile=$2; shift ;
# -e: append comma/space-separated test names to the exclude_tests array.
300 readarray -t -O "${#exclude_tests[@]}" exclude_tests < \
301 <(echo "$xfile" | tr ', ' '\n\n')
# -E: read an exclude file, stripping '#' comments first.
# NOTE(review): the -O offset is quoted at line 300 but unquoted at line
# 306 — harmless, but inconsistent.
304 -E) xfile=$2; shift ;
305 if [ -f $xfile ]; then
306 readarray -t -O ${#exclude_tests[@]} exclude_tests < \
307 <(sed "s/#.*$//" $xfile)
310 -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
311 -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
313 -udiff) diff="$diff -u" ;;
# Fragments of the mutually-exclusive -r / --exact-order handling: each
# flag refuses to combine with the other so the test ordering mode stays
# unambiguous.
317 if $exact_order; then
318 echo "Cannot specify -r and --exact-order."
# Fixed typo in the user-facing error message: "Cannnot" -> "Cannot",
# matching the wording of the sibling message above.
325 echo "Cannot specify --exact-order and -r."
330 -i) iterations=$2; shift ;;
331 -I) iterations=$2; istop=true; shift ;;
332 -T) timestamp=true ;;
333 -d) DUMP_OUTPUT=true ;;
334 -b) brief_test_summary=true;;
# -R accepts a comma-separated list of report formats.
335 -R) report_fmt=$2 ; shift ;
336 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
339 --large-fs) export LARGE_SCRATCH_DEV=yes ;;
# NOTE(review): ${r#*=} — $r is not set anywhere visible in this extract;
# with a "case $1" dispatch this presumably should be ${1#*=}. Confirm
# against the full source before relying on --extra-space.
340 --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
# -L takes a strictly numeric rerun count; anything else prints usage.
341 -L) [[ $2 =~ ^[0-9]+$ ]] || usage
342 loop_on_fail=$2; shift
346 *) # not an argument, we've got tests now.
347 have_test_arg=true ;;
350 # if we've found a test specification, then break out of the processing
351 # loop before we shift the arguments so that this is the first argument
352 # that we process in the test arg loop below.
353 if $have_test_arg; then
360 # we need common/rc, that also sources common/config. We need to source it
361 # after processing args, overlay needs FSTYP set before sourcing common/config
362 if ! . ./common/rc; then
363 echo "check: failed to source common/rc"
367 # If the test config specified a soak test duration, see if there are any
368 # unit suffixes that need converting to an integer seconds count.
369 if [ -n "$SOAK_DURATION" ]; then
370 SOAK_DURATION="$(echo "$SOAK_DURATION" | \
371 sed -e 's/^\([.0-9]*\)\([a-z]\)*/\1 \2/g' | \
372 $AWK_PROG -f $here/src/soak_duration.awk)"
373 if [ $? -ne 0 ]; then
# -X handling: collect exclusions from a per-test-directory exclude file,
# again stripping '#' comments.
379 if [ -n "$subdir_xfile" ]; then
380 for d in $SRC_GROUPS $FSTYP; do
381 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
382 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
383 exclude_tests+=($d/$f)
388 # Process tests from command line now.
389 if $have_test_arg; then
390 while [ $# -gt 0 ]; do
# Options are rejected once test names have started.
392 -*) echo "Arguments before tests, please!"
396 *) # Expand test pattern (e.g. xfs/???, *fs/001)
397 list=$(cd $SRC_DIR; echo $1)
# A name only counts as a test if it appears in its directory's group.list.
402 group_file=$SRC_DIR/$test_dir/group.list
404 if grep -Eq "^$test_name" $group_file; then
405 # in group file ... OK
406 echo $SRC_DIR/$test_dir/$test_name \
410 echo "$t - unknown test, ignored"
418 elif [ -z "$GROUP_LIST" ]; then
419 # default group list is the auto group. If any other group or test is
420 # specified, we use that instead.
426 echo "check: QA must be run as root"
# _global_log fragment: mirror a line into check.log, and into the
# per-section report directory when sections are in use.
438 echo "$1" >> $check.log
439 if $OPTIONS_HAVE_SECTIONS; then
440 echo "$1" >> ${REPORT_DIR}/check.log
443 # (gcov coverage check hook)
444 if [ -n "$REPORT_GCOV" ]; then
446 _gcov_check_report_gcov
# _wrapup fragment: end-of-section accounting and summary output.
452 check="$RESULT_BASE/check"
453 $interrupt && sect_stop=`_wallclock`
455 if $showme && $needwrap; then
457 # $showme = all selected tests are notrun (no tries)
458 _make_section_report "$section" "${#notrun[*]}" "0" \
460 "$((sect_stop - sect_start))"
# Merge per-test runtimes from this run into the persistent check.time DB
# (the awk body keeping the latest entry per test is partly on missing
# lines).
464 if [ -f $check.time -a -f $tmp.time ]; then
465 cat $check.time $tmp.time \
470 for (i in t) print i " " t[i]
474 mv $tmp.out $check.time
475 if $OPTIONS_HAVE_SECTIONS; then
476 cp $check.time ${REPORT_DIR}/check.time
481 _global_log "$(date)"
# Per-section summary: tried / not-run / failed lists, echoed to the
# console, the summary file and the global log.
483 echo "SECTION -- $section" >>$tmp.summary
484 echo "=========================" >>$tmp.summary
485 if ((${#try[*]} > 0)); then
486 if [ $brief_test_summary == "false" ]; then
487 echo "Ran: ${try[*]}"
488 echo "Ran: ${try[*]}" >>$tmp.summary
490 _global_log "Ran: ${try[*]}"
493 $interrupt && echo "Interrupted!" | tee -a $check.log
494 if $OPTIONS_HAVE_SECTIONS; then
495 $interrupt && echo "Interrupted!" | tee -a \
496 ${REPORT_DIR}/check.log
499 if ((${#notrun[*]} > 0)); then
500 if [ $brief_test_summary == "false" ]; then
501 echo "Not run: ${notrun[*]}"
502 echo "Not run: ${notrun[*]}" >>$tmp.summary
504 _global_log "Not run: ${notrun[*]}"
507 if ((${#bad[*]} > 0)); then
508 echo "Failures: ${bad[*]}"
509 echo "Failed ${#bad[*]} of ${#try[*]} tests"
510 _global_log "Failures: ${bad[*]}"
511 _global_log "Failed ${#bad[*]} of ${#try[*]} tests"
512 echo "Failures: ${bad[*]}" >>$tmp.summary
513 echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary
515 echo "Passed all ${#try[*]} tests"
516 _global_log "Passed all ${#try[*]} tests"
517 echo "Passed all ${#try[*]} tests" >>$tmp.summary
519 echo "" >>$tmp.summary
521 _make_section_report "$section" "${#try[*]}" \
522 "${#bad[*]}" "${#notrun[*]}" \
523 "$((sect_stop - sect_start))"
526 # Generate code coverage report
527 if [ -n "$REPORT_GCOV" ]; then
528 # don't trigger multiple times if caller hits ^C
529 local gcov_report_dir="$REPORT_GCOV"
# REPORT_GCOV=1 means "use the default location under REPORT_DIR".
530 test "$gcov_report_dir" = "1" && \
531 gcov_report_dir="$REPORT_DIR/gcov"
534 _gcov_generate_report "$gcov_report_dir"
# Accumulate this section's failure count into the run-wide total.
540 sum_bad=`expr $sum_bad + ${#bad[*]}`
542 rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
543 if ! $OPTIONS_HAVE_SECTIONS; then
554 count=`wc -L $tmp.summary | cut -f1 -d" "`
# Post-test fs verification fragment: tests drop require_test /
# require_scratch marker files to request these checks.
565 if [ -f ${RESULT_DIR}/require_test ]; then
566 if ! _check_test_fs ; then
568 echo "Trying to repair broken TEST_DEV file system"
572 rm -f ${RESULT_DIR}/require_test*
574 _test_unmount 2> /dev/null
576 if [ -f ${RESULT_DIR}/require_scratch ]; then
577 _check_scratch_fs || ret=1
578 rm -f ${RESULT_DIR}/require_scratch*
580 _scratch_unmount 2> /dev/null
# _expunge_test fragment: a test is expunged when its id matches an entry
# of the exclude_tests array (word-boundary anchored regex).
588 for f in "${exclude_tests[@]}"; do
589 # $f may contain trailing spaces and comments
590 local id_regex="^${TEST_ID}\b"
591 if [[ "$f" =~ ${id_regex} ]]; then
599 # retain files which would be overwritten in subsequent reruns of the same test
600 _stash_fail_loop_files() {
601 local seq_prefix="${REPORT_DIR}/${1}"
# Stash each per-test artifact under a rerun suffix so a later rerun of
# the same test does not clobber it ($cp_suffix is set on a missing line,
# presumably from $2 — confirm against the full source).
604 for i in ".full" ".dmesg" ".out.bad" ".notrun" ".core" ".hints"; do
605 rm -f "${seq_prefix}${i}${cp_suffix}"
606 if [ -f "${seq_prefix}${i}" ]; then
607 cp "${seq_prefix}${i}" "${seq_prefix}${i}${cp_suffix}"
612 # Retain in @bad / @notrun the result of the just-run @test_seq. @try array
613 # entries are added prior to execution.
614 _stash_test_status() {
616 local test_status="$2"
618 if $do_report && [[ $test_status != "expunge" ]]; then
619 _make_testcase_report "$section" "$test_seq" \
620 "$test_status" "$((stop - start))"
# Rerun-on-failure (-L) bookkeeping: while loop_status is non-empty we are
# inside a rerun loop for the current test.
623 if ((${#loop_status[*]} > 0)); then
624 # continuing or completing rerun-on-failure loop
625 _stash_fail_loop_files "$test_seq" ".rerun${#loop_status[*]}"
626 loop_status+=("$test_status")
# Once the configured number of reruns completes, print aggregate
# pass/fail statistics via the (partially elided) awk program below.
627 if ((${#loop_status[*]} > loop_on_fail)); then
628 printf "%s aggregate results across %d runs: " \
629 "$test_seq" "${#loop_status[*]}"
631 n=split(\"${loop_status[*]}\", arr);"'
632 for (i = 1; i <= n; i++)
635 printf("%s=%d (%.1f%%)",
636 (i-- > n ? x : ", " x),
637 stats[x], 100 * stats[x] / n);
642 return # only stash @bad result for initial failure in loop
# Dispatch on the final status: fail starts a rerun loop (when -L is set),
# notrun is recorded, anything unexpected is reported.
645 case "$test_status" in
647 if ((loop_on_fail > 0)); then
648 # initial failure, start rerun-on-failure loop
649 _stash_fail_loop_files "$test_seq" ".rerun0"
650 loop_status+=("$test_status")
655 notrun+=("$test_seq")
660 echo "Unexpected test $test_seq status: $test_status"
665 # Can we run systemd scopes?
# Probe by running a scope whose payload exits 77; seeing 77 back proves
# scopes work end to end.
667 systemctl reset-failed "fstests-check" &>/dev/null
668 systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
669 test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
671 # Make the check script unattractive to the OOM killer...
672 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
673 function _adjust_oom_score() {
674 test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
676 _adjust_oom_score -500
678 # ...and make the tests themselves somewhat more attractive to it, so that if
679 # the system runs out of memory it'll be the test that gets killed and not the
680 # test framework. The test is run in a separate process without any of our
681 # functions, so we open-code adjusting the OOM score.
683 # If systemd is available, run the entire test script in a scope so that we can
684 # kill all subprocesses of the test if it fails to clean up after itself. This
685 # is essential for ensuring that the post-test unmount succeeds. Note that
686 # systemd doesn't automatically remove transient scopes that fail to terminate
687 # when systemd tells them to terminate (e.g. programs stuck in D state when
688 # systemd sends SIGKILL), so we use reset-failed to tear down the scope.
690 local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
692 if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
693 local unit="$(systemd-escape "fs$seq").scope"
694 systemctl reset-failed "${unit}" &> /dev/null
695 systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
697 systemctl stop "${unit}" &> /dev/null
# Record the overall start time and install exit/signal traps; with
# config sections the summary wrapper runs, otherwise plain _wrapup.
706 fstests_start_time="$(date +"%F %T")"
708 if $OPTIONS_HAVE_SECTIONS; then
709 trap "_summary; exit \$status" 0 1 2 3 15
711 trap "_wrapup; exit \$status" 0 1 2 3 15
# run_section(): run the whole test list against one config section.
# Fragments of its setup phase follow.
714 function run_section()
716 local section=$1 skip
719 OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
721 # Do we need to run only some sections ?
722 if [ ! -z "$RUN_SECTION" ]; then
724 for s in $RUN_SECTION; do
725 if [ $section == $s ]; then
735 # Did this section get excluded?
736 if [ ! -z "$EXCLUDE_SECTION" ]; then
738 for s in $EXCLUDE_SECTION; do
739 if [ $section == $s ]; then
# Load this section's config and normalize device paths.
749 get_next_config $section
750 _canonicalize_devices
752 mkdir -p $RESULT_BASE
753 if [ ! -d $RESULT_BASE ]; then
754 echo "failed to create results directory $RESULT_BASE"
759 if $OPTIONS_HAVE_SECTIONS; then
760 echo "SECTION -- $section"
763 sect_start=`_wallclock`
# Recreate TEST_DEV when requested or when the fs type changed between
# sections.
764 if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
765 echo "RECREATING -- $FSTYP on $TEST_DEV"
766 _test_unmount 2> /dev/null
767 if ! _test_mkfs >$tmp.err 2>&1
769 echo "our local _test_mkfs routine ..."
771 echo "check: failed to mkfs \$TEST_DEV using specified options"
777 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
781 # TEST_DEV has been recreated, previous FSTYP derived from
782 # TEST_DEV could be changed, source common/rc again with
783 # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
# Remount when only the mount options changed between sections.
786 elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
787 _test_unmount 2> /dev/null
790 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
799 check="$RESULT_BASE/check"
801 # don't leave old full output behind on a clean run
804 [ -f $check.time ] || touch $check.time
806 # print out our test configuration
807 echo "FSTYP -- `_full_fstyp_details`"
808 echo "PLATFORM -- `_full_platform_details`"
809 if [ ! -z "$SCRATCH_DEV" ]; then
810 echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
811 echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
814 test -n "$REPORT_GCOV" && _gcov_reset
# Sanity-check the scratch device up front with the same mkfs/mount
# helpers the tests will use later.
817 if [ ! -z "$SCRATCH_DEV" ]; then
818 _scratch_unmount 2> /dev/null
819 # call the overridden mkfs - make sure the FS is built
820 # the same as we'll create it later.
822 if ! _scratch_mkfs >$tmp.err 2>&1
824 echo "our local _scratch_mkfs routine ..."
826 echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
831 # call the overridden mount - make sure the FS mounts with
832 # the same options that we'll mount with later.
833 if ! _try_scratch_mount >$tmp.err 2>&1
835 echo "our local mount routine ..."
837 echo "check: failed to mount \$SCRATCH_DEV using specified options"
# Main per-test loop of run_section. The unusual for-loop increment only
# advances ix when loop_status is empty, so a test in a rerun-on-failure
# (-L) loop is executed again instead of moving on.
848 loop_status=() # track rerun-on-failure state
850 local -a _list=( $list )
851 for ((ix = 0; ix < ${#_list[*]}; !${#loop_status[*]} && ix++)); do
852 # (seq is taken from _list[ix] on a missing line)
854 if [ ! -f $seq ]; then
855 # Try to get full name in case the user supplied only
856 # seq id and the test has a name. A bit of hassle to
857 # find really the test and not its sample output or
859 bname=$(basename $seq)
860 full_seq=$(find $(dirname $seq) -name $bname* -executable |
861 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
862 END { print shortest }')
863 if [ -f $full_seq ] && \
864 [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
869 # the filename for the test and the name output are different.
870 # we don't include the tests/ directory in the name output.
871 export seqnum=${seq#$SRC_DIR/}
# Per-section results land in a section subdirectory of RESULT_BASE.
873 if $OPTIONS_HAVE_SECTIONS; then
874 REPORT_DIR="$RESULT_BASE/$section"
876 REPORT_DIR="$RESULT_BASE"
878 export RESULT_DIR="$REPORT_DIR/$group"
879 seqres="$REPORT_DIR/$seqnum"
881 # Generate the entire section report with whatever test results
882 # we have so far. Leave the $sect_time parameter empty so that
883 # it's a little more obvious that this test run is incomplete.
885 _make_section_report "$section" "${#try[*]}" \
886 "${#bad[*]}" "${#notrun[*]}" \
# Expunged tests are recorded but never executed.
893 if _expunge_test $seqnum; then
901 _stash_test_status "$seqnum" "$tc_status"
906 if [ ! -f $seq ]; then
907 echo " - no such test?"
908 _stash_test_status "$seqnum" "$tc_status"
912 # really going to try and run this one
# Clear per-test marker/artifact files left by any previous run.
914 rm -f ${RESULT_DIR}/require_scratch*
915 rm -f ${RESULT_DIR}/require_test*
916 rm -f $seqres.out.bad $seqres.hints
918 # check if we really should run it
919 if _expunge_test $seqnum; then
921 _stash_test_status "$seqnum" "$tc_status"
925 # record that we really tried to run this test.
# Only the first attempt (not -L reruns) is added to @try.
926 if ((!${#loop_status[*]})); then
# Show the test's last recorded runtime from check.time as a hint.
930 awk 'BEGIN {lasttime=" "} \
931 $1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \
932 END {printf "%s", lasttime}' "$check.time"
933 rm -f core $seqres.notrun
936 $timestamp && _timestamp
937 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
938 $LOGGER_PROG "run xfstest $seqnum"
# Announce the test in the kernel log so _check_dmesg can find the start
# marker when scanning dmesg afterwards.
939 if [ -w /dev/kmsg ]; then
940 export date_time=`date +"%F %T"`
941 echo "run fstests $seqnum at $date_time" > /dev/kmsg
942 # _check_dmesg depends on this log in dmesg
943 touch ${RESULT_DIR}/check_dmesg
944 rm -f ${RESULT_DIR}/dmesg_filter
946 _try_wipe_scratch_devs > /dev/null 2>&1
948 # clear the WARN_ONCE state to allow a potential problem
949 # to be reported for each test
950 (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
952 test_start_time="$(date +"%F %T")"
# Run the test, teeing output to the console only with -d.
953 if [ "$DUMP_OUTPUT" = true ]; then
954 _run_seq 2>&1 | tee $tmp.out
955 # Because $? would get tee's return code
958 _run_seq >$tmp.out 2>&1
962 # If someone sets kernel.core_pattern or kernel.core_uses_pid,
963 # coredumps generated by fstests might have a longer name than
964 # just "core". Use globbing to find the most common patterns,
965 # assuming there are no other coredump capture packages set up.
967 for i in core core.*; do
968 test -f "$i" || continue
# Report "[dumped core]" once even if several core files appear.
969 if ((cores++ == 0)); then
970 _dump_err_cont "[dumped core]"
972 (_adjust_oom_score 250; _save_coredump "$i")
# A $seqres.notrun file written by the test means "not run" with a reason.
976 if [ -f $seqres.notrun ]; then
977 $timestamp && _timestamp
979 $timestamp || echo -n "[not run] "
980 $timestamp && echo " [not run]" && \
981 echo -n " $seqnum -- "
984 _stash_test_status "$seqnum" "$tc_status"
986 # Unmount the scratch fs so that we can wipe the scratch
987 # dev state prior to the next test run.
988 _scratch_unmount 2> /dev/null
# Post-run evaluation: $sts is the test's exit status (captured on a
# missing line above).
992 if [ $sts -ne 0 ]; then
993 _dump_err_cont "[failed, exit status $sts]"
994 _test_unmount 2> /dev/null
995 _scratch_unmount 2> /dev/null
996 rm -f ${RESULT_DIR}/require_test*
997 rm -f ${RESULT_DIR}/require_scratch*
998 # Even though we failed, there may be something interesting in
999 # dmesg which can help debugging.
1001 (_adjust_oom_score 250; _check_filesystems)
1004 # The test apparently passed, so check for corruption
1005 # and log messages that shouldn't be there. Run the
1006 # checking tools from a subshell with adjusted OOM
1007 # score so that the OOM killer will target them instead
1008 # of the check script itself.
1009 (_adjust_oom_score 250; _check_filesystems) || tc_status="fail"
1010 _check_dmesg || tc_status="fail"
1012 # Save any coredumps from the post-test fs checks
1013 for i in core core.*; do
1014 test -f "$i" || continue
1015 if ((cores++ == 0)); then
1016 _dump_err_cont "[dumped core]"
1018 (_adjust_oom_score 250; _save_coredump "$i")
1023 # Reload the module after each test to check for leaks or
1025 if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
1026 _test_unmount 2> /dev/null
1027 _scratch_unmount 2> /dev/null
1028 modprobe -r fs-$FSTYP
1032 # Scan for memory leaks after every test so that associating
1033 # a leak to a particular test will be as accurate as possible.
1034 _check_kmemleak || tc_status="fail"
1036 # test ends after all checks are done.
1037 $timestamp && _timestamp
# A test without a golden output file cannot be judged at all.
1040 if [ ! -f $seq.out ]; then
1041 _dump_err "no qualified output"
1043 _stash_test_status "$seqnum" "$tc_status"
1047 # coreutils 8.16+ changed quote formats in error messages
1048 # from `foo' to 'foo'. Filter old versions to match the new
1050 sed -i "s/\`/\'/g" $tmp.out
# Compare captured output against the golden output; on a match, record
# the runtime in $tmp.time for the check.time database.
1051 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
1052 if [ "$tc_status" != "fail" ]; then
1053 echo "$seqnum `expr $stop - $start`" >>$tmp.time
1054 echo -n " `expr $stop - $start`s"
1058 _dump_err "- output mismatch (see $seqres.out.bad)"
1059 mv $tmp.out $seqres.out.bad
# Show at most DIFF_LENGTH lines of the diff (0 = the whole thing),
# indenting each line by four spaces.
1060 $diff $seq.out $seqres.out.bad | {
1061 if test "$DIFF_LENGTH" -le 0; then
1064 head -n "$DIFF_LENGTH"
1066 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
1067 " to see the entire diff)"
1068 fi; } | sed -e 's/^\(.\)/    \1/'
1071 if [ -f $seqres.hints ]; then
1072 if [ "$tc_status" == "fail" ]; then
1079 _stash_test_status "$seqnum" "$tc_status"
# End-of-section teardown.
1082 sect_stop=`_wallclock`
1088 _test_unmount 2> /dev/null
1089 _scratch_unmount 2> /dev/null
# Main driver: run every configured section, -i/-I times over. With -I
# (istop=true) iteration stops as soon as any test has failed.
1092 for ((iters = 0; iters < $iterations; iters++)) do
1093 for section in $HOST_OPTIONS_SECTIONS; do
1094 run_section $section
1095 if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
# Exit status is 1 if any test failed across the whole run, else 0.
1097 status=`expr $sum_bad != 0`
1104 status=`expr $sum_bad != 0`