2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
5 # Control script for QA
24 brief_test_summary=false
31 # This is a global variable used to pass test failure text to reporting gunk
34 # start the initialisation work now
37 # mkfs.xfs uses the presence of both of these variables to enable formerly
38 # supported tiny filesystem configurations that fstests use for fuzz testing
39 # in a controlled environment
40 export MSGVERB="text:action"
41 export QA_CHECK_FS=${QA_CHECK_FS:=true}
43 # number of diff lines from a failed test, 0 for whole output
44 export DIFF_LENGTH=${DIFF_LENGTH:=10}
46 # by default don't output timestamps
47 timestamp=${TIMESTAMP:=false}
49 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
51 SRC_GROUPS="generic shared"
52 export SRC_DIR="tests"
56 echo "Usage: $0 [options] [testlist]"'
60 -glusterfs test GlusterFS
63 -virtiofs test virtiofs
69 -udiff show unified diff (default)
70 -n show me, do not run tests
72 -r randomize test order
73 --exact-order run tests in the exact order specified
74 -i <n> iterate the test list <n> times
75 -I <n> iterate the test list <n> times, but stop iterating on any test failure
76 -d dump test output to stdout
78 -R fmt[,fmt] generate report in formats specified. Supported formats: xunit, xunit-quiet
79 --large-fs optimise scratch device for large filesystems
80 -s section run only specified section from config file
81 -S section exclude the specified section from the config file
82 -L <n> loop tests <n> times following a failure, measuring aggregate pass/fail metrics
85 -g group[,group...] include tests from these groups
86 -x group[,group...] exclude tests from these groups
87 -X exclude_file exclude tests listed in exclude_file within each test dir
88 -e testlist exclude a specific list of tests
89 -E external_file exclude tests listed in the given external file
90 [testlist] include tests matching names in testlist
92 testlist argument is a list of tests in the form of <test dir>/<test name>.
94 <test dir> is a directory under tests that contains a group.list file,
95 with a list of the names of the tests in that directory.
97 <test name> may be either a specific test file name (e.g. xfs/001) or
98 a test file name match pattern (e.g. xfs/*).
100 group argument is either the name of a test group to collect from all
101 the test dirs (e.g. quick) or the name of a test group to collect from
102 a specific test dir in the form of <test dir>/<group name> (e.g. xfs/quick).
103 If you want to run all the tests in the test suite, use "-g all" to specify all
106 exclude_file argument refers to the name of a file inside each test directory.
107 For every test dir where this file is found, the listed test names are
108 excluded from the list of tests to run from that test dir.
110 external_file argument is a path to a single file containing a list of tests
111 to exclude in the form of <test dir>/<test name>.
117 check -x stress xfs/*
118 check -X .exclude -g auto
119 check -E ~/.xfstests.exclude
129 test -s "$SRC_DIR/$d/group.list" || return 1
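# A group.list line pairs a test name with its groups, something like
# "001 auto quick rw" (illustrative only); the sed below keeps the names
# whose group list contains $grp.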
131 local grpl=$(sed -n < $SRC_DIR/$d/group.list \
134 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
142 local sub=$(dirname $grp)
143 local fsgroup="$FSTYP"
145 if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
146 # group is given as <subdir>/<group> (e.g. xfs/quick)
148 get_sub_group_list $sub $grp
152 if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
155 for d in $SRC_GROUPS $fsgroup; do
156 if ! test -d "$SRC_DIR/$d" ; then
159 grpl="$grpl $(get_sub_group_list $d $grp)"
164 # Find all tests, excluding files that are test metadata such as group files.
165 # It matches test names against $VALID_TEST_NAME defined in common/rc
169 for d in $SRC_GROUPS $FSTYP; do
170 if ! test -d "$SRC_DIR/$d" ; then
175 grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
176 grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
180 # takes the list of tests to run in $tmp.list, and removes the tests passed to
181 # the function from that list.
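# For example (hypothetical invocation), "trim_test_list tests/generic/001
# tests/xfs/002" drops those two entries from $tmp.list. The patterns are
# pushed through grep in batches of ~100, apparently to keep the pattern
# file small.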
190 if [ $numsed -gt 100 ]; then
191 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
192 mv $tmp.tmp $tmp.list
196 echo "^$t\$" >>$tmp.grep
197 numsed=`expr $numsed + 1`
199 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
200 mv $tmp.tmp $tmp.list
212 local now=`date "+%T"`
219 # Tests specified on the command line
220 if [ -s $tmp.arglist ]; then
221 cat $tmp.arglist > $tmp.list
226 # Specified groups to include
227 # Note that the CLI processing adds a leading space to the first group
228 # parameter, so we have to catch that here when checking for "all"
229 if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
230 # no test numbers, do everything
233 for group in $GROUP_LIST; do
234 list=$(get_group_list $group)
235 if [ -z "$list" ]; then
236 echo "Group \"$group\" is empty or not defined?"
241 grep -s "^$t\$" $tmp.list >/dev/null || \
242 echo "$t" >>$tmp.list
247 # Specified groups to exclude
248 for xgroup in $XGROUP_LIST; do
249 list=$(get_group_list $xgroup)
250 if [ -z "$list" ]; then
251 echo "Group \"$xgroup\" is empty or not defined?"
258 # sort the list of tests into numeric order unless we're running tests
259 # in the exact order specified
260 if ! $exact_order; then
262 if type shuf >& /dev/null; then
265 sorter="awk -v seed=$RANDOM -f randomize.awk"
270 list=`sort -n $tmp.list | uniq | $sorter`
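# The de-duplicated, numerically sorted list is shuffled by $sorter when -r
# was given (shuf if available, randomize.awk otherwise).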
277 # Process command arguments first.
278 while [ $# -gt 0 ]; do
280 -\? | -h | --help) usage ;;
282 -nfs|-glusterfs|-cifs|-9p|-virtiofs|-pvfs2|-tmpfs|-ubifs)
290 -g) group=$2 ; shift ;
291 GROUP_LIST="$GROUP_LIST ${group//,/ }"
294 -x) xgroup=$2 ; shift ;
295 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
298 -X) subdir_xfile=$2; shift ;
302 echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
305 -E) xfile=$2; shift ;
306 if [ -f $xfile ]; then
307 sed "s/#.*$//" "$xfile" >> $tmp.xlist
310 -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
311 -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
313 -udiff) diff="$diff -u" ;;
317 if $exact_order; then
318 echo "Cannot specify -r and --exact-order."
325 echo "Cannot specify --exact-order and -r."
330 -i) iterations=$2; shift ;;
331 -I) iterations=$2; istop=true; shift ;;
332 -T) timestamp=true ;;
333 -d) DUMP_OUTPUT=true ;;
334 -b) brief_test_summary=true;;
335 -R) report_fmt=$2 ; shift ;
336 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
339 --large-fs) export LARGE_SCRATCH_DEV=yes ;;
340 --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
341 -L) [[ $2 =~ ^[0-9]+$ ]] || usage
342 loop_on_fail=$2; shift
346 *) # not an argument, we've got tests now.
347 have_test_arg=true ;;
350 # if we've found a test specification, then break out of the processing
351 # loop before we shift the arguments so that this is the first argument
352 # that we process in the test arg loop below.
353 if $have_test_arg; then
360 # we need common/rc, which also sources common/config. We need to source it
361 # after processing args, as overlay needs FSTYP set before sourcing common/config
362 if ! . ./common/rc; then
363 echo "check: failed to source common/rc"
367 if [ -n "$subdir_xfile" ]; then
368 for d in $SRC_GROUPS $FSTYP; do
369 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
370 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
371 echo $d/$f >> $tmp.xlist
376 # Process tests from command line now.
377 if $have_test_arg; then
378 while [ $# -gt 0 ]; do
380 -*) echo "Arguments before tests, please!"
384 *) # Expand test pattern (e.g. xfs/???, *fs/001)
385 list=$(cd $SRC_DIR; echo $1)
387 test_dir=`dirname $t`
388 test_dir=${test_dir#$SRC_DIR/*}
389 test_name=`basename $t`
390 group_file=$SRC_DIR/$test_dir/group.list
392 if egrep -q "^$test_name" $group_file; then
393 # in group file ... OK
394 echo $SRC_DIR/$test_dir/$test_name \
398 echo "$t - unknown test, ignored"
406 elif [ -z "$GROUP_LIST" ]; then
407 # default group list is the auto group. If any other group or test is
408 # specified, we use that instead.
414 echo "check: QA must be run as root"
426 echo "$1" >> $check.log
427 if $OPTIONS_HAVE_SECTIONS; then
428 echo "$1" >> ${REPORT_DIR}/check.log
435 check="$RESULT_BASE/check"
437 if $showme && $needwrap; then
439 # $showme = all selected tests are notrun (no tries)
440 _make_section_report "$section" "${#notrun[*]}" "0" \
442 "$((sect_stop - sect_start))"
446 if [ -f $check.time -a -f $tmp.time ]; then
447 cat $check.time $tmp.time \
452 for (i in t) print i " " t[i]
456 mv $tmp.out $check.time
457 if $OPTIONS_HAVE_SECTIONS; then
458 cp $check.time ${REPORT_DIR}/check.time
463 _global_log "$(date)"
465 echo "SECTION -- $section" >>$tmp.summary
466 echo "=========================" >>$tmp.summary
467 if ((${#try[*]} > 0)); then
468 if [ $brief_test_summary == "false" ]; then
469 echo "Ran: ${try[*]}"
470 echo "Ran: ${try[*]}" >>$tmp.summary
472 _global_log "Ran: ${try[*]}"
475 $interrupt && echo "Interrupted!" | tee -a $check.log
476 if $OPTIONS_HAVE_SECTIONS; then
477 $interrupt && echo "Interrupted!" | tee -a \
478 ${REPORT_DIR}/check.log
481 if ((${#notrun[*]} > 0)); then
482 if [ $brief_test_summary == "false" ]; then
483 echo "Not run: ${notrun[*]}"
484 echo "Not run: ${notrun[*]}" >>$tmp.summary
486 _global_log "Not run: ${notrun[*]}"
489 if ((${#bad[*]} > 0)); then
490 echo "Failures: ${bad[*]}"
491 echo "Failed ${#bad[*]} of ${#try[*]} tests"
492 _global_log "Failures: ${bad[*]}"
493 _global_log "Failed ${#bad[*]} of ${#try[*]} tests"
494 echo "Failures: ${bad[*]}" >>$tmp.summary
495 echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary
497 echo "Passed all ${#try[*]} tests"
498 _global_log "Passed all ${#try[*]} tests"
499 echo "Passed all ${#try[*]} tests" >>$tmp.summary
501 echo "" >>$tmp.summary
503 _make_section_report "$section" "${#try[*]}" \
504 "${#bad[*]}" "${#notrun[*]}" \
505 "$((sect_stop - sect_start))"
510 sum_bad=`expr $sum_bad + ${#bad[*]}`
512 rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
513 if ! $OPTIONS_HAVE_SECTIONS; then
524 count=`wc -L $tmp.summary | cut -f1 -d" "`
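# (wc -L reports the length of the longest line in $tmp.summary; cut keeps
# just the number)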
535 if [ -f ${RESULT_DIR}/require_test ]; then
536 _check_test_fs || ret=1
537 rm -f ${RESULT_DIR}/require_test*
539 _test_unmount 2> /dev/null
541 if [ -f ${RESULT_DIR}/require_scratch ]; then
542 _check_scratch_fs || ret=1
543 rm -f ${RESULT_DIR}/require_scratch*
545 _scratch_unmount 2> /dev/null
552 if [ -s $tmp.xlist ]; then
553 if grep -q $TEST_ID $tmp.xlist; then
561 # retain files which would be overwritten in subsequent reruns of the same test
562 _stash_fail_loop_files() {
563 local seq_prefix="${REPORT_DIR}/${1}"
566 for i in ".full" ".dmesg" ".out.bad" ".notrun" ".core" ".hints"; do
567 rm -f "${seq_prefix}${i}${cp_suffix}"
568 if [ -f "${seq_prefix}${i}" ]; then
569 cp "${seq_prefix}${i}" "${seq_prefix}${i}${cp_suffix}"
574 # Retain in @bad / @notrun the result of the just-run @test_seq. @try array
575 # entries are added prior to execution.
576 _stash_test_status() {
578 local test_status="$2"
580 if $do_report && [[ $test_status != "expunge" ]]; then
581 _make_testcase_report "$section" "$test_seq" \
582 "$test_status" "$((stop - start))"
585 if ((${#loop_status[*]} > 0)); then
586 # continuing or completing rerun-on-failure loop
587 _stash_fail_loop_files "$test_seq" ".rerun${#loop_status[*]}"
588 loop_status+=("$test_status")
589 if ((${#loop_status[*]} > loop_on_fail)); then
590 printf "%s aggregate results across %d runs: " \
591 "$test_seq" "${#loop_status[*]}"
593 n=split(\"${loop_status[*]}\", arr);"'
594 for (i = 1; i <= n; i++)
597 printf("%s=%d (%.1f%%)",
598 (i-- > n ? x : ", " x),
599 stats[x], 100 * stats[x] / n);
604 return # only stash @bad result for initial failure in loop
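# The aggregate line printed above ends up looking roughly like this
# (illustrative): generic/001 aggregate results across 5 runs:
# pass=3 (60.0%), fail=2 (40.0%)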
607 case "$test_status" in
609 if ((loop_on_fail > 0)); then
610 # initial failure, start rerun-on-failure loop
611 _stash_fail_loop_files "$test_seq" ".rerun0"
612 loop_status+=("$test_status")
617 notrun+=("$test_seq")
622 echo "Unexpected test $test_seq status: $test_status"
627 # Can we run systemd scopes?
629 systemctl reset-failed "fstests-check" &>/dev/null
630 systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
631 test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
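# (getting 77 back confirms the probe command really ran inside a scope)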
633 # Make the check script unattractive to the OOM killer...
634 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
635 function _adjust_oom_score() {
636 test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
638 _adjust_oom_score -500
640 # ...and make the tests themselves somewhat more attractive to it, so that if
641 # the system runs out of memory it'll be the test that gets killed and not the
642 # test framework. The test is run in a separate process without any of our
643 # functions, so we open-code adjusting the OOM score.
645 # If systemd is available, run the entire test script in a scope so that we can
646 # kill all subprocesses of the test if it fails to clean up after itself. This
647 # is essential for ensuring that the post-test unmount succeeds. Note that
648 # systemd doesn't automatically remove transient scopes that fail to terminate
649 # when systemd tells them to terminate (e.g. programs stuck in D state when
650 # systemd sends SIGKILL), so we use reset-failed to tear down the scope.
652 local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
654 if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
655 local unit="$(systemd-escape "fs$seq").scope"
656 systemctl reset-failed "${unit}" &> /dev/null
657 systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
659 systemctl stop "${unit}" &> /dev/null
669 if $OPTIONS_HAVE_SECTIONS; then
670 trap "_summary; exit \$status" 0 1 2 3 15
672 trap "_wrapup; exit \$status" 0 1 2 3 15
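# Both handlers above run on normal exit as well as on HUP, INT, QUIT and
# TERM (signals 1, 2, 3 and 15).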
675 function run_section()
677 local section=$1 skip
680 OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
681 get_next_config $section
683 # Do we need to run only some sections?
684 if [ ! -z "$RUN_SECTION" ]; then
686 for s in $RUN_SECTION; do
687 if [ $section == $s ]; then
697 # Did this section get excluded?
698 if [ ! -z "$EXCLUDE_SECTION" ]; then
700 for s in $EXCLUDE_SECTION; do
701 if [ $section == $s ]; then
711 mkdir -p $RESULT_BASE
712 if [ ! -d $RESULT_BASE ]; then
713 echo "failed to create results directory $RESULT_BASE"
718 if $OPTIONS_HAVE_SECTIONS; then
719 echo "SECTION -- $section"
722 sect_start=`_wallclock`
723 if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
724 echo "RECREATING -- $FSTYP on $TEST_DEV"
725 _test_unmount 2> /dev/null
726 if ! _test_mkfs >$tmp.err 2>&1
728 echo "our local _test_mkfs routine ..."
730 echo "check: failed to mkfs \$TEST_DEV using specified options"
736 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
740 # TEST_DEV has been recreated, previous FSTYP derived from
741 # TEST_DEV could be changed, source common/rc again with
742 # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
745 elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
746 _test_unmount 2> /dev/null
749 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
758 check="$RESULT_BASE/check"
760 # don't leave old full output behind on a clean run
763 [ -f $check.time ] || touch $check.time
765 # print out our test configuration
766 echo "FSTYP -- `_full_fstyp_details`"
767 echo "PLATFORM -- `_full_platform_details`"
768 if [ ! -z "$SCRATCH_DEV" ]; then
769 echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
770 echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
775 if [ ! -z "$SCRATCH_DEV" ]; then
776 _scratch_unmount 2> /dev/null
777 # call the overridden mkfs - make sure the FS is built
778 # the same as we'll create it later.
780 if ! _scratch_mkfs >$tmp.err 2>&1
782 echo "our local _scratch_mkfs routine ..."
784 echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
789 # call the overridden mount - make sure the FS mounts with
790 # the same options that we'll mount with later.
791 if ! _try_scratch_mount >$tmp.err 2>&1
793 echo "our local mount routine ..."
795 echo "check: failed to mount \$SCRATCH_DEV using specified options"
806 loop_status=() # track rerun-on-failure state
808 local -a _list=( $list )
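# Note: the loop index below only advances when loop_status is empty, i.e.
# when we are not in the middle of a rerun-on-failure loop for the current
# test.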
809 for ((ix = 0; ix < ${#_list[*]}; !${#loop_status[*]} && ix++)); do
812 if [ ! -f $seq ]; then
813 # Try to get full name in case the user supplied only
814 # seq id and the test has a name. A bit of hassle to
815 # really find the test and not its sample output or
817 bname=$(basename $seq)
818 full_seq=$(find $(dirname $seq) -name $bname* -executable |
819 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
820 END { print shortest }')
821 if [ -f $full_seq ] && \
822 [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
827 # the filename for the test and the name output are different.
828 # we don't include the tests/ directory in the name output.
829 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
831 # Similarly, the result directory needs to replace the tests/
832 # part of the test location.
834 if $OPTIONS_HAVE_SECTIONS; then
835 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
836 REPORT_DIR="$RESULT_BASE/$section"
838 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
839 REPORT_DIR="$RESULT_BASE"
841 seqres="$REPORT_DIR/$seqnum"
844 rm -f ${RESULT_DIR}/require_scratch*
845 rm -f ${RESULT_DIR}/require_test*
849 _expunge_test $seqnum
850 if [ $? -eq 1 ]; then
858 _stash_test_status "$seqnum" "$tc_status"
863 if [ ! -f $seq ]; then
864 echo " - no such test?"
865 _stash_test_status "$seqnum" "$tc_status"
869 # really going to try and run this one
870 rm -f $seqres.out.bad $seqres.hints
872 # check if we really should run it
873 _expunge_test $seqnum
874 if [ $? -eq 1 ]; then
876 _stash_test_status "$seqnum" "$tc_status"
880 # record that we really tried to run this test.
881 if ((!${#loop_status[*]})); then
885 awk 'BEGIN {lasttime=" "} \
886 $1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \
887 END {printf "%s", lasttime}' "$check.time"
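# (the awk above prints this test's previous runtime from check.time, if
# any, as a hint before the test runs)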
888 rm -f core $seqres.notrun
891 $timestamp && _timestamp
892 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
893 $LOGGER_PROG "run xfstest $seqnum"
894 if [ -w /dev/kmsg ]; then
895 export date_time=`date +"%F %T"`
896 echo "run fstests $seqnum at $date_time" > /dev/kmsg
897 # _check_dmesg depends on this log in dmesg
898 touch ${RESULT_DIR}/check_dmesg
900 _try_wipe_scratch_devs > /dev/null 2>&1
902 # clear the WARN_ONCE state to allow a potential problem
903 # to be reported for each test
904 (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
906 if [ "$DUMP_OUTPUT" = true ]; then
907 _run_seq 2>&1 | tee $tmp.out
908 # Because $? would get tee's return code, not the test's
911 _run_seq >$tmp.out 2>&1
916 _dump_err_cont "[dumped core]"
917 mv core $RESULT_BASE/$seqnum.core
921 if [ -f $seqres.notrun ]; then
922 $timestamp && _timestamp
924 $timestamp || echo -n "[not run] "
925 $timestamp && echo " [not run]" && \
926 echo -n " $seqnum -- "
929 _stash_test_status "$seqnum" "$tc_status"
931 # Unmount the scratch fs so that we can wipe the scratch
932 # dev state prior to the next test run.
933 _scratch_unmount 2> /dev/null
937 if [ $sts -ne 0 ]; then
938 _dump_err_cont "[failed, exit status $sts]"
939 _test_unmount 2> /dev/null
940 _scratch_unmount 2> /dev/null
941 rm -f ${RESULT_DIR}/require_test*
942 rm -f ${RESULT_DIR}/require_scratch*
945 # The test apparently passed, so check for corruption
946 # and log messages that shouldn't be there. Run the
947 # checking tools from a subshell with adjusted OOM
948 # score so that the OOM killer will target them instead
949 # of the check script itself.
950 (_adjust_oom_score 250; _check_filesystems) || tc_status="fail"
951 _check_dmesg || tc_status="fail"
954 # Reload the module after each test to check for leaks or
956 if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
957 _test_unmount 2> /dev/null
958 _scratch_unmount 2> /dev/null
959 modprobe -r fs-$FSTYP
963 # Scan for memory leaks after every test so that associating
964 # a leak to a particular test will be as accurate as possible.
965 _check_kmemleak || tc_status="fail"
967 # test ends after all checks are done.
968 $timestamp && _timestamp
971 if [ ! -f $seq.out ]; then
972 _dump_err "no qualified output"
974 _stash_test_status "$seqnum" "$tc_status"
978 # coreutils 8.16+ changed quote formats in error messages
979 # from `foo' to 'foo'. Filter old versions to match the new
981 sed -i "s/\`/\'/g" $tmp.out
982 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
983 if [ "$tc_status" != "fail" ]; then
984 echo "$seqnum `expr $stop - $start`" >>$tmp.time
985 echo -n " `expr $stop - $start`s"
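# The per-test runtime recorded in $tmp.time above is folded into check.time
# at wrapup and shown as the runtime hint on the next run.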
989 _dump_err "- output mismatch (see $seqres.out.bad)"
990 mv $tmp.out $seqres.out.bad
991 $diff $seq.out $seqres.out.bad | {
992 if test "$DIFF_LENGTH" -le 0; then
995 head -n "$DIFF_LENGTH"
997 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
998 " to see the entire diff)"
999 fi; } | sed -e 's/^\(.\)/ \1/'
1002 if [ -f $seqres.hints ]; then
1003 if [ "$tc_status" == "fail" ]; then
1010 _stash_test_status "$seqnum" "$tc_status"
1013 sect_stop=`_wallclock`
1019 _test_unmount 2> /dev/null
1020 _scratch_unmount 2> /dev/null
1023 for ((iters = 0; iters < $iterations; iters++)) do
1024 for section in $HOST_OPTIONS_SECTIONS; do
1025 run_section $section
1026 if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
1028 status=`expr $sum_bad != 0`
1035 status=`expr $sum_bad != 0`