2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
5 # Control script for QA
# --- Global defaults and initialisation ---
# Terse summary mode; the -b command-line flag flips this to true.
26 brief_test_summary=false
30 # This is a global variable used to pass test failure text to reporting gunk
33 # start the initialisation work now
# Exported so child test processes inherit them.
36 export MSGVERB="text:action"
# ${VAR:=default} assigns the default when VAR is unset or empty, so callers
# may pre-set these in the environment to override.
37 export QA_CHECK_FS=${QA_CHECK_FS:=true}
39 # number of diff lines from a failed test, 0 for whole output
40 export DIFF_LENGTH=${DIFF_LENGTH:=10}
42 # by default don't output timestamps
43 timestamp=${TIMESTAMP:=false}
# Remove stale state files from any previous run.
# NOTE(review): $tmp, $here and $iam are assumed to be set earlier in the
# file (not visible in this fragment) -- verify before relying on this.
45 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
# Test directories shared by all filesystem types; per-$FSTYP directories
# are appended wherever these are consumed below.
47 SRC_GROUPS="generic shared"
48 export SRC_DIR="tests"
# Command-line usage/help text. The trailing single quote on the echo below
# appears to open a multi-line literal string holding the rest of the help
# message, so no comments can be interleaved in the lines that follow
# (they would become part of the printed text).
52 echo "Usage: $0 [options] [testlist]"'
56 -glusterfs test GlusterFS
64 -udiff show unified diff (default)
65 -n show me, do not run tests
67 -r randomize test order
68 -d dump test output to stdout
70 -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
71 --large-fs optimise scratch device for large filesystems
72 -s section run only specified section from config file
73 -S section exclude the specified section from the config file
76 -g group[,group...] include tests from these groups
77 -x group[,group...] exclude tests from these groups
78 -X exclude_file exclude individual tests
79 -E external_file exclude individual tests
80 [testlist] include tests matching names in testlist
82 testlist argument is a list of tests in the form of <test dir>/<test name>.
84 <test dir> is a directory under tests that contains a group file,
85 with a list of the names of the tests in that directory.
87 <test name> may be either a specific test file name (e.g. xfs/001) or
88 a test file name match pattern (e.g. xfs/*).
90 group argument is either a name of a tests group to collect from all
91 the test dirs (e.g. quick) or a name of a tests group to collect from
92 a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
93 If you want to run all the tests in the test suite, use "-g all" to specify all
96 exclude_file argument refers to a name of a file inside each test directory.
97 for every test dir where this file is found, the listed test names are
98 excluded from the list of tests to run from that test dir.
100 external_file argument is a path to a single file containing a list of tests
101 to exclude in the form of <test dir>/<test name>.
107 check -x stress xfs/*
108 check -X .exclude -g auto
109 check -E ~/.xfstests.exclude
# Per-directory group lookup (fragment): bail out unless the directory has a
# non-empty group file, then sed the names of tests tagged with group $grp
# into $SRC_DIR/<dir>/<name> paths.
# NOTE(review): the sed command is a multi-line continuation; the -e
# expressions between these lines are not visible in this fragment.
119 test -s "$SRC_DIR/$d/group" || return 1
121 local grpl=$(sed -n < $SRC_DIR/$d/group \
124 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
# Resolve a group spec into a list of tests. A group of the form
# <subdir>/<group> restricts the search to one test directory; a bare group
# name is searched in every directory in $SRC_GROUPS plus $FSTYP.
132 local sub=$(dirname $grp)
# NOTE(review): '-a' inside [ ] is obsolescent per POSIX; [[ ... && ... ]]
# would be the modern form if this were being rewritten.
134 if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
135 # group is given as <subdir>/<group> (e.g. xfs/quick)
137 get_sub_group_list $sub $grp
# Bare group name: accumulate matches from every known test directory,
# silently skipping directories that do not exist for this config.
141 for d in $SRC_GROUPS $FSTYP; do
142 if ! test -d "$SRC_DIR/$d" ; then
145 grpl="$grpl $(get_sub_group_list $d $grp)"
150 # Find all tests, excluding files that are test metadata such as group files.
151 # It matches test names against $VALID_TEST_NAME defined in common/rc
155 for d in $SRC_GROUPS $FSTYP; do
156 if ! test -d "$SRC_DIR/$d" ; then
# Filter candidate paths down to valid test names, dropping metadata files;
# results accumulate into $tmp.list.
# NOTE(review): the producer feeding this grep pipeline (original lines
# 157-160) is not visible in this fragment -- verify upstream.
161 grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
162 grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
166 # takes the list of tests to run in $tmp.list, and removes the tests passed to
167 # the function from that list.
# Apply the accumulated exclusion patterns in batches: once more than 100
# patterns have been collected, filter the list and (presumably) restart the
# pattern file, keeping the grep -f pattern file small.
176 if [ $numsed -gt 100 ]; then
177 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
178 mv $tmp.tmp $tmp.list
# Anchor each excluded test name so e.g. xfs/001 does not also match
# xfs/0011.
182 echo "^$t\$" >>$tmp.grep
# NOTE(review): backticks + expr are legacy; $((numsed + 1)) is the modern
# equivalent if this were being rewritten.
183 numsed=`expr $numsed + 1`
# Final flush of any remaining patterns after the loop.
185 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
186 mv $tmp.tmp $tmp.list
205 # Tests specified on the command line
206 if [ -s $tmp.arglist ]; then
207 cat $tmp.arglist > $tmp.list
212 # Specified groups to include
213 # Note that the CLI processing adds a leading space to the first group
214 # parameter, so we have to catch that here checking for "all"
215 if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
216 # no test numbers, do everything
# Expand each requested group into test paths and append them.
219 for group in $GROUP_LIST; do
220 list=$(get_group_list $group)
221 if [ -z "$list" ]; then
222 echo "Group \"$group\" is empty or not defined?"
# Deduplicate: only append a test if it is not already in the list.
227 grep -s "^$t\$" $tmp.list >/dev/null || \
228 echo "$t" >>$tmp.list
233 # Specified groups to exclude
234 for xgroup in $XGROUP_LIST; do
235 list=$(get_group_list $xgroup)
236 if [ -z "$list" ]; then
237 echo "Group \"$xgroup\" is empty or not defined?"
244 # sort the list of tests into numeric order
# Randomisation prefers shuf(1) when available, falling back to an awk
# script seeded from $RANDOM.
246 if type shuf >& /dev/null; then
249 sorter="awk -v seed=$RANDOM -f randomize.awk"
# NOTE(review): 'sort -n | uniq' could be 'sort -nu'; kept as-is.
254 list=`sort -n $tmp.list | uniq | $sorter`
258 # Process command arguments first.
259 while [ $# -gt 0 ]; do
261 -\? | -h | --help) usage ;;
# Filesystem-type selection flags set $FSTYP before common/config is
# sourced (overlay additionally exports OVERLAY for later checks).
264 -glusterfs) FSTYP=glusterfs ;;
267 -overlay) FSTYP=overlay; export OVERLAY=true ;;
268 -pvfs2) FSTYP=pvfs2 ;;
269 -tmpfs) FSTYP=tmpfs ;;
270 -ubifs) FSTYP=ubifs ;;
# ${var//,/ } turns a comma-separated option argument into a
# space-separated word list (this is what introduces the leading space
# noted in the "all" check above).
272 -g) group=$2 ; shift ;
273 GROUP_LIST="$GROUP_LIST ${group//,/ }"
276 -x) xgroup=$2 ; shift ;
277 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
# -X names a per-test-directory exclude file; expanded later, after
# common/rc is sourced.
280 -X) subdir_xfile=$2; shift ;
# -E names one external exclude file; strip '#' comments and append.
282 -E) xfile=$2; shift ;
283 if [ -f $xfile ]; then
284 sed "s/#.*$//" "$xfile" >> $tmp.xlist
287 -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
288 -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
290 -udiff) diff="$diff -u" ;;
293 -r) randomize=true ;;
295 -T) timestamp=true ;;
296 -d) DUMP_OUTPUT=true ;;
297 -b) brief_test_summary=true;;
298 -R) report_fmt=$2 ; shift ;
299 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
302 --large-fs) export LARGE_SCRATCH_DEV=yes ;;
# NOTE(review): $r is not defined anywhere visible in this fragment; this
# looks like it should be ${1#*=} to strip the '--extra-space=' prefix
# from the current argument -- verify against the full file/upstream.
303 --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
306 *) # not an argument, we've got tests now.
307 have_test_arg=true ;;
310 # if we've found a test specification, the break out of the processing
311 # loop before we shift the arguments so that this is the first argument
312 # that we process in the test arg loop below.
313 if $have_test_arg; then
320 # we need common/rc, that also sources common/config. We need to source it
321 # after processing args, overlay needs FSTYP set before sourcing common/config
322 if ! . ./common/rc; then
323 echo "check: failed to source common/rc"
# Expand the -X per-directory exclude file now that $SRC_GROUPS/$FSTYP are
# final: for each test dir containing the file, strip '#' comments and
# record each excluded test as <dir>/<name> in $tmp.xlist.
327 if [ -n "$subdir_xfile" ]; then
328 for d in $SRC_GROUPS $FSTYP; do
329 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
330 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
331 echo $d/$f >> $tmp.xlist
336 # Process tests from command line now.
337 if $have_test_arg; then
338 while [ $# -gt 0 ]; do
# Options are required to precede test names; anything dash-prefixed here
# is an error.
340 -*) echo "Arguments before tests, please!"
344 *) # Expand test pattern (e.g. xfs/???, *fs/001)
# cd into the tests dir so the shell itself glob-expands the pattern.
345 list=$(cd $SRC_DIR; echo $1)
347 test_dir=`dirname $t`
348 test_dir=${test_dir#$SRC_DIR/*}
349 test_name=`basename $t`
350 group_file=$SRC_DIR/$test_dir/group
# Only accept names listed in the directory's group file.
# NOTE(review): egrep is deprecated; 'grep -E -q' is the modern spelling.
352 if egrep -q "^$test_name" $group_file; then
353 # in group file ... OK
354 echo $SRC_DIR/$test_dir/$test_name \
358 echo "$t - unknown test, ignored"
366 elif [ -z "$GROUP_LIST" ]; then
367 # default group list is the auto group. If any other group or test is
368 # specified, we use that instead.
# The suite manipulates devices and mounts, so refuse to run unprivileged.
374 echo "check: QA must be run as root"
# End-of-run wrap-up (fragment): merge per-test runtimes into $check.time,
# then emit the Ran/Not-run/Failed summary to stdout, $check.log and
# $tmp.summary.
389 check="$RESULT_BASE/check"
# The next four lines straddle an awk program whose body is partly outside
# this fragment; no comments are interleaved to avoid landing inside the
# awk quoting.
399 if [ -f $check.time -a -f $tmp.time ]; then
400 cat $check.time $tmp.time \
405 for (i in t) print i " " t[i]
409 mv $tmp.out $check.time
415 echo "SECTION -- $section" >>$tmp.summary
416 echo "=========================" >>$tmp.summary
417 if [ ! -z "$n_try" -a $n_try != 0 ]; then
# Brief mode suppresses the per-test "Ran:" list on the summary channels.
418 if [ $brief_test_summary == "false" ]; then
420 echo "Ran:$try" >>$tmp.summary
422 echo "Ran:$try" >>$check.log
425 $interrupt && echo "Interrupted!" | tee -a $check.log
427 if [ ! -z "$notrun" ]; then
428 if [ $brief_test_summary == "false" ]; then
429 echo "Not run:$notrun"
430 echo "Not run:$notrun" >>$tmp.summary
432 echo "Not run:$notrun" >>$check.log
# Failure tallies go to all three destinations; otherwise report a clean
# pass.
435 if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
437 echo "Failed $n_bad of $n_try tests"
438 echo "Failures:$bad" >>$check.log
439 echo "Failed $n_bad of $n_try tests" >>$check.log
440 echo "Failures:$bad" >>$tmp.summary
441 echo "Failed $n_bad of $n_try tests" >>$tmp.summary
443 echo "Passed all $n_try tests"
444 echo "Passed all $n_try tests" >>$check.log
445 echo "Passed all $n_try tests" >>$tmp.summary
447 echo "" >>$tmp.summary
# Accumulate this section's failures into the whole-run total.
454 sum_bad=`expr $sum_bad + $n_bad`
# NOTE(review): blanket globs under /tmp could remove files belonging to
# other processes -- confirm these patterns are only produced by fstests.
456 rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
457 if ! $OPTIONS_HAVE_SECTIONS; then
# NOTE(review): 'wc -L' (longest-line length) is a GNU extension and is not
# portable to BSD/busybox wc.
468 count=`wc -L $tmp.summary | cut -f1 -d" "`
# Post-test filesystem checks (fragment): a test drops require_test /
# require_scratch marker files in $RESULT_DIR to request that the
# corresponding device be checked after it runs; the markers are consumed
# (removed) here and the device unmounted.
477 if [ -f ${RESULT_DIR}/require_test ]; then
478 _check_test_fs || err=true
479 rm -f ${RESULT_DIR}/require_test*
481 _test_unmount 2> /dev/null
483 if [ -f ${RESULT_DIR}/require_scratch ]; then
484 _check_scratch_fs || err=true
485 rm -f ${RESULT_DIR}/require_scratch*
487 _scratch_unmount 2> /dev/null
# Expunge lookup (fragment): a test is skipped when its id appears in the
# accumulated exclusion list.
# NOTE(review): $TEST_ID is unquoted and used as a grep pattern, so it is
# matched as a regex substring, not a literal/anchored name -- verify the
# surrounding (not visible) code compensates for this.
493 if [ -s $tmp.xlist ]; then
494 if grep -q $TEST_ID $tmp.xlist; then
502 # Make the check script unattractive to the OOM killer...
503 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
# -1000 is the minimum oom_score_adj, i.e. "never kill me". The -w test
# keeps this a silent no-op where /proc is absent or read-only.
504 test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
506 # ...and make the tests themselves somewhat more attractive to it, so that if
507 # the system runs out of memory it'll be the test that gets killed and not the
# The child shell raises its own score to 250 and then exec's the test so
# the adjustment applies to the test process itself.
# NOTE(review): this line presumably sits inside a test-runner function
# whose definition is outside this fragment -- verify.
510 bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
# Install cleanup handlers on EXIT(0), HUP(1), INT(2), QUIT(3) and TERM(15).
# $status is escaped so it is expanded when the trap fires, not when it is
# installed. Multi-section runs summarise across sections; single-section
# runs go straight to _wrapup.
516 if $OPTIONS_HAVE_SECTIONS; then
517 trap "_summary; exit \$status" 0 1 2 3 15
519 trap "_wrapup; exit \$status" 0 1 2 3 15
# Main per-section loop (fragment): load the section's config, honour
# -s/-S section filters, prepare the results directory, then (re)create and
# mount the test and scratch devices as needed.
522 for section in $HOST_OPTIONS_SECTIONS; do
# Remember the previous section's mount options so we can detect below
# whether a remount is required.
524 OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
525 get_next_config $section
527 # Do we need to run only some sections ?
528 if [ ! -z "$RUN_SECTION" ]; then
530 for s in $RUN_SECTION; do
531 if [ $section == $s ]; then
541 # Did this section get excluded?
542 if [ ! -z "$EXCLUDE_SECTION" ]; then
544 for s in $EXCLUDE_SECTION; do
545 if [ $section == $s ]; then
555 mkdir -p $RESULT_BASE
556 if [ ! -d $RESULT_BASE ]; then
557 echo "failed to create results directory $RESULT_BASE"
562 if $OPTIONS_HAVE_SECTIONS; then
563 echo "SECTION -- $section"
566 sect_start=`_wallclock`
# Recreate the test device when requested or when the fs type changed
# between sections; remount alone suffices when only mount options changed.
567 if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
568 echo "RECREATING -- $FSTYP on $TEST_DEV"
569 _test_unmount 2> /dev/null
570 if ! _test_mkfs >$tmp.err 2>&1
572 echo "our local _test_mkfs routine ..."
574 echo "check: failed to mkfs \$TEST_DEV using specified options"
580 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
585 elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
586 _test_unmount 2> /dev/null
589 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
598 check="$RESULT_BASE/check"
600 # don't leave old full output behind on a clean run
603 [ -f $check.time ] || touch $check.time
605 # print out our test configuration
606 echo "FSTYP -- `_full_fstyp_details`"
607 echo "PLATFORM -- `_full_platform_details`"
608 if [ ! -z "$SCRATCH_DEV" ]; then
609 echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
610 echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
# Sanity-check the scratch device up front so a misconfiguration fails the
# whole section rather than every individual test.
615 if [ ! -z "$SCRATCH_DEV" ]; then
616 _scratch_unmount 2> /dev/null
617 # call the overridden mkfs - make sure the FS is built
618 # the same as we'll create it later.
620 if ! _scratch_mkfs >$tmp.err 2>&1
622 echo "our local _scratch_mkfs routine ..."
624 echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
629 # call the overridden mount - make sure the FS mounts with
630 # the same options that we'll mount with later.
631 if ! _try_scratch_mount >$tmp.err 2>&1
633 echo "our local mount routine ..."
635 echo "check: failed to mount \$SCRATCH_DEV using specified options"
# Per-test loop (fragment): for each test path in $list, resolve its full
# name, honour expunges, run it, then compare output against the golden
# .out file and record pass/fail/notrun.
649 for seq in $list ; do
650 # Run report for previous test!
656 n_bad=`expr $n_bad + 1`
# Reports are emitted one test late, once the previous test's final status
# ($tc_status) is known; expunged tests generate no report.
656 if $do_report && ! $first_test ; then
657 if [ $tc_status != "expunge" ] ; then
658 _make_testcase_report "$prev_seq" "$tc_status"
665 if [ ! -f $seq ]; then
666 # Try to get full name in case the user supplied only
667 # seq id and the test has a name. A bit of hassle to
668 # find really the test and not its sample output or
670 bname=$(basename $seq)
# Pick the shortest executable match so "001" resolves to the test itself
# rather than a longer-named sibling. (find|awk spans a continuation; no
# comments interleaved.)
671 full_seq=$(find $(dirname $seq) -name $bname* -executable |
672 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
673 END { print shortest }')
674 if [ -f $full_seq ] && \
675 [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
680 # the filename for the test and the name output are different.
681 # we don't include the tests/ directory in the name output.
682 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
684 # Similarly, the result directory needs to replace the tests/
685 # part of the test location.
# Sectioned runs namespace results under $RESULT_BASE/<section>.
687 if $OPTIONS_HAVE_SECTIONS; then
688 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
689 REPORT_DIR="$RESULT_BASE/$section"
691 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
692 REPORT_DIR="$RESULT_BASE"
694 seqres="$REPORT_DIR/$seqnum"
# _expunge_test returns 1 when this test is in the exclusion list; count it
# as not-run rather than attempting it.
700 _expunge_test $seqnum
701 if [ $? -eq 1 ]; then
709 n_notrun=`expr $n_notrun + 1`
714 if [ ! -f $seq ]; then
715 echo " - no such test?"
719 # really going to try and run this one
720 rm -f $seqres.out.bad
722 # check if we really should run it
723 _expunge_test $seqnum
724 if [ $? -eq 1 ]; then
729 # record that we really tried to run this test.
731 n_try=`expr $n_try + 1`
733 # slashes now in names, sed barfs on them so use grep
# Show the previous recorded runtime for this test, if any, as a hint.
734 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
735 if [ "X$lasttime" != X ]; then
736 echo -n " ${lasttime}s ... "
738 echo -n " " # prettier output with timestamps.
740 rm -f core $seqres.notrun
743 $timestamp && echo -n " ["`date "+%T"`"]"
744 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
745 $LOGGER_PROG "run xfstest $seqnum"
# Announce the test in the kernel log so dmesg output can be attributed to
# a specific test afterwards.
746 if [ -w /dev/kmsg ]; then
747 export date_time=`date +"%F %T"`
748 echo "run fstests $seqnum at $date_time" > /dev/kmsg
749 # _check_dmesg depends on this log in dmesg
750 touch ${RESULT_DIR}/check_dmesg
752 _try_wipe_scratch_devs > /dev/null 2>&1
# -d mode tees output live to stdout; otherwise capture silently.
753 if [ "$DUMP_OUTPUT" = true ]; then
754 _run_seq 2>&1 | tee $tmp.out
755 # Because $? would get tee's return code
758 _run_seq >$tmp.out 2>&1
# Preserve any core dump under the test's id for later debugging.
763 _dump_err_cont "[dumped core]"
764 mv core $RESULT_BASE/$seqnum.core
# A test that created $seqres.notrun skipped itself (unmet prerequisites).
768 if [ -f $seqres.notrun ]; then
769 $timestamp && _timestamp
771 $timestamp || echo -n "[not run] "
772 $timestamp && echo " [not run]" && \
773 echo -n " $seqnum -- "
775 notrun="$notrun $seqnum"
776 n_notrun=`expr $n_notrun + 1`
# Non-zero exit: record the failure and force both devices unmounted so the
# next test starts from a known state.
781 if [ $sts -ne 0 ]; then
782 _dump_err_cont "[failed, exit status $sts]"
783 _test_unmount 2> /dev/null
784 _scratch_unmount 2> /dev/null
785 rm -f ${RESULT_DIR}/require_test*
786 rm -f ${RESULT_DIR}/require_scratch*
789 # the test apparently passed, so check for corruption
790 # and log messages that shouldn't be there.
792 _check_dmesg || err=true
795 # Scan for memory leaks after every test so that associating
796 # a leak to a particular test will be as accurate as possible.
797 _check_kmemleak || err=true
799 # test ends after all checks are done.
800 $timestamp && _timestamp
# Every test must ship a golden output file; its absence is itself a
# failure.
803 if [ ! -f $seq.out ]; then
804 _dump_err "no qualified output"
809 # coreutils 8.16+ changed quote formats in error messages
810 # from `foo' to 'foo'. Filter old versions to match the new
812 sed -i "s/\`/\'/g" $tmp.out
813 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
# Match: record and display the wallclock runtime.
815 echo "$seqnum `expr $stop - $start`" >>$tmp.time
816 echo -n " `expr $stop - $start`s"
820 _dump_err "- output mismatch (see $seqres.out.bad)"
821 mv $tmp.out $seqres.out.bad
# The diff is truncated to $DIFF_LENGTH lines (0 = unlimited) and indented;
# the brace group below spans a continuation, so no comments interleaved.
822 $diff $seq.out $seqres.out.bad | {
823 if test "$DIFF_LENGTH" -le 0; then
826 head -n "$DIFF_LENGTH"
828 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
829 " to see the entire diff)"
830 fi; } | sed -e 's/^\(.\)/ \1/'
835 # make sure we record the status of the last test we ran.
838 n_bad=`expr $n_bad + 1`
# Emit the report for the final test, since the in-loop reporting above
# always runs one test behind.
841 if $do_report && ! $first_test ; then
842 if [ $tc_status != "expunge" ] ; then
843 _make_testcase_report "$prev_seq" "$tc_status"
# Section teardown: stop the section clock, leave both devices unmounted,
# and derive the script's exit status -- 'expr a != 0' prints 1 when any
# test failed across all sections, 0 otherwise.
847 sect_stop=`_wallclock`
853 _test_unmount 2> /dev/null
854 _scratch_unmount 2> /dev/null
858 status=`expr $sum_bad != 0`