2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
5 # Control script for QA
26 brief_test_summary=false
30 # This is a global variable used to pass test failure text to reporting gunk
33 # start the initialisation work now
36 export MSGVERB="text:action"
37 export QA_CHECK_FS=${QA_CHECK_FS:=true}
39 # number of diff lines from a failed test, 0 for whole output
40 export DIFF_LENGTH=${DIFF_LENGTH:=10}
42 # by default don't output timestamps
43 timestamp=${TIMESTAMP:=false}
45 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
47 SRC_GROUPS="generic shared"
48 export SRC_DIR="tests"
52 echo "Usage: $0 [options] [testlist]"'
56 -glusterfs test GlusterFS
64 -udiff show unified diff (default)
65 -n show me, do not run tests
67 -r randomize test order
68 -d dump test output to stdout
70 -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
71 --large-fs optimise scratch device for large filesystems
72 -s section run only specified section from config file
73 -S section exclude the specified section from the config file
76 -g group[,group...] include tests from these groups
77 -x group[,group...] exclude tests from these groups
78 -X exclude_file exclude individual tests
79 -E external_file exclude individual tests
80 [testlist] include tests matching names in testlist
82 testlist argument is a list of tests in the form of <test dir>/<test name>.
84 <test dir> is a directory under tests that contains a group file,
85 with a list of the names of the tests in that directory.
87 <test name> may be either a specific test file name (e.g. xfs/001) or
88 a test file name match pattern (e.g. xfs/*).
90 group argument is either a name of a tests group to collect from all
91 the test dirs (e.g. quick) or a name of a tests group to collect from
92 a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
93 If you want to run all the tests in the test suite, use "-g all" to specify all
96 exclude_file argument refers to a name of a file inside each test directory.
97 for every test dir where this file is found, the listed test names are
98 excluded from the list of tests to run from that test dir.
100 external_file argument is a path to a single file containing a list of tests
101 to exclude in the form of <test dir>/<test name>.
107 check -x stress xfs/*
108 check -X .exclude -g auto
109 check -E ~/.xfstests.exclude
119 test -s "$SRC_DIR/$d/group" || return 1
121 local grpl=$(sed -n < $SRC_DIR/$d/group \
124 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
132 local sub=$(dirname $grp)
134 if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
135 # group is given as <subdir>/<group> (e.g. xfs/quick)
137 get_sub_group_list $sub $grp
141 for d in $SRC_GROUPS $FSTYP; do
142 if ! test -d "$SRC_DIR/$d" ; then
145 grpl="$grpl $(get_sub_group_list $d $grp)"
150 # Find all tests, excluding files that are test metadata such as group files.
151 # It matches test names against $VALID_TEST_NAME defined in common/rc
155 for d in $SRC_GROUPS $FSTYP; do
156 if ! test -d "$SRC_DIR/$d" ; then
161 grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
162 grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
166 # takes the list of tests to run in $tmp.list, and removes the tests passed to
167 # the function from that list.
176 if [ $numsed -gt 100 ]; then
177 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
178 mv $tmp.tmp $tmp.list
182 echo "^$t\$" >>$tmp.grep
183 numsed=`expr $numsed + 1`
185 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
186 mv $tmp.tmp $tmp.list
205 # Tests specified on the command line
206 if [ -s $tmp.arglist ]; then
207 cat $tmp.arglist > $tmp.list
212 # Specified groups to include
213 # Note that the CLI processing adds a leading space to the first group
214 # parameter, so we have to catch that here when checking for "all"
215 if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
216 # no test numbers, do everything
219 for group in $GROUP_LIST; do
220 list=$(get_group_list $group)
221 if [ -z "$list" ]; then
222 echo "Group \"$group\" is empty or not defined?"
227 grep -s "^$t\$" $tmp.list >/dev/null || \
228 echo "$t" >>$tmp.list
233 # Specified groups to exclude
234 for xgroup in $XGROUP_LIST; do
235 list=$(get_group_list $xgroup)
236 if [ -z "$list" ]; then
237 echo "Group \"$xgroup\" is empty or not defined?"
244 # sort the list of tests into numeric order
245 list=`sort -n $tmp.list | uniq`
250 list=`echo $list | awk -f randomize.awk`
254 # Process command arguments first.
255 while [ $# -gt 0 ]; do
257 -\? | -h | --help) usage ;;
260 -glusterfs) FSTYP=glusterfs ;;
263 -overlay) FSTYP=overlay; export OVERLAY=true ;;
264 -pvfs2) FSTYP=pvfs2 ;;
265 -tmpfs) FSTYP=tmpfs ;;
266 -ubifs) FSTYP=ubifs ;;
268 -g) group=$2 ; shift ;
269 GROUP_LIST="$GROUP_LIST ${group//,/ }"
272 -x) xgroup=$2 ; shift ;
273 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
276 -X) subdir_xfile=$2; shift ;
278 -E) xfile=$2; shift ;
279 if [ -f $xfile ]; then
280 sed "s/#.*$//" "$xfile" >> $tmp.xlist
283 -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
284 -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
286 -udiff) diff="$diff -u" ;;
289 -r) randomize=true ;;
291 -T) timestamp=true ;;
292 -d) DUMP_OUTPUT=true ;;
293 -b) brief_test_summary=true;;
294 -R) report_fmt=$2 ; shift ;
295 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
298 --large-fs) export LARGE_SCRATCH_DEV=yes ;;
299 --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
302 *) # not an argument, we've got tests now.
303 have_test_arg=true ;;
306 # if we've found a test specification, then break out of the processing
307 # loop before we shift the arguments so that this is the first argument
308 # that we process in the test arg loop below.
309 if $have_test_arg; then
316 # we need common/rc, that also sources common/config. We need to source it
317 # after processing args, overlay needs FSTYP set before sourcing common/config
318 if ! . ./common/rc; then
319 echo "check: failed to source common/rc"
323 if [ -n "$subdir_xfile" ]; then
324 for d in $SRC_GROUPS $FSTYP; do
325 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
326 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
327 echo $d/$f >> $tmp.xlist
332 # Process tests from command line now.
333 if $have_test_arg; then
334 while [ $# -gt 0 ]; do
336 -*) echo "Arguments before tests, please!"
340 *) # Expand test pattern (e.g. xfs/???, *fs/001)
341 list=$(cd $SRC_DIR; echo $1)
343 test_dir=`dirname $t`
344 test_dir=${test_dir#$SRC_DIR/*}
345 test_name=`basename $t`
346 group_file=$SRC_DIR/$test_dir/group
348 if egrep -q "^$test_name" $group_file; then
349 # in group file ... OK
350 echo $SRC_DIR/$test_dir/$test_name \
354 echo "$t - unknown test, ignored"
362 elif [ -z "$GROUP_LIST" ]; then
363 # default group list is the auto group. If any other group or test is
364 # specified, we use that instead.
370 echo "check: QA must be run as root"
385 check="$RESULT_BASE/check"
395 if [ -f $check.time -a -f $tmp.time ]; then
396 cat $check.time $tmp.time \
401 for (i in t) print i " " t[i]
405 mv $tmp.out $check.time
411 echo "SECTION -- $section" >>$tmp.summary
412 echo "=========================" >>$tmp.summary
413 if [ ! -z "$n_try" -a $n_try != 0 ]; then
414 if [ $brief_test_summary == "false" ]; then
416 echo "Ran:$try" >>$tmp.summary
418 echo "Ran:$try" >>$check.log
421 $interrupt && echo "Interrupted!" | tee -a $check.log
423 if [ ! -z "$notrun" ]; then
424 if [ $brief_test_summary == "false" ]; then
425 echo "Not run:$notrun"
426 echo "Not run:$notrun" >>$tmp.summary
428 echo "Not run:$notrun" >>$check.log
431 if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
433 echo "Failed $n_bad of $n_try tests"
434 echo "Failures:$bad" >>$check.log
435 echo "Failed $n_bad of $n_try tests" >>$check.log
436 echo "Failures:$bad" >>$tmp.summary
437 echo "Failed $n_bad of $n_try tests" >>$tmp.summary
439 echo "Passed all $n_try tests"
440 echo "Passed all $n_try tests" >>$check.log
441 echo "Passed all $n_try tests" >>$tmp.summary
443 echo "" >>$tmp.summary
450 sum_bad=`expr $sum_bad + $n_bad`
452 rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
453 if ! $OPTIONS_HAVE_SECTIONS; then
464 count=`wc -L $tmp.summary | cut -f1 -d" "`
473 if [ -f ${RESULT_DIR}/require_test ]; then
474 _check_test_fs || err=true
475 rm -f ${RESULT_DIR}/require_test*
477 _test_unmount 2> /dev/null
479 if [ -f ${RESULT_DIR}/require_scratch ]; then
480 _check_scratch_fs || err=true
481 rm -f ${RESULT_DIR}/require_scratch*
483 _scratch_unmount 2> /dev/null
490 if [ -s $tmp.xlist ]; then
491 if grep -q $TEST_ID $tmp.xlist; then
502 if $OPTIONS_HAVE_SECTIONS; then
503 trap "_summary; exit \$status" 0 1 2 3 15
505 trap "_wrapup; exit \$status" 0 1 2 3 15
508 for section in $HOST_OPTIONS_SECTIONS; do
510 OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
511 get_next_config $section
513 # Do we need to run only some sections ?
514 if [ ! -z "$RUN_SECTION" ]; then
516 for s in $RUN_SECTION; do
517 if [ $section == $s ]; then
527 # Did this section get excluded?
528 if [ ! -z "$EXCLUDE_SECTION" ]; then
530 for s in $EXCLUDE_SECTION; do
531 if [ $section == $s ]; then
541 mkdir -p $RESULT_BASE
542 if [ ! -d $RESULT_BASE ]; then
543 echo "failed to create results directory $RESULT_BASE"
548 if $OPTIONS_HAVE_SECTIONS; then
549 echo "SECTION -- $section"
552 sect_start=`_wallclock`
553 if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
554 echo "RECREATING -- $FSTYP on $TEST_DEV"
555 _test_unmount 2> /dev/null
556 if ! _test_mkfs >$tmp.err 2>&1
558 echo "our local _test_mkfs routine ..."
560 echo "check: failed to mkfs \$TEST_DEV using specified options"
566 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
571 elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
572 _test_unmount 2> /dev/null
575 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
584 check="$RESULT_BASE/check"
586 # don't leave old full output behind on a clean run
589 [ -f $check.time ] || touch $check.time
591 # print out our test configuration
592 echo "FSTYP -- `_full_fstyp_details`"
593 echo "PLATFORM -- `_full_platform_details`"
594 if [ ! -z "$SCRATCH_DEV" ]; then
595 echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
596 echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
601 if [ ! -z "$SCRATCH_DEV" ]; then
602 _scratch_unmount 2> /dev/null
603 # call the overridden mkfs - make sure the FS is built
604 # the same as we'll create it later.
606 if ! _scratch_mkfs >$tmp.err 2>&1
608 echo "our local _scratch_mkfs routine ..."
610 echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
615 # call the overridden mount - make sure the FS mounts with
616 # the same options that we'll mount with later.
617 if ! _try_scratch_mount >$tmp.err 2>&1
619 echo "our local mount routine ..."
621 echo "check: failed to mount \$SCRATCH_DEV using specified options"
633 for seq in $list ; do
634 # Run report for previous test!
637 n_bad=`expr $n_bad + 1`
640 if $do_report && ! $first_test ; then
641 if [ $tc_status != "expunge" ] ; then
642 _make_testcase_report "$prev_seq" "$tc_status"
649 if [ ! -f $seq ]; then
650 # Try to get full name in case the user supplied only
651 # seq id and the test has a name. A bit of hassle to
652 # find really the test and not its sample output or
654 bname=$(basename $seq)
655 full_seq=$(find $(dirname $seq) -name $bname* -executable |
656 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
657 END { print shortest }')
658 if [ -f $full_seq ] && \
659 [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
664 # the filename for the test and the name output are different.
665 # we don't include the tests/ directory in the name output.
666 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
668 # Similarly, the result directory needs to replace the tests/
669 # part of the test location.
671 if $OPTIONS_HAVE_SECTIONS; then
672 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
673 REPORT_DIR="$RESULT_BASE/$section"
675 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
676 REPORT_DIR="$RESULT_BASE"
678 seqres="$REPORT_DIR/$seqnum"
684 _expunge_test $seqnum
685 if [ $? -eq 1 ]; then
693 n_notrun=`expr $n_notrun + 1`
698 if [ ! -f $seq ]; then
699 echo " - no such test?"
703 # really going to try and run this one
704 rm -f $seqres.out.bad
706 # check if we really should run it
707 _expunge_test $seqnum
708 if [ $? -eq 1 ]; then
713 # record that we really tried to run this test.
715 n_try=`expr $n_try + 1`
717 # slashes are now in the test names; sed barfs on them, so use grep
718 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
719 if [ "X$lasttime" != X ]; then
720 echo -n " ${lasttime}s ... "
722 echo -n " " # prettier output with timestamps.
724 rm -f core $seqres.notrun
727 $timestamp && echo -n " ["`date "+%T"`"]"
728 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
729 $LOGGER_PROG "run xfstest $seqnum"
730 if [ -w /dev/kmsg ]; then
731 export date_time=`date +"%F %T"`
732 echo "run fstests $seqnum at $date_time" > /dev/kmsg
733 # _check_dmesg depends on this log in dmesg
734 touch ${RESULT_DIR}/check_dmesg
736 if [ "$DUMP_OUTPUT" = true ]; then
737 ./$seq 2>&1 | tee $tmp.out
738 # Because $? would get tee's return code
741 ./$seq >$tmp.out 2>&1
746 _dump_err_cont "[dumped core]"
747 mv core $RESULT_BASE/$seqnum.core
751 if [ -f $seqres.notrun ]; then
752 $timestamp && _timestamp
754 $timestamp || echo -n "[not run] "
755 $timestamp && echo " [not run]" && \
756 echo -n " $seqnum -- "
758 notrun="$notrun $seqnum"
759 n_notrun=`expr $n_notrun + 1`
764 if [ $sts -ne 0 ]; then
765 _dump_err_cont "[failed, exit status $sts]"
766 _test_unmount 2> /dev/null
767 _scratch_unmount 2> /dev/null
770 # the test apparently passed, so check for corruption
771 # and log messages that shouldn't be there.
773 _check_dmesg || err=true
774 _check_kmemleak || err=true
777 # test ends after all checks are done.
778 $timestamp && _timestamp
781 if [ ! -f $seq.out ]; then
782 _dump_err "no qualified output"
787 # coreutils 8.16+ changed quote formats in error messages
788 # from `foo' to 'foo'. Filter old versions to match the new
790 sed -i "s/\`/\'/g" $tmp.out
791 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
793 echo "$seqnum `expr $stop - $start`" >>$tmp.time
794 echo -n " `expr $stop - $start`s"
798 _dump_err "- output mismatch (see $seqres.out.bad)"
799 mv $tmp.out $seqres.out.bad
800 $diff $seq.out $seqres.out.bad | {
801 if test "$DIFF_LENGTH" -le 0; then
804 head -n "$DIFF_LENGTH"
806 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
807 " to see the entire diff)"
808 fi; } | sed -e 's/^\(.\)/ \1/'
813 # make sure we record the status of the last test we ran.
816 n_bad=`expr $n_bad + 1`
819 if $do_report && ! $first_test ; then
820 if [ $tc_status != "expunge" ] ; then
821 _make_testcase_report "$prev_seq" "$tc_status"
825 sect_stop=`_wallclock`
831 _test_unmount 2> /dev/null
832 _scratch_unmount 2> /dev/null
836 status=`expr $sum_bad != 0`