2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
5 # Control script for QA
# Default to verbose per-section summaries; the -b option flips this to brief.
26 brief_test_summary=false
30 # This is a global variable used to pass test failure text to reporting gunk
33 # start the initialisation work now
# MSGVERB trims diagnostics from utilities that honour it to "text:action".
36 export MSGVERB="text:action"
# Check filesystem consistency after tests unless the caller already set
# QA_CHECK_FS (":=" assigns the default only when unset/empty).
37 export QA_CHECK_FS=${QA_CHECK_FS:=true}
39 # number of diff lines from a failed test, 0 for whole output
40 export DIFF_LENGTH=${DIFF_LENGTH:=10}
42 # by default don't output timestamps
43 timestamp=${TIMESTAMP:=false}
# Remove state left behind by a previous run before building new test lists.
# NOTE(review): $tmp/$here/$iam are set earlier in the file, outside this excerpt.
45 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
# Test directories shared by all filesystems; FSTYP-specific dirs are
# appended wherever the lists are consumed (e.g. "$SRC_GROUPS $FSTYP").
47 SRC_GROUPS="generic shared"
48 export SRC_DIR="tests"
# Print the help text. The trailing single quote on the next code line opens
# one long literal string holding the rest of the usage message; it is closed
# past the end of this excerpt, so no comments may be placed inside it.
52 echo "Usage: $0 [options] [testlist]"'
56 -glusterfs test GlusterFS
64 -udiff show unified diff (default)
65 -n show me, do not run tests
67 -r randomize test order
68 -d dump test output to stdout
70 -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
71 --large-fs optimise scratch device for large filesystems
72 -s section run only specified section from config file
73 -S section exclude the specified section from the config file
76 -g group[,group...] include tests from these groups
77 -x group[,group...] exclude tests from these groups
78 -X exclude_file exclude individual tests
79 -E external_file exclude individual tests
80 [testlist] include tests matching names in testlist
82 testlist argument is a list of tests in the form of <test dir>/<test name>.
84 <test dir> is a directory under tests that contains a group file,
85 with a list of the names of the tests in that directory.
87 <test name> may be either a specific test file name (e.g. xfs/001) or
88 a test file name match pattern (e.g. xfs/*).
90 group argument is either a name of a tests group to collect from all
91 the test dirs (e.g. quick) or a name of a tests group to collect from
92 a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
93 If you want to run all the tests in the test suite, use "-g all" to specify all
96 exclude_file argument refers to a name of a file inside each test directory.
97 for every test dir where this file is found, the listed test names are
98 excluded from the list of tests to run from that test dir.
100 external_file argument is a path to a single file containing a list of tests
101 to exclude in the form of <test dir>/<test name>.
107 check -x stress xfs/*
108 check -X .exclude -g auto
109 check -E ~/.xfstests.exclude
# Interior of get_sub_group_list: list tests in dir $d that belong to group
# $grp. Bail out unless the dir has a non-empty "group" index file.
119 test -s "$SRC_DIR/$d/group" || return 1
# sed keeps only lines whose test name matches $VALID_TEST_NAME and whose
# group column contains $grp, rewriting each hit to "$SRC_DIR/$d/<name>".
# (The sed invocation continues on the following line.)
121 local grpl=$(sed -n < $SRC_DIR/$d/group \
124 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
# Interior of get_group_list: resolve a group name to a list of test paths.
# A group may be qualified as <subdir>/<group>; dirname splits that apart
# ("." means the group was unqualified).
132 local sub=$(dirname $grp)
133 local fsgroup="$FSTYP"
135 if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
136 # group is given as <subdir>/<group> (e.g. xfs/quick)
138 get_sub_group_list $sub $grp
# ext2/ext3 have no test dir of their own — presumably fsgroup is redirected
# to the ext4 dir here; the assignment itself falls outside this excerpt.
142 if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
# Unqualified group: gather matches from every shared dir plus the FS dir.
145 for d in $SRC_GROUPS $fsgroup; do
146 if ! test -d "$SRC_DIR/$d" ; then
149 grpl="$grpl $(get_sub_group_list $d $grp)"
154 # Find all tests, excluding files that are test metadata such as group files.
155 # It matches test names against $VALID_TEST_NAME defined in common/rc
159 for d in $SRC_GROUPS $FSTYP; do
160 if ! test -d "$SRC_DIR/$d" ; then
# Keep only paths shaped like real tests, drop metadata, append to the
# master candidate list. (Pipeline continues onto the next line.)
165 grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
166 grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
170 # takes the list of tests to run in $tmp.list, and removes the tests passed to
171 # the function from that list.
# Patterns are batched: once 100 anchored patterns have accumulated in
# $tmp.grep, apply them in one grep -v -f pass to keep grep's cost bounded.
# NOTE(review): the counter reset after flushing is outside this excerpt.
180 if [ $numsed -gt 100 ]; then
181 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
182 mv $tmp.tmp $tmp.list
# Anchor each test name so "xfs/001" cannot also strip "xfs/0012".
186 echo "^$t\$" >>$tmp.grep
187 numsed=`expr $numsed + 1`
# Final flush for any patterns left in the last partial batch.
189 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
190 mv $tmp.tmp $tmp.list
209 # Tests specified on the command line
# Explicit tests take the place of any previously built candidate list.
210 if [ -s $tmp.arglist ]; then
211 cat $tmp.arglist > $tmp.list
216 # Specified groups to include
217 # Note that the CLI processing adds a leading space to the first group
218 # parameter, so we have to catch that here checking for "all"
219 if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
220 # no test numbers, do everything
# Otherwise expand each requested group and merge its tests in.
223 for group in $GROUP_LIST; do
224 list=$(get_group_list $group)
225 if [ -z "$list" ]; then
226 echo "Group \"$group\" is empty or not defined?"
# Append each test only if it is not already in the list (dedup).
231 grep -s "^$t\$" $tmp.list >/dev/null || \
232 echo "$t" >>$tmp.list
237 # Specified groups to exclude
238 for xgroup in $XGROUP_LIST; do
239 list=$(get_group_list $xgroup)
240 if [ -z "$list" ]; then
241 echo "Group \"$xgroup\" is empty or not defined?"
248 # sort the list of tests into numeric order
# Prefer shuf for randomisation when available; fall back to a seeded awk
# shuffle script otherwise. Without -r the sorter is presumably a no-op
# (set outside this excerpt).
250 if type shuf >& /dev/null; then
253 sorter="awk -v seed=$RANDOM -f randomize.awk"
258 list=`sort -n $tmp.list | uniq | $sorter`
262 # Process command arguments first.
# One iteration per option; the enclosing "case $1 in" line is omitted from
# this excerpt. Options taking a value consume it with an extra shift.
263 while [ $# -gt 0 ]; do
265 -\? | -h | --help) usage ;;
# Filesystem-type selectors: each one just overrides FSTYP.
268 -glusterfs) FSTYP=glusterfs ;;
271 -overlay) FSTYP=overlay; export OVERLAY=true ;;
272 -pvfs2) FSTYP=pvfs2 ;;
273 -tmpfs) FSTYP=tmpfs ;;
274 -ubifs) FSTYP=ubifs ;;
# -g/-x accumulate include/exclude groups; commas become spaces so a
# single argument may carry several groups.
276 -g) group=$2 ; shift ;
277 GROUP_LIST="$GROUP_LIST ${group//,/ }"
280 -x) xgroup=$2 ; shift ;
281 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
# -X names a per-test-dir exclude file, processed after common/rc is sourced.
284 -X) subdir_xfile=$2; shift ;
# -E appends an external exclude file (comments stripped) to $tmp.xlist.
286 -E) xfile=$2; shift ;
287 if [ -f $xfile ]; then
288 sed "s/#.*$//" "$xfile" >> $tmp.xlist
291 -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
292 -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
294 -udiff) diff="$diff -u" ;;
297 -r) randomize=true ;;
299 -T) timestamp=true ;;
300 -d) DUMP_OUTPUT=true ;;
301 -b) brief_test_summary=true;;
# -R accumulates report formats, comma-separated like -g/-x.
302 -R) report_fmt=$2 ; shift ;
303 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
306 --large-fs) export LARGE_SCRATCH_DEV=yes ;;
# Take the value after '=' from the option word itself ($1, the case
# selector). The original used ${r#*=}, but $r is never set anywhere in
# this script, so --extra-space= always exported an empty value.
307 --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
310 *) # not an argument, we've got tests now.
311 have_test_arg=true ;;
314 # if we've found a test specification, then break out of the processing
315 # loop before we shift the arguments so that this is the first argument
316 # that we process in the test arg loop below.
317 if $have_test_arg; then
324 # we need common/rc, that also sources common/config. We need to source it
325 # after processing args, overlay needs FSTYP set before sourcing common/config
326 if ! . ./common/rc; then
327 echo "check: failed to source common/rc"
# Expand the -X per-directory exclude file: for every test dir that carries
# one, strip comments and record each listed test as "<dir>/<name>".
331 if [ -n "$subdir_xfile" ]; then
332 for d in $SRC_GROUPS $FSTYP; do
333 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
334 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
335 echo $d/$f >> $tmp.xlist
340 # Process tests from command line now.
341 if $have_test_arg; then
342 while [ $# -gt 0 ]; do
# Options are rejected at this point; they must precede the test list.
344 -*) echo "Arguments before tests, please!"
348 *) # Expand test pattern (e.g. xfs/???, *fs/001)
# Glob expansion happens relative to $SRC_DIR via the subshell cd.
349 list=$(cd $SRC_DIR; echo $1)
346 
351 test_dir=`dirname $t`
352 test_dir=${test_dir#$SRC_DIR/*}
353 test_name=`basename $t`
354 group_file=$SRC_DIR/$test_dir/group
# Only accept names registered in the dir's group file; anything else is
# reported and skipped. (echo continues on the following line.)
356 if egrep -q "^$test_name" $group_file; then
357 # in group file ... OK
358 echo $SRC_DIR/$test_dir/$test_name \
362 echo "$t - unknown test, ignored"
370 elif [ -z "$GROUP_LIST" ]; then
371 # default group list is the auto group. If any other group or test is
372 # specified, we use that instead.
# The harness must run as root (error path; the exit is outside this excerpt).
378 echo "check: QA must be run as root"
# Cumulative state files (check.time / check.log) live under $RESULT_BASE.
393 check="$RESULT_BASE/check"
# Merge this run's per-test wallclock times into the cumulative check.time
# database; the awk program and redirection are partly outside this excerpt.
403 if [ -f $check.time -a -f $tmp.time ]; then
404 cat $check.time $tmp.time \
409 for (i in t) print i " " t[i]
413 mv $tmp.out $check.time
# Interior of the wrapup/summary reporting: write a per-section banner and
# the ran/not-run/failed counts to $tmp.summary, $check.log and stdout.
419 echo "SECTION -- $section" >>$tmp.summary
420 echo "=========================" >>$tmp.summary
# Only report "Ran:" details when at least one test was attempted.
421 if [ ! -z "$n_try" -a $n_try != 0 ]; then
422 if [ $brief_test_summary == "false" ]; then
424 echo "Ran:$try" >>$tmp.summary
426 echo "Ran:$try" >>$check.log
429 $interrupt && echo "Interrupted!" | tee -a $check.log
# Tests that declared themselves "not run" are listed separately.
431 if [ ! -z "$notrun" ]; then
432 if [ $brief_test_summary == "false" ]; then
433 echo "Not run:$notrun"
434 echo "Not run:$notrun" >>$tmp.summary
436 echo "Not run:$notrun" >>$check.log
# Failure report goes to stdout, the log and the summary alike.
439 if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
441 echo "Failed $n_bad of $n_try tests"
442 echo "Failures:$bad" >>$check.log
443 echo "Failed $n_bad of $n_try tests" >>$check.log
444 echo "Failures:$bad" >>$tmp.summary
445 echo "Failed $n_bad of $n_try tests" >>$tmp.summary
447 echo "Passed all $n_try tests"
448 echo "Passed all $n_try tests" >>$check.log
449 echo "Passed all $n_try tests" >>$tmp.summary
451 echo "" >>$tmp.summary
# Accumulate this section's failures into the run-wide total.
458 sum_bad=`expr $sum_bad + $n_bad`
460 rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
187 
461 if ! $OPTIONS_HAVE_SECTIONS; then
# Longest-line width of the summary, used for box drawing. NOTE(review):
# wc -L is a GNU extension — not portable to BSD wc.
472 count=`wc -L $tmp.summary | cut -f1 -d" "`
# Post-test device checks: a test drops require_test / require_scratch
# marker files when it used the corresponding device, triggering an fsck.
481 if [ -f ${RESULT_DIR}/require_test ]; then
482 _check_test_fs || err=true
483 rm -f ${RESULT_DIR}/require_test*
485 _test_unmount 2> /dev/null
487 if [ -f ${RESULT_DIR}/require_scratch ]; then
488 _check_scratch_fs || err=true
489 rm -f ${RESULT_DIR}/require_scratch*
491 _scratch_unmount 2> /dev/null
# Expunge check: a test id present in $tmp.xlist must not be run.
497 if [ -s $tmp.xlist ]; then
498 if grep -q $TEST_ID $tmp.xlist; then
506 # Make the check script unattractive to the OOM killer...
507 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
508 test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
510 # ...and make the tests themselves somewhat more attractive to it, so that if
511 # the system runs out of memory it'll be the test that gets killed and not the
514 bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
# Install cleanup/report traps for signals and normal exit; with multiple
# config sections the per-section _summary runs instead of plain _wrapup.
520 if $OPTIONS_HAVE_SECTIONS; then
521 trap "_summary; exit \$status" 0 1 2 3 15
523 trap "_wrapup; exit \$status" 0 1 2 3 15
# Outer loop: one pass of the whole test list per configuration section.
526 for section in $HOST_OPTIONS_SECTIONS; do
528 OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
529 get_next_config $section
531 # Do we need to run only some sections ?
532 if [ ! -z "$RUN_SECTION" ]; then
534 for s in $RUN_SECTION; do
535 if [ $section == $s ]; then
545 # Did this section get excluded?
546 if [ ! -z "$EXCLUDE_SECTION" ]; then
548 for s in $EXCLUDE_SECTION; do
549 if [ $section == $s ]; then
# Results directory must exist before any test writes into it.
559 mkdir -p $RESULT_BASE
560 if [ ! -d $RESULT_BASE ]; then
561 echo "failed to create results directory $RESULT_BASE"
566 if $OPTIONS_HAVE_SECTIONS; then
567 echo "SECTION -- $section"
570 sect_start=`_wallclock`
# Recreate the test device when requested or when the section changed the
# filesystem type; otherwise remount if only the mount options changed.
571 if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
572 echo "RECREATING -- $FSTYP on $TEST_DEV"
573 _test_unmount 2> /dev/null
574 if ! _test_mkfs >$tmp.err 2>&1
576 echo "our local _test_mkfs routine ..."
578 echo "check: failed to mkfs \$TEST_DEV using specified options"
584 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
589 elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
590 _test_unmount 2> /dev/null
593 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
602 check="$RESULT_BASE/check"
604 # don't leave old full output behind on a clean run
# Ensure the cumulative timing database exists even on a first run.
607 [ -f $check.time ] || touch $check.time
609 # print out our test configuration
610 echo "FSTYP -- `_full_fstyp_details`"
611 echo "PLATFORM -- `_full_platform_details`"
612 if [ ! -z "$SCRATCH_DEV" ]; then
613 echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
614 echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
# Sanity-check the scratch device up front so configuration errors surface
# before any test runs.
619 if [ ! -z "$SCRATCH_DEV" ]; then
620 _scratch_unmount 2> /dev/null
621 # call the overridden mkfs - make sure the FS is built
622 # the same as we'll create it later.
624 if ! _scratch_mkfs >$tmp.err 2>&1
626 echo "our local _scratch_mkfs routine ..."
628 echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
633 # call the overridden mount - make sure the FS mounts with
634 # the same options that we'll mount with later.
635 if ! _try_scratch_mount >$tmp.err 2>&1
637 echo "our local mount routine ..."
639 echo "check: failed to mount \$SCRATCH_DEV using specified options"
# Inner loop: run every test in the final, sorted/possibly-shuffled list.
653 for seq in $list ; do
654 # Run report for previous test!
657 n_bad=`expr $n_bad + 1`
# Emit the per-testcase report for the test that just finished (deferred
# by one iteration so its final status is known).
660 if $do_report && ! $first_test ; then
661 if [ $tc_status != "expunge" ] ; then
662 _make_testcase_report "$prev_seq" "$tc_status"
669 if [ ! -f $seq ]; then
670 # Try to get full name in case the user supplied only
671 # seq id and the test has a name. A bit of hassle to
672 # find really the test and not its sample output or
674 bname=$(basename $seq)
# Pick the shortest executable whose name starts with the given id —
# that skips ".out" files and similar. (find|awk spans three lines.)
675 full_seq=$(find $(dirname $seq) -name $bname* -executable |
676 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
677 END { print shortest }')
678 if [ -f $full_seq ] && \
679 [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
684 # the filename for the test and the name output are different.
685 # we don't include the tests/ directory in the name output.
686 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
688 # Similarly, the result directory needs to replace the tests/
689 # part of the test location.
691 if $OPTIONS_HAVE_SECTIONS; then
692 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
693 REPORT_DIR="$RESULT_BASE/$section"
695 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
696 REPORT_DIR="$RESULT_BASE"
698 seqres="$REPORT_DIR/$seqnum"
# Skip tests listed in the expunge list, counting them as not run.
704 _expunge_test $seqnum
705 if [ $? -eq 1 ]; then
713 n_notrun=`expr $n_notrun + 1`
# Guard against a test path that resolved to nothing runnable.
718 if [ ! -f $seq ]; then
719 echo " - no such test?"
723 # really going to try and run this one
724 rm -f $seqres.out.bad
726 # check if we really should run it
727 _expunge_test $seqnum
728 if [ $? -eq 1 ]; then
733 # record that we really tried to run this test.
735 n_try=`expr $n_try + 1`
737 # slashes now in names, sed barfs on them so use grep
# Show the previous recorded runtime for this test, if any.
738 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
739 if [ "X$lasttime" != X ]; then
740 echo -n " ${lasttime}s ... "
742 echo -n " " # prettier output with timestamps.
744 rm -f core $seqres.notrun
747 $timestamp && echo -n " ["`date "+%T"`"]"
748 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
749 $LOGGER_PROG "run xfstest $seqnum"
# Mark the test boundary in the kernel log so _check_dmesg can scan only
# messages emitted during this test.
750 if [ -w /dev/kmsg ]; then
751 export date_time=`date +"%F %T"`
752 echo "run fstests $seqnum at $date_time" > /dev/kmsg
753 # _check_dmesg depends on this log in dmesg
754 touch ${RESULT_DIR}/check_dmesg
756 _try_wipe_scratch_devs > /dev/null 2>&1
# Run the test, either teeing output live (-d) or capturing it quietly.
757 if [ "$DUMP_OUTPUT" = true ]; then
758 _run_seq 2>&1 | tee $tmp.out
759 # Because $? would get tee's return code
762 _run_seq >$tmp.out 2>&1
# Preserve any core dump under the test's id for later debugging.
767 _dump_err_cont "[dumped core]"
768 mv core $RESULT_BASE/$seqnum.core
# A test creates $seqres.notrun to declare itself skipped.
772 if [ -f $seqres.notrun ]; then
773 $timestamp && _timestamp
775 $timestamp || echo -n "[not run] "
776 $timestamp && echo " [not run]" && \
777 echo -n " $seqnum -- "
779 notrun="$notrun $seqnum"
780 n_notrun=`expr $n_notrun + 1`
# Non-zero exit: record the failure and clear any device state the test
# left behind so the next test starts clean.
785 if [ $sts -ne 0 ]; then
786 _dump_err_cont "[failed, exit status $sts]"
787 _test_unmount 2> /dev/null
788 _scratch_unmount 2> /dev/null
789 rm -f ${RESULT_DIR}/require_test*
790 rm -f ${RESULT_DIR}/require_scratch*
793 # the test apparently passed, so check for corruption
794 # and log messages that shouldn't be there.
796 _check_dmesg || err=true
799 # Scan for memory leaks after every test so that associating
800 # a leak to a particular test will be as accurate as possible.
801 _check_kmemleak || err=true
803 # test ends after all checks are done.
804 $timestamp && _timestamp
# Every test must ship a golden output file to compare against.
807 if [ ! -f $seq.out ]; then
808 _dump_err "no qualified output"
813 # coreutils 8.16+ changed quote formats in error messages
814 # from `foo' to 'foo'. Filter old versions to match the new
816 sed -i "s/\`/\'/g" $tmp.out
# Golden-output comparison: match means pass (record the runtime),
# mismatch preserves the bad output and shows a truncated diff.
817 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
819 echo "$seqnum `expr $stop - $start`" >>$tmp.time
820 echo -n " `expr $stop - $start`s"
824 _dump_err "- output mismatch (see $seqres.out.bad)"
825 mv $tmp.out $seqres.out.bad
826 $diff $seq.out $seqres.out.bad | {
827 if test "$DIFF_LENGTH" -le 0; then
830 head -n "$DIFF_LENGTH"
832 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
833 " to see the entire diff)"
834 fi; } | sed -e 's/^\(.\)/ \1/'
839 # make sure we record the status of the last test we ran.
842 n_bad=`expr $n_bad + 1`
# Deferred report for the final test (the loop only reports the previous
# test on each iteration, so the last one is flushed here).
845 if $do_report && ! $first_test ; then
846 if [ $tc_status != "expunge" ] ; then
847 _make_testcase_report "$prev_seq" "$tc_status"
851 sect_stop=`_wallclock`
857 _test_unmount 2> /dev/null
858 _scratch_unmount 2> /dev/null
# Overall exit status: 1 if any test in any section failed, else 0.
862 status=`expr $sum_bad != 0`