2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
5 # Control script for QA
26 brief_test_summary=false
30 # This is a global variable used to pass test failure text to reporting gunk
33 # start the initialisation work now
36 export MSGVERB="text:action"
37 export QA_CHECK_FS=${QA_CHECK_FS:=true}
39 # number of diff lines from a failed test, 0 for whole output
40 export DIFF_LENGTH=${DIFF_LENGTH:=10}
42 # by default don't output timestamps
43 timestamp=${TIMESTAMP:=false}
45 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
47 SRC_GROUPS="generic shared"
48 export SRC_DIR="tests"
# NOTE(review): the trailing single quote below opens a multi-line string
# literal that carries the rest of the usage text; its closing quote lies
# beyond this excerpt.  Do not edit the text content casually - it is
# program output, not comments.
echo "Usage: $0 [options] [testlist]"'
-glusterfs test GlusterFS
-virtiofs test virtiofs
-udiff show unified diff (default)
-n show me, do not run tests
-r randomize test order
-d dump test output to stdout
-R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
--large-fs optimise scratch device for large filesystems
-s section run only specified section from config file
-S section exclude the specified section from the config file
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
-X exclude_file exclude individual tests
-E external_file exclude individual tests
[testlist] include tests matching names in testlist
testlist argument is a list of tests in the form of <test dir>/<test name>.
<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.
<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).
group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
exclude_file argument refers to a name of a file inside each test directory.
for every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.
external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.
check -x stress xfs/*
check -X .exclude -g auto
check -E ~/.xfstests.exclude
120 test -s "$SRC_DIR/$d/group" || return 1
122 local grpl=$(sed -n < $SRC_DIR/$d/group \
125 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
local sub=$(dirname $grp)
local fsgroup="$FSTYP"
if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
# group is given as <subdir>/<group> (e.g. xfs/quick)
get_sub_group_list $sub $grp
# NOTE(review): presumably ext2/ext3 share a test dir with another fstype
# and $fsgroup is remapped in the (elided) branch body - confirm against
# the full file.
if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
# Collect the group's tests from every candidate test directory.
for d in $SRC_GROUPS $fsgroup; do
if ! test -d "$SRC_DIR/$d" ; then
grpl="$grpl $(get_sub_group_list $d $grp)"
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
for d in $SRC_GROUPS $FSTYP; do
if ! test -d "$SRC_DIR/$d" ; then
# Keep only paths that look like test names; drop group files/Makefiles.
grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
# Batch removal: once 100 anchored patterns have accumulated, filter the
# list through a single "grep -v -f" instead of one grep per test.
if [ $numsed -gt 100 ]; then
grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
mv $tmp.tmp $tmp.list
# Anchor each name so e.g. xfs/001 cannot also strip xfs/0011.
echo "^$t\$" >>$tmp.grep
numsed=`expr $numsed + 1`
# Flush whatever patterns remain after the loop.
grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
mv $tmp.tmp $tmp.list
# Tests specified on the command line
if [ -s $tmp.arglist ]; then
cat $tmp.arglist > $tmp.list
# Specified groups to include
# Note that the CLI processing adds a leading space to the first group
# parameter, so we have to catch that here checking for "all"
if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
# no test numbers, do everything
for group in $GROUP_LIST; do
list=$(get_group_list $group)
if [ -z "$list" ]; then
echo "Group \"$group\" is empty or not defined?"
# Append each group member unless it is already in the run list.
grep -s "^$t\$" $tmp.list >/dev/null || \
echo "$t" >>$tmp.list
# Specified groups to exclude
for xgroup in $XGROUP_LIST; do
list=$(get_group_list $xgroup)
if [ -z "$list" ]; then
echo "Group \"$xgroup\" is empty or not defined?"
# sort the list of tests into numeric order
# For -r, prefer shuf if available; otherwise fall back to the awk
# shuffler seeded from $RANDOM.
if type shuf >& /dev/null; then
sorter="awk -v seed=$RANDOM -f randomize.awk"
list=`sort -n $tmp.list | uniq | $sorter`
# Process command arguments first.
while [ $# -gt 0 ]; do
-\? | -h | --help) usage ;;
-glusterfs) FSTYP=glusterfs ;;
-virtiofs) FSTYP=virtiofs ;;
-overlay) FSTYP=overlay; export OVERLAY=true ;;
-pvfs2) FSTYP=pvfs2 ;;
-tmpfs) FSTYP=tmpfs ;;
-ubifs) FSTYP=ubifs ;;
# -g/-x take comma-separated group lists; commas become spaces here.
-g) group=$2 ; shift ;
GROUP_LIST="$GROUP_LIST ${group//,/ }"
-x) xgroup=$2 ; shift ;
XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
-X) subdir_xfile=$2; shift ;
-E) xfile=$2; shift ;
if [ -f $xfile ]; then
# Strip comments from the external exclude file before recording it.
sed "s/#.*$//" "$xfile" >> $tmp.xlist
-s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
-S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
-udiff) diff="$diff -u" ;;
-r) randomize=true ;;
-T) timestamp=true ;;
-d) DUMP_OUTPUT=true ;;
-b) brief_test_summary=true;;
# -R takes a comma-separated list of report formats.
-R) report_fmt=$2 ; shift ;
REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
--large-fs) export LARGE_SCRATCH_DEV=yes ;;
# BUG FIX: the option word being matched is "$1" (this case dispatches on
# the current positional argument - see the surrounding while/case loop);
# "$r" is never assigned here, so ${r#*=} always expanded to the empty
# string and --extra-space=N was silently ignored.
--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
*) # not an argument, we've got tests now.
have_test_arg=true ;;
# if we've found a test specification, then break out of the processing
# loop before we shift the arguments so that this is the first argument
# that we process in the test arg loop below.
if $have_test_arg; then
# we need common/rc, that also sources common/config. We need to source it
# after processing args, overlay needs FSTYP set before sourcing common/config
if ! . ./common/rc; then
echo "check: failed to source common/rc"
# Expand a per-directory exclude file (-X) into $tmp.xlist entries of the
# form <dir>/<test>.
if [ -n "$subdir_xfile" ]; then
for d in $SRC_GROUPS $FSTYP; do
[ -f $SRC_DIR/$d/$subdir_xfile ] || continue
# Comments in the exclude file are stripped before use.
for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
echo $d/$f >> $tmp.xlist
# Process tests from command line now.
if $have_test_arg; then
while [ $# -gt 0 ]; do
-*) echo "Arguments before tests, please!"
*) # Expand test pattern (e.g. xfs/???, *fs/001)
# Glob expansion happens relative to $SRC_DIR via the subshell cd.
list=$(cd $SRC_DIR; echo $1)
test_dir=`dirname $t`
test_dir=${test_dir#$SRC_DIR/*}
test_name=`basename $t`
group_file=$SRC_DIR/$test_dir/group
358 if egrep -q "^$test_name" $group_file; then
# in group file ... OK
echo $SRC_DIR/$test_dir/$test_name \
echo "$t - unknown test, ignored"
elif [ -z "$GROUP_LIST" ]; then
# default group list is the auto group. If any other group or test is
# specified, we use that instead.
# Running the suite requires root privileges.
echo "check: QA must be run as root"
check="$RESULT_BASE/check"
# Merge the previous per-test runtimes with this run's times; the awk
# program appears to keep one entry per test in array t before printing.
if [ -f $check.time -a -f $tmp.time ]; then
cat $check.time $tmp.time \
for (i in t) print i " " t[i]
mv $tmp.out $check.time
# Write the per-section run summary both to $tmp.summary and $check.log.
echo "SECTION -- $section" >>$tmp.summary
echo "=========================" >>$tmp.summary
if [ ! -z "$n_try" -a $n_try != 0 ]; then
if [ $brief_test_summary == "false" ]; then
echo "Ran:$try" >>$tmp.summary
echo "Ran:$try" >>$check.log
$interrupt && echo "Interrupted!" | tee -a $check.log
if [ ! -z "$notrun" ]; then
if [ $brief_test_summary == "false" ]; then
echo "Not run:$notrun"
echo "Not run:$notrun" >>$tmp.summary
echo "Not run:$notrun" >>$check.log
if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
echo "Failed $n_bad of $n_try tests"
echo "Failures:$bad" >>$check.log
echo "Failed $n_bad of $n_try tests" >>$check.log
echo "Failures:$bad" >>$tmp.summary
echo "Failed $n_bad of $n_try tests" >>$tmp.summary
echo "Passed all $n_try tests"
echo "Passed all $n_try tests" >>$check.log
echo "Passed all $n_try tests" >>$tmp.summary
echo "" >>$tmp.summary
# Accumulate this section's failures into the run-wide total.
sum_bad=`expr $sum_bad + $n_bad`
# Clean up leftover per-test temp files.
rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
if ! $OPTIONS_HAVE_SECTIONS; then
# Longest line in the summary (GNU wc -L) - presumably used to size the
# banner; confirm against the full file.
count=`wc -L $tmp.summary | cut -f1 -d" "`
# Post-test fs checks: only check a device the test flagged as used, via
# the require_test/require_scratch marker files in $RESULT_DIR.
if [ -f ${RESULT_DIR}/require_test ]; then
_check_test_fs || err=true
rm -f ${RESULT_DIR}/require_test*
_test_unmount 2> /dev/null
if [ -f ${RESULT_DIR}/require_scratch ]; then
_check_scratch_fs || err=true
rm -f ${RESULT_DIR}/require_scratch*
_scratch_unmount 2> /dev/null
if [ -s $tmp.xlist ]; then
if grep -q $TEST_ID $tmp.xlist; then
# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
# On exit or signal (HUP/INT/QUIT/TERM), report via _summary when running
# multiple config sections, otherwise via _wrapup.
if $OPTIONS_HAVE_SECTIONS; then
trap "_summary; exit \$status" 0 1 2 3 15
trap "_wrapup; exit \$status" 0 1 2 3 15
# Iterate over every configured section, re-reading config per section.
for section in $HOST_OPTIONS_SECTIONS; do
OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
get_next_config $section
# Do we need to run only some sections ?
if [ ! -z "$RUN_SECTION" ]; then
for s in $RUN_SECTION; do
if [ $section == $s ]; then
# Did this section get excluded?
if [ ! -z "$EXCLUDE_SECTION" ]; then
for s in $EXCLUDE_SECTION; do
if [ $section == $s ]; then
mkdir -p $RESULT_BASE
if [ ! -d $RESULT_BASE ]; then
echo "failed to create results directory $RESULT_BASE"
if $OPTIONS_HAVE_SECTIONS; then
echo "SECTION -- $section"
sect_start=`_wallclock`
# Re-mkfs the test device when requested or when the fstype changed
# between sections.
if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
echo "RECREATING -- $FSTYP on $TEST_DEV"
_test_unmount 2> /dev/null
if ! _test_mkfs >$tmp.err 2>&1
echo "our local _test_mkfs routine ..."
echo "check: failed to mkfs \$TEST_DEV using specified options"
echo "check: failed to mount $TEST_DEV on $TEST_DIR"
# Mount options changed between sections: remount the test device.
elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
_test_unmount 2> /dev/null
echo "check: failed to mount $TEST_DEV on $TEST_DIR"
check="$RESULT_BASE/check"
# don't leave old full output behind on a clean run
[ -f $check.time ] || touch $check.time
# print out our test configuration
echo "FSTYP -- `_full_fstyp_details`"
echo "PLATFORM -- `_full_platform_details`"
if [ ! -z "$SCRATCH_DEV" ]; then
echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
if [ ! -z "$SCRATCH_DEV" ]; then
_scratch_unmount 2> /dev/null
# call the overridden mkfs - make sure the FS is built
# the same as we'll create it later.
if ! _scratch_mkfs >$tmp.err 2>&1
echo "our local _scratch_mkfs routine ..."
echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
# call the overridden mount - make sure the FS mounts with
# the same options that we'll mount with later.
if ! _try_scratch_mount >$tmp.err 2>&1
echo "our local mount routine ..."
echo "check: failed to mount \$SCRATCH_DEV using specified options"
# Main per-test loop over the assembled (sorted/shuffled) run list.
for seq in $list ; do
# Run report for previous test!
n_bad=`expr $n_bad + 1`
if $do_report && ! $first_test ; then
if [ $tc_status != "expunge" ] ; then
_make_testcase_report "$prev_seq" "$tc_status"
if [ ! -f $seq ]; then
# Try to get full name in case the user supplied only
# seq id and the test has a name. A bit of hassle to
# find really the test and not its sample output or
bname=$(basename $seq)
full_seq=$(find $(dirname $seq) -name $bname* -executable |
awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
END { print shortest }')
if [ -f $full_seq ] && \
[ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
# the filename for the test and the name output are different.
# we don't include the tests/ directory in the name output.
export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
# Similarly, the result directory needs to replace the tests/
# part of the test location.
if $OPTIONS_HAVE_SECTIONS; then
export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
REPORT_DIR="$RESULT_BASE/$section"
export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
REPORT_DIR="$RESULT_BASE"
seqres="$REPORT_DIR/$seqnum"
# Skip this test entirely if it appears in the expunge list.
_expunge_test $seqnum
if [ $? -eq 1 ]; then
n_notrun=`expr $n_notrun + 1`
if [ ! -f $seq ]; then
echo " - no such test?"
# really going to try and run this one
rm -f $seqres.out.bad
# check if we really should run it
_expunge_test $seqnum
if [ $? -eq 1 ]; then
# record that we really tried to run this test.
n_try=`expr $n_try + 1`
# slashes now in names, sed barfs on them so use grep
lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
if [ "X$lasttime" != X ]; then
echo -n " ${lasttime}s ... "
echo -n " " # prettier output with timestamps.
rm -f core $seqres.notrun
$timestamp && echo -n " ["`date "+%T"`"]"
[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
$LOGGER_PROG "run xfstest $seqnum"
# Announce the test in the kernel log so dmesg checking can find the
# start-of-test marker.
if [ -w /dev/kmsg ]; then
export date_time=`date +"%F %T"`
echo "run fstests $seqnum at $date_time" > /dev/kmsg
# _check_dmesg depends on this log in dmesg
touch ${RESULT_DIR}/check_dmesg
_try_wipe_scratch_devs > /dev/null 2>&1
# -d: mirror test output to stdout while capturing it.
if [ "$DUMP_OUTPUT" = true ]; then
_run_seq 2>&1 | tee $tmp.out
# Because $? would get tee's return code
_run_seq >$tmp.out 2>&1
# A core file left behind means the test crashed; preserve it.
_dump_err_cont "[dumped core]"
mv core $RESULT_BASE/$seqnum.core
if [ -f $seqres.notrun ]; then
$timestamp && _timestamp
$timestamp || echo -n "[not run] "
$timestamp && echo " [not run]" && \
echo -n " $seqnum -- "
notrun="$notrun $seqnum"
n_notrun=`expr $n_notrun + 1`
# Non-zero exit status: record the failure and reset mount state so the
# next test starts clean.
if [ $sts -ne 0 ]; then
_dump_err_cont "[failed, exit status $sts]"
_test_unmount 2> /dev/null
_scratch_unmount 2> /dev/null
rm -f ${RESULT_DIR}/require_test*
rm -f ${RESULT_DIR}/require_scratch*
# the test apparently passed, so check for corruption
# and log messages that shouldn't be there.
_check_dmesg || err=true
# Scan for memory leaks after every test so that associating
# a leak to a particular test will be as accurate as possible.
_check_kmemleak || err=true
# test ends after all checks are done.
$timestamp && _timestamp
# A test with no golden output file cannot be verified.
if [ ! -f $seq.out ]; then
_dump_err "no qualified output"
# coreutils 8.16+ changed quote formats in error messages
# from `foo' to 'foo'. Filter old versions to match the new
sed -i "s/\`/\'/g" $tmp.out
# Compare actual output against the golden output; on match, record the
# test's wall-clock runtime.
if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
echo "$seqnum `expr $stop - $start`" >>$tmp.time
echo -n " `expr $stop - $start`s"
_dump_err "- output mismatch (see $seqres.out.bad)"
mv $tmp.out $seqres.out.bad
# Show the diff, truncated to $DIFF_LENGTH lines (0 = show everything),
# indented by four spaces.
$diff $seq.out $seqres.out.bad | {
if test "$DIFF_LENGTH" -le 0; then
head -n "$DIFF_LENGTH"
echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
" to see the entire diff)"
fi; } | sed -e 's/^\(.\)/ \1/'
# make sure we record the status of the last test we ran.
n_bad=`expr $n_bad + 1`
if $do_report && ! $first_test ; then
if [ $tc_status != "expunge" ] ; then
_make_testcase_report "$prev_seq" "$tc_status"
sect_stop=`_wallclock`
_test_unmount 2> /dev/null
_scratch_unmount 2> /dev/null
# Overall exit status: 1 if any test failed across all sections.
status=`expr $sum_bad != 0`