2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
5 # Control script for QA
# --- global defaults & one-time initialisation -------------------------------
# NOTE(review): every source line below carries a stray leading number (an
# extraction artifact of the original file's numbering); preserved verbatim.
26 brief_test_summary=false
31 # This is a global variable used to pass test failure text to reporting gunk
34 # start the initialisation work now
# MSGVERB selects the components shown in fmtmsg-style messages -- presumably
# for tools the tests invoke; TODO confirm consumers.
37 export MSGVERB="text:action"
# ${VAR:=default} assigns the default only when unset/empty, so each of these
# can be overridden from the caller's environment.
38 export QA_CHECK_FS=${QA_CHECK_FS:=true}
40 # number of diff lines from a failed test, 0 for whole output
41 export DIFF_LENGTH=${DIFF_LENGTH:=10}
43 # by default don't output timestamps
44 timestamp=${TIMESTAMP:=false}
# clean out state left behind by a previous run
# NOTE(review): assumes $tmp, $here and $iam were set earlier -- TODO confirm
46 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
# test dirs shared by all filesystems, and the root of the test tree
48 SRC_GROUPS="generic shared"
49 export SRC_DIR="tests"
# usage/help text fragment: the trailing single quote on the next line opens a
# multi-line string literal, so no annotations are added inside it.
53 echo "Usage: $0 [options] [testlist]"'
57 -glusterfs test GlusterFS
60 -virtiofs test virtiofs
66 -udiff show unified diff (default)
67 -n show me, do not run tests
69 -r randomize test order
70 -i <n> iterate the test list <n> times
71 -d dump test output to stdout
73 -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
74 --large-fs optimise scratch device for large filesystems
75 -s section run only specified section from config file
76 -S section exclude the specified section from the config file
79 -g group[,group...] include tests from these groups
80 -x group[,group...] exclude tests from these groups
81 -X exclude_file exclude individual tests
82 -E external_file exclude individual tests
83 [testlist] include tests matching names in testlist
85 testlist argument is a list of tests in the form of <test dir>/<test name>.
87 <test dir> is a directory under tests that contains a group file,
88 with a list of the names of the tests in that directory.
90 <test name> may be either a specific test file name (e.g. xfs/001) or
91 a test file name match pattern (e.g. xfs/*).
93 group argument is either a name of a tests group to collect from all
94 the test dirs (e.g. quick) or a name of a tests group to collect from
95 a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
96 If you want to run all the tests in the test suite, use "-g all" to specify all
99 exclude_file argument refers to a name of a file inside each test directory.
100 for every test dir where this file is found, the listed test names are
101 excluded from the list of tests to run from that test dir.
103 external_file argument is a path to a single file containing a list of tests
104 to exclude in the form of <test dir>/<test name>.
110 check -x stress xfs/*
111 check -X .exclude -g auto
112 check -E ~/.xfstests.exclude
# get_sub_group_list fragment: pull from a directory's "group" file the names
# of tests belonging to group $grp, emitted as $SRC_DIR/<dir>/<test>.
122 test -s "$SRC_DIR/$d/group" || return 1
124 local grpl=$(sed -n < $SRC_DIR/$d/group \
127 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
# get_group_list fragment: a group argument may be a bare group name
# (searched across all test dirs) or <dir>/<group> for a single dir.
135 local sub=$(dirname $grp)
136 local fsgroup="$FSTYP"
138 if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
139 # group is given as <subdir>/<group> (e.g. xfs/quick)
141 get_sub_group_list $sub $grp
# NOTE(review): ext2/ext3 apparently redirect to another test dir (fsgroup);
# the assignment is elided from this fragment -- TODO confirm against full file
145 if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
148 for d in $SRC_GROUPS $fsgroup; do
149 if ! test -d "$SRC_DIR/$d" ; then
152 grpl="$grpl $(get_sub_group_list $d $grp)"
# --- test discovery fragment -------------------------------------------------
157 # Find all tests, excluding files that are test metadata such as group files.
158 # It matches test names against $VALID_TEST_NAME defined in common/rc
162 for d in $SRC_GROUPS $FSTYP; do
163 if ! test -d "$SRC_DIR/$d" ; then
168 grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
169 grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
# --- trim_test_list fragment -------------------------------------------------
173 # takes the list of tests to run in $tmp.list, and removes the tests passed to
174 # the function from that list.
# NOTE(review): patterns appear to be flushed through grep -v in batches,
# presumably to bound the size of $tmp.grep -- confirm against full source.
183 if [ $numsed -gt 100 ]; then
184 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
185 mv $tmp.tmp $tmp.list
# each test to remove becomes an anchored regex, one per line
189 echo "^$t\$" >>$tmp.grep
190 numsed=`expr $numsed + 1`
# final flush of any remaining patterns
192 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
193 mv $tmp.tmp $tmp.list
# --- assemble the final list of tests to run into $tmp.list ------------------
212 # Tests specified on the command line
213 if [ -s $tmp.arglist ]; then
214 cat $tmp.arglist > $tmp.list
219 # Specified groups to include
220 # Note that the CLI processing adds a leading space to the first group
221 # parameter, so we have to catch that here checking for "all"
222 if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
223 # no test numbers, do everything
226 for group in $GROUP_LIST; do
227 list=$(get_group_list $group)
228 if [ -z "$list" ]; then
229 echo "Group \"$group\" is empty or not defined?"
# add only tests not already present, keeping the list duplicate-free
234 grep -s "^$t\$" $tmp.list >/dev/null || \
235 echo "$t" >>$tmp.list
240 # Specified groups to exclude
241 for xgroup in $XGROUP_LIST; do
242 list=$(get_group_list $xgroup)
243 if [ -z "$list" ]; then
244 echo "Group \"$xgroup\" is empty or not defined?"
251 # sort the list of tests into numeric order
# NOTE(review): shuf appears to be preferred for -r randomisation, with the
# bundled randomize.awk as fallback -- confirm against the full source.
253 if type shuf >& /dev/null; then
256 sorter="awk -v seed=$RANDOM -f randomize.awk"
261 list=`sort -n $tmp.list | uniq | $sorter`
# --- command-line argument parsing -------------------------------------------
265 # Process command arguments first.
266 while [ $# -gt 0 ]; do
268 -\? | -h | --help) usage ;;
271 -glusterfs) FSTYP=glusterfs ;;
274 -virtiofs) FSTYP=virtiofs ;;
275 -overlay) FSTYP=overlay; export OVERLAY=true ;;
276 -pvfs2) FSTYP=pvfs2 ;;
277 -tmpfs) FSTYP=tmpfs ;;
278 -ubifs) FSTYP=ubifs ;;
# ${var//,/ } turns the comma-separated option value into a space list
280 -g) group=$2 ; shift ;
281 GROUP_LIST="$GROUP_LIST ${group//,/ }"
284 -x) xgroup=$2 ; shift ;
285 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
288 -X) subdir_xfile=$2; shift ;
290 -E) xfile=$2; shift ;
291 if [ -f $xfile ]; then
# strip comments from the external exclude file before accumulating it
292 sed "s/#.*$//" "$xfile" >> $tmp.xlist
295 -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
296 -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
298 -udiff) diff="$diff -u" ;;
301 -r) randomize=true ;;
302 -i) iterations=$2; shift ;;
303 -T) timestamp=true ;;
304 -d) DUMP_OUTPUT=true ;;
305 -b) brief_test_summary=true;;
306 -R) report_fmt=$2 ; shift ;
307 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
310 --large-fs) export LARGE_SCRATCH_DEV=yes ;;
# NOTE(review): ${r#*=} references $r, but this case statement matches the
# positional argument -- this looks like it should be ${1#*=}; TODO confirm.
311 --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
314 *) # not an argument, we've got tests now.
315 have_test_arg=true ;;
318 # if we've found a test specification, then break out of the processing
319 # loop before we shift the arguments so that this is the first argument
320 # that we process in the test arg loop below.
321 if $have_test_arg; then
# --- source the shared test infrastructure -----------------------------------
328 # we need common/rc, that also sources common/config. We need to source it
329 # after processing args, overlay needs FSTYP set before sourcing common/config
330 if ! . ./common/rc; then
331 echo "check: failed to source common/rc"
# apply per-directory exclude files (-X): each test dir may carry a file whose
# comment-stripped entries are appended to the global exclude list $tmp.xlist
335 if [ -n "$subdir_xfile" ]; then
336 for d in $SRC_GROUPS $FSTYP; do
337 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
338 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
339 echo $d/$f >> $tmp.xlist
# --- expand test names/patterns given on the command line --------------------
344 # Process tests from command line now.
345 if $have_test_arg; then
346 while [ $# -gt 0 ]; do
348 -*) echo "Arguments before tests, please!"
352 *) # Expand test pattern (e.g. xfs/???, *fs/001)
# let the shell glob the pattern relative to the tests/ directory
353 list=$(cd $SRC_DIR; echo $1)
355 test_dir=`dirname $t`
356 test_dir=${test_dir#$SRC_DIR/*}
357 test_name=`basename $t`
358 group_file=$SRC_DIR/$test_dir/group
# only accept names that appear in the directory's group file
360 if egrep -q "^$test_name" $group_file; then
361 # in group file ... OK
362 echo $SRC_DIR/$test_dir/$test_name \
366 echo "$t - unknown test, ignored"
374 elif [ -z "$GROUP_LIST" ]; then
375 # default group list is the auto group. If any other group or test is
376 # specified, we use that instead.
# tests mount devices and poke system interfaces, so root is required
382 echo "check: QA must be run as root"
# --- _global_log fragment: mirror a line into check.log (and the per-section
# report copy when config sections are in use) --------------------------------
395 echo "$1" >> $check.log
396 if $OPTIONS_HAVE_SECTIONS; then
397 echo "$1" >> ${REPORT_DIR}/check.log
# --- _wrapup fragment: merge timings and emit the end-of-run summary ---------
404 check="$RESULT_BASE/check"
# merge the previous check.time with this run's $tmp.time; the awk loop below
# prints one "<test> <seconds>" entry per test from an associative array
414 if [ -f $check.time -a -f $tmp.time ]; then
415 cat $check.time $tmp.time \
420 for (i in t) print i " " t[i]
424 mv $tmp.out $check.time
425 if $OPTIONS_HAVE_SECTIONS; then
426 cp $check.time ${REPORT_DIR}/check.time
431 _global_log "$(date)"
433 echo "SECTION -- $section" >>$tmp.summary
434 echo "=========================" >>$tmp.summary
# report the tests actually attempted ($try), unless brief mode suppresses it
435 if [ ! -z "$n_try" -a $n_try != 0 ]; then
436 if [ $brief_test_summary == "false" ]; then
438 echo "Ran:$try" >>$tmp.summary
440 _global_log "Ran:$try"
443 $interrupt && echo "Interrupted!" | tee -a $check.log
444 if $OPTIONS_HAVE_SECTIONS; then
445 $interrupt && echo "Interrupted!" | tee -a \
446 ${REPORT_DIR}/check.log
449 if [ ! -z "$notrun" ]; then
450 if [ $brief_test_summary == "false" ]; then
451 echo "Not run:$notrun"
452 echo "Not run:$notrun" >>$tmp.summary
454 _global_log "Not run:$notrun"
# failure/pass tallies go to stdout, the global log and the summary file
457 if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
459 echo "Failed $n_bad of $n_try tests"
460 _global_log "Failures:$bad"
461 _global_log "Failed $n_bad of $n_try tests"
462 echo "Failures:$bad" >>$tmp.summary
463 echo "Failed $n_bad of $n_try tests" >>$tmp.summary
465 echo "Passed all $n_try tests"
466 _global_log "Passed all $n_try tests"
467 echo "Passed all $n_try tests" >>$tmp.summary
469 echo "" >>$tmp.summary
# --- accumulate failure totals; clean temporary output -----------------------
476 sum_bad=`expr $sum_bad + $n_bad`
478 rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
479 if ! $OPTIONS_HAVE_SECTIONS; then
# wc -L: length of the longest summary line (GNU extension)
490 count=`wc -L $tmp.summary | cut -f1 -d" "`
# --- post-test fs checks: only fsck a device the test declared it required
# (marker files under $RESULT_DIR), then always try to unmount ----------------
499 if [ -f ${RESULT_DIR}/require_test ]; then
500 _check_test_fs || err=true
501 rm -f ${RESULT_DIR}/require_test*
503 _test_unmount 2> /dev/null
505 if [ -f ${RESULT_DIR}/require_scratch ]; then
506 _check_scratch_fs || err=true
507 rm -f ${RESULT_DIR}/require_scratch*
509 _scratch_unmount 2> /dev/null
# --- expunge-list fragment: skip any test listed in $tmp.xlist ---------------
515 if [ -s $tmp.xlist ]; then
516 if grep -q $TEST_ID $tmp.xlist; then
# --- environment setup: systemd scopes, OOM shielding, exit traps ------------
524 # Can we run systemd scopes?
526 systemctl reset-failed "fstests-check" &>/dev/null
# probe with a throwaway scope: exit code 77 round-trips only if scopes work
527 systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
528 test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
530 # Make the check script unattractive to the OOM killer...
531 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
532 test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
534 # ...and make the tests themselves somewhat more attractive to it, so that if
535 # the system runs out of memory it'll be the test that gets killed and not the
538 # If systemd is available, run the entire test script in a scope so that we can
539 # kill all subprocesses of the test if it fails to clean up after itself. This
540 # is essential for ensuring that the post-test unmount succeeds. Note that
541 # systemd doesn't automatically remove transient scopes that fail to terminate
542 # when systemd tells them to terminate (e.g. programs stuck in D state when
543 # systemd sends SIGKILL), so we use reset-failed to tear down the scope.
# _run_seq fragment: each test runs with a raised oom_score_adj of 250
545 local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
547 if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
548 local unit="$(systemd-escape "fs$seq").scope"
549 systemctl reset-failed "${unit}" &> /dev/null
550 systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
552 systemctl stop "${unit}" &> /dev/null
# install exit/signal traps: per-section summary when sections are used,
# otherwise the single wrapup path
562 if $OPTIONS_HAVE_SECTIONS; then
563 trap "_summary; exit \$status" 0 1 2 3 15
565 trap "_wrapup; exit \$status" 0 1 2 3 15
# run_section: run the selected tests against one config section -- load the
# section's config, honour -s/-S filters, (re)create and mount the test and
# scratch devices, then print the configuration banner.
# Fragment: interior lines are elided; the bytes below are preserved verbatim.
568 function run_section()
573 OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
574 get_next_config $section
576 # Do we need to run only some sections ?
577 if [ ! -z "$RUN_SECTION" ]; then
579 for s in $RUN_SECTION; do
580 if [ $section == $s ]; then
590 # Did this section get excluded?
591 if [ ! -z "$EXCLUDE_SECTION" ]; then
593 for s in $EXCLUDE_SECTION; do
594 if [ $section == $s ]; then
604 mkdir -p $RESULT_BASE
605 if [ ! -d $RESULT_BASE ]; then
606 echo "failed to create results directory $RESULT_BASE"
611 if $OPTIONS_HAVE_SECTIONS; then
612 echo "SECTION -- $section"
# re-mkfs the test device when forced or when the fs type changed
615 sect_start=`_wallclock`
616 if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
617 echo "RECREATING -- $FSTYP on $TEST_DEV"
618 _test_unmount 2> /dev/null
619 if ! _test_mkfs >$tmp.err 2>&1
621 echo "our local _test_mkfs routine ..."
623 echo "check: failed to mkfs \$TEST_DEV using specified options"
629 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
633 # TEST_DEV has been recreated, previous FSTYP derived from
634 # TEST_DEV could be changed, source common/rc again with
635 # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
# remount when only the mount options differ between sections
638 elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
639 _test_unmount 2> /dev/null
642 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
651 check="$RESULT_BASE/check"
653 # don't leave old full output behind on a clean run
656 [ -f $check.time ] || touch $check.time
658 # print out our test configuration
659 echo "FSTYP -- `_full_fstyp_details`"
660 echo "PLATFORM -- `_full_platform_details`"
661 if [ ! -z "$SCRATCH_DEV" ]; then
662 echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
663 echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
# sanity-check the scratch device using the same mkfs/mount paths the
# tests themselves will use later
668 if [ ! -z "$SCRATCH_DEV" ]; then
669 _scratch_unmount 2> /dev/null
670 # call the overridden mkfs - make sure the FS is built
671 # the same as we'll create it later.
673 if ! _scratch_mkfs >$tmp.err 2>&1
675 echo "our local _scratch_mkfs routine ..."
677 echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
682 # call the overridden mount - make sure the FS mounts with
683 # the same options that we'll mount with later.
684 if ! _try_scratch_mount >$tmp.err 2>&1
686 echo "our local mount routine ..."
688 echo "check: failed to mount \$SCRATCH_DEV using specified options"
# --- per-test loop (run_section continues): resolve each test, set up result
# paths, honour expunges, and prepare the environment before running ----------
702 for seq in $list ; do
703 # Run report for previous test!
706 n_bad=`expr $n_bad + 1`
709 if $do_report && ! $first_test ; then
710 if [ $tc_status != "expunge" ] ; then
711 _make_testcase_report "$prev_seq" "$tc_status"
718 if [ ! -f $seq ]; then
719 # Try to get full name in case the user supplied only
720 # seq id and the test has a name. A bit of hassle to
721 # find really the test and not its sample output or
# pick the shortest executable match for the given seq id
723 bname=$(basename $seq)
724 full_seq=$(find $(dirname $seq) -name $bname* -executable |
725 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
726 END { print shortest }')
727 if [ -f $full_seq ] && \
728 [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
733 # the filename for the test and the name output are different.
734 # we don't include the tests/ directory in the name output.
735 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
737 # Similarly, the result directory needs to replace the tests/
738 # part of the test location.
740 if $OPTIONS_HAVE_SECTIONS; then
741 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
742 REPORT_DIR="$RESULT_BASE/$section"
744 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
745 REPORT_DIR="$RESULT_BASE"
747 seqres="$REPORT_DIR/$seqnum"
# clear stale requirement markers from a previous run of this test
750 rm -f ${RESULT_DIR}/require_scratch*
751 rm -f ${RESULT_DIR}/require_test*
755 _expunge_test $seqnum
756 if [ $? -eq 1 ]; then
764 n_notrun=`expr $n_notrun + 1`
769 if [ ! -f $seq ]; then
770 echo " - no such test?"
774 # really going to try and run this one
775 rm -f $seqres.out.bad
777 # check if we really should run it
778 _expunge_test $seqnum
779 if [ $? -eq 1 ]; then
784 # record that we really tried to run this test.
786 n_try=`expr $n_try + 1`
788 # slashes now in names, sed barfs on them so use grep
789 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
790 if [ "X$lasttime" != X ]; then
791 echo -n " ${lasttime}s ... "
793 echo -n " " # prettier output with timestamps.
795 rm -f core $seqres.notrun
798 $timestamp && echo -n " ["`date "+%T"`"]"
799 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
800 $LOGGER_PROG "run xfstest $seqnum"
# announce the test in the kernel log so _check_dmesg can scope its scan
801 if [ -w /dev/kmsg ]; then
802 export date_time=`date +"%F %T"`
803 echo "run fstests $seqnum at $date_time" > /dev/kmsg
804 # _check_dmesg depends on this log in dmesg
805 touch ${RESULT_DIR}/check_dmesg
807 _try_wipe_scratch_devs > /dev/null 2>&1
809 # clear the WARN_ONCE state to allow a potential problem
810 # to be reported for each test
811 (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
# --- run the test and classify the result ------------------------------------
813 if [ "$DUMP_OUTPUT" = true ]; then
814 _run_seq 2>&1 | tee $tmp.out
815 # Because $? would get tee's return code
818 _run_seq >$tmp.out 2>&1
# a core file in cwd means the test crashed; keep it for post-mortem
823 _dump_err_cont "[dumped core]"
824 mv core $RESULT_BASE/$seqnum.core
# the test created $seqres.notrun, i.e. it skipped itself
828 if [ -f $seqres.notrun ]; then
829 $timestamp && _timestamp
831 $timestamp || echo -n "[not run] "
832 $timestamp && echo " [not run]" && \
833 echo -n " $seqnum -- "
835 notrun="$notrun $seqnum"
836 n_notrun=`expr $n_notrun + 1`
# non-zero exit: record the failure and force both devices unmounted
841 if [ $sts -ne 0 ]; then
842 _dump_err_cont "[failed, exit status $sts]"
843 _test_unmount 2> /dev/null
844 _scratch_unmount 2> /dev/null
845 rm -f ${RESULT_DIR}/require_test*
846 rm -f ${RESULT_DIR}/require_scratch*
849 # the test apparently passed, so check for corruption
850 # and log messages that shouldn't be there.
852 _check_dmesg || err=true
855 # Reload the module after each test to check for leaks or
857 if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
858 _test_unmount 2> /dev/null
859 _scratch_unmount 2> /dev/null
860 modprobe -r fs-$FSTYP
864 # Scan for memory leaks after every test so that associating
865 # a leak to a particular test will be as accurate as possible.
866 _check_kmemleak || err=true
868 # test ends after all checks are done.
869 $timestamp && _timestamp
872 if [ ! -f $seq.out ]; then
873 _dump_err "no qualified output"
878 # coreutils 8.16+ changed quote formats in error messages
879 # from `foo' to 'foo'. Filter old versions to match the new
881 sed -i "s/\`/\'/g" $tmp.out
# compare actual output against the golden .out file
882 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
884 echo "$seqnum `expr $stop - $start`" >>$tmp.time
885 echo -n " `expr $stop - $start`s"
# mismatch: keep the bad output and show a (possibly truncated) diff,
# indented by one space via the trailing sed
889 _dump_err "- output mismatch (see $seqres.out.bad)"
890 mv $tmp.out $seqres.out.bad
891 $diff $seq.out $seqres.out.bad | {
892 if test "$DIFF_LENGTH" -le 0; then
895 head -n "$DIFF_LENGTH"
897 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
898 " to see the entire diff)"
899 fi; } | sed -e 's/^\(.\)/ \1/'
904 # make sure we record the status of the last test we ran.
907 n_bad=`expr $n_bad + 1`
910 if $do_report && ! $first_test ; then
911 if [ $tc_status != "expunge" ] ; then
912 _make_testcase_report "$prev_seq" "$tc_status"
916 sect_stop=`_wallclock`
# leave both devices unmounted at section end
922 _test_unmount 2> /dev/null
923 _scratch_unmount 2> /dev/null
# --- main entry: run every configured section, $iterations (-i) times --------
926 for ((iters = 0; iters < $iterations; iters++)) do
927 for section in $HOST_OPTIONS_SECTIONS; do
# overall exit status: non-zero iff any test failed in any section
933 status=`expr $sum_bad != 0`