2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
5 # Control script for QA
27 brief_test_summary=false
32 # This is a global variable used to pass test failure text to reporting gunk
35 # start the initialisation work now
# NOTE(review): MSGVERB presumably tunes fmtmsg()-style output of helper tools -- confirm.
38 export MSGVERB="text:action"
# Post-test filesystem checking is on by default; callers may override via env.
39 export QA_CHECK_FS=${QA_CHECK_FS:=true}
41 # number of diff lines from a failed test, 0 for whole output
42 export DIFF_LENGTH=${DIFF_LENGTH:=10}
44 # by default don't output timestamps
45 timestamp=${TIMESTAMP:=false}
# Remove stale work/state files left by any previous invocation.
47 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
# Test directories whose tests apply to more than one filesystem type.
49 SRC_GROUPS="generic shared"
50 export SRC_DIR="tests"
54 echo "Usage: $0 [options] [testlist]"'
58 -glusterfs test GlusterFS
61 -virtiofs test virtiofs
67 -udiff show unified diff (default)
68 -n show me, do not run tests
70 -r randomize test order
71 --exact-order run tests in the exact order specified
72 -i <n> iterate the test list <n> times
73 -d dump test output to stdout
75 -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
76 --large-fs optimise scratch device for large filesystems
77 -s section run only specified section from config file
78 -S section exclude the specified section from the config file
81 -g group[,group...] include tests from these groups
82 -x group[,group...] exclude tests from these groups
83 -X exclude_file exclude individual tests
84 -e testlist exclude a specific list of tests
85 -E external_file exclude individual tests
86 [testlist] include tests matching names in testlist
88 testlist argument is a list of tests in the form of <test dir>/<test name>.
90 <test dir> is a directory under tests that contains a group file,
91 with a list of the names of the tests in that directory.
93 <test name> may be either a specific test file name (e.g. xfs/001) or
94 a test file name match pattern (e.g. xfs/*).
96 group argument is either a name of a tests group to collect from all
97 the test dirs (e.g. quick) or a name of a tests group to collect from
98 a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
99 If you want to run all the tests in the test suite, use "-g all" to specify all
102 exclude_file argument refers to a name of a file inside each test directory.
103 for every test dir where this file is found, the listed test names are
104 excluded from the list of tests to run from that test dir.
106 external_file argument is a path to a single file containing a list of tests
107 to exclude in the form of <test dir>/<test name>.
113 check -x stress xfs/*
114 check -X .exclude -g auto
115 check -E ~/.xfstests.exclude
# Fragment of the group-list helpers: resolve a group name into the list of
# test paths belonging to it.  (Function boundaries lie outside this view.)
125 test -s "$SRC_DIR/$d/group" || return 1
# Print every test whose group-file entry mentions $grp, prefixed with the
# test directory so the result is directly usable as a test list.
127 local grpl=$(sed -n < $SRC_DIR/$d/group \
130 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
138 local sub=$(dirname $grp)
139 local fsgroup="$FSTYP"
141 if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
142 # group is given as <subdir>/<group> (e.g. xfs/quick)
144 get_sub_group_list $sub $grp
# NOTE(review): ext2/ext3 apparently get remapped here; the mapping target
# is on a line not visible in this fragment -- confirm.
148 if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
151 for d in $SRC_GROUPS $fsgroup; do
152 if ! test -d "$SRC_DIR/$d" ; then
155 grpl="$grpl $(get_sub_group_list $d $grp)"
160 # Find all tests, excluding files that are test metadata such as group files.
161 # It matches test names against $VALID_TEST_NAME defined in common/rc
165 for d in $SRC_GROUPS $FSTYP; do
166 if ! test -d "$SRC_DIR/$d" ; then
171 grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
172 grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
176 # takes the list of tests to run in $tmp.list, and removes the tests passed to
177 # the function from that list.
# Flush the accumulated exclusion patterns through grep -v once the pattern
# file grows past 100 entries, keeping each grep invocation cheap.
186 if [ $numsed -gt 100 ]; then
187 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
188 mv $tmp.tmp $tmp.list
# Anchor the test name so e.g. xfs/001 does not also match xfs/0011.
192 echo "^$t\$" >>$tmp.grep
193 numsed=`expr $numsed + 1`
# Final flush of any remaining exclusion patterns.
195 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
196 mv $tmp.tmp $tmp.list
215 # Tests specified on the command line
216 if [ -s $tmp.arglist ]; then
217 cat $tmp.arglist > $tmp.list
222 # Specified groups to include
223 # Note that the CLI processing adds a leading space to the first group
224 # parameter, so we have to catch that here checking for "all"
225 if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
226 # no test numbers, do everything
229 for group in $GROUP_LIST; do
230 list=$(get_group_list $group)
231 if [ -z "$list" ]; then
232 echo "Group \"$group\" is empty or not defined?"
# Only append tests that are not already on the list (avoid duplicates).
237 grep -s "^$t\$" $tmp.list >/dev/null || \
238 echo "$t" >>$tmp.list
243 # Specified groups to exclude
244 for xgroup in $XGROUP_LIST; do
245 list=$(get_group_list $xgroup)
246 if [ -z "$list" ]; then
247 echo "Group \"$xgroup\" is empty or not defined?"
254 # sort the list of tests into numeric order unless we're running tests
255 # in the exact order specified
256 if ! $exact_order; then
# Pick a randomiser: shuf(1) when available, else the awk randomize helper.
258 if type shuf >& /dev/null; then
261 sorter="awk -v seed=$RANDOM -f randomize.awk"
266 list=`sort -n $tmp.list | uniq | $sorter`
273 # Process command arguments first.
274 while [ $# -gt 0 ]; do
276 -\? | -h | --help) usage ;;
279 -glusterfs) FSTYP=glusterfs ;;
282 -virtiofs) FSTYP=virtiofs ;;
283 -overlay) FSTYP=overlay; export OVERLAY=true ;;
284 -pvfs2) FSTYP=pvfs2 ;;
285 -tmpfs) FSTYP=tmpfs ;;
286 -ubifs) FSTYP=ubifs ;;
# Comma-separated group lists are split into space-separated words.
288 -g) group=$2 ; shift ;
289 GROUP_LIST="$GROUP_LIST ${group//,/ }"
292 -x) xgroup=$2 ; shift ;
293 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
296 -X) subdir_xfile=$2; shift ;
300 echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
# -E appends an external exclude file, with trailing #-comments stripped.
303 -E) xfile=$2; shift ;
304 if [ -f $xfile ]; then
305 sed "s/#.*$//" "$xfile" >> $tmp.xlist
308 -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
309 -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
311 -udiff) diff="$diff -u" ;;
# -r (randomize) and --exact-order are mutually exclusive.
315 if $exact_order; then
316 echo "Cannot specify -r and --exact-order."
# NOTE(review): typo in the user-visible message below ("Cannnot").
323 echo "Cannnot specify --exact-order and -r."
328 -i) iterations=$2; shift ;;
329 -T) timestamp=true ;;
330 -d) DUMP_OUTPUT=true ;;
331 -b) brief_test_summary=true;;
332 -R) report_fmt=$2 ; shift ;
333 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
336 --large-fs) export LARGE_SCRATCH_DEV=yes ;;
# NOTE(review): "$r" is not set anywhere visible in this script; this looks
# like it should be "${1#*=}" to take the value after '=' -- confirm.
337 --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
340 *) # not an argument, we've got tests now.
341 have_test_arg=true ;;
344 # if we've found a test specification, then break out of the processing
345 # loop before we shift the arguments so that this is the first argument
346 # that we process in the test arg loop below.
347 if $have_test_arg; then
354 # we need common/rc, that also sources common/config. We need to source it
355 # after processing args, overlay needs FSTYP set before sourcing common/config
356 if ! . ./common/rc; then
357 echo "check: failed to source common/rc"
# Expand any per-directory exclude file (-X) into the master exclude list.
361 if [ -n "$subdir_xfile" ]; then
362 for d in $SRC_GROUPS $FSTYP; do
363 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
364 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
365 echo $d/$f >> $tmp.xlist
370 # Process tests from command line now.
371 if $have_test_arg; then
372 while [ $# -gt 0 ]; do
374 -*) echo "Arguments before tests, please!"
378 *) # Expand test pattern (e.g. xfs/???, *fs/001)
379 list=$(cd $SRC_DIR; echo $1)
381 test_dir=`dirname $t`
382 test_dir=${test_dir#$SRC_DIR/*}
383 test_name=`basename $t`
384 group_file=$SRC_DIR/$test_dir/group
# Only accept names that appear in the directory's group file.
386 if egrep -q "^$test_name" $group_file; then
387 # in group file ... OK
388 echo $SRC_DIR/$test_dir/$test_name \
392 echo "$t - unknown test, ignored"
400 elif [ -z "$GROUP_LIST" ]; then
401 # default group list is the auto group. If any other group or test is
402 # specified, we use that instead.
# Refuse to run without root privileges.
408 echo "check: QA must be run as root"
421 echo "$1" >> $check.log
# Mirror the log line into the per-section report directory when in use.
422 if $OPTIONS_HAVE_SECTIONS; then
423 echo "$1" >> ${REPORT_DIR}/check.log
430 check="$RESULT_BASE/check"
# Merge the previous check.time data with this run's timings.  The awk body
# below is part of a multi-line quoted script whose surrounding lines are
# omitted from this view; do not edit it in isolation.
440 if [ -f $check.time -a -f $tmp.time ]; then
441 cat $check.time $tmp.time \
446 for (i in t) print i " " t[i]
450 mv $tmp.out $check.time
451 if $OPTIONS_HAVE_SECTIONS; then
452 cp $check.time ${REPORT_DIR}/check.time
457 _global_log "$(date)"
459 echo "SECTION -- $section" >>$tmp.summary
460 echo "=========================" >>$tmp.summary
461 if [ ! -z "$n_try" -a $n_try != 0 ]; then
462 if [ $brief_test_summary == "false" ]; then
464 echo "Ran:$try" >>$tmp.summary
466 _global_log "Ran:$try"
469 $interrupt && echo "Interrupted!" | tee -a $check.log
470 if $OPTIONS_HAVE_SECTIONS; then
471 $interrupt && echo "Interrupted!" | tee -a \
472 ${REPORT_DIR}/check.log
475 if [ ! -z "$notrun" ]; then
476 if [ $brief_test_summary == "false" ]; then
477 echo "Not run:$notrun"
478 echo "Not run:$notrun" >>$tmp.summary
480 _global_log "Not run:$notrun"
483 if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
485 echo "Failed $n_bad of $n_try tests"
486 _global_log "Failures:$bad"
487 _global_log "Failed $n_bad of $n_try tests"
488 echo "Failures:$bad" >>$tmp.summary
489 echo "Failed $n_bad of $n_try tests" >>$tmp.summary
491 echo "Passed all $n_try tests"
492 _global_log "Passed all $n_try tests"
493 echo "Passed all $n_try tests" >>$tmp.summary
495 echo "" >>$tmp.summary
502 sum_bad=`expr $sum_bad + $n_bad`
# Clean up per-test scratch output left in /tmp.
504 rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
505 if ! $OPTIONS_HAVE_SECTIONS; then
# wc -L gives the length of the longest line in the summary.
516 count=`wc -L $tmp.summary | cut -f1 -d" "`
# Check/teardown the test and scratch filesystems after a test, and clear
# the per-test "require_*" marker files.
525 if [ -f ${RESULT_DIR}/require_test ]; then
526 _check_test_fs || err=true
527 rm -f ${RESULT_DIR}/require_test*
529 _test_unmount 2> /dev/null
531 if [ -f ${RESULT_DIR}/require_scratch ]; then
532 _check_scratch_fs || err=true
533 rm -f ${RESULT_DIR}/require_scratch*
535 _scratch_unmount 2> /dev/null
# Expunge check: a test id present in $tmp.xlist is skipped.
541 if [ -s $tmp.xlist ]; then
542 if grep -q $TEST_ID $tmp.xlist; then
550 # Can we run systemd scopes?
# Probe by running a command in a transient scope; getting its exit status
# (77) back proves scopes work on this system.
552 systemctl reset-failed "fstests-check" &>/dev/null
553 systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
554 test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
556 # Make the check script unattractive to the OOM killer...
557 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
# -1000 disables OOM-killing of this process entirely (see proc(5)).
558 test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
560 # ...and make the tests themselves somewhat more attractive to it, so that if
561 # the system runs out of memory it'll be the test that gets killed and not the
564 # If systemd is available, run the entire test script in a scope so that we can
565 # kill all subprocesses of the test if it fails to clean up after itself. This
566 # is essential for ensuring that the post-test unmount succeeds. Note that
567 # systemd doesn't automatically remove transient scopes that fail to terminate
568 # when systemd tells them to terminate (e.g. programs stuck in D state when
569 # systemd sends SIGKILL), so we use reset-failed to tear down the scope.
# The child raises its own OOM score (250) before exec'ing the test, so the
# kernel prefers killing the test rather than this harness.
571 local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
573 if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
574 local unit="$(systemd-escape "fs$seq").scope"
575 systemctl reset-failed "${unit}" &> /dev/null
576 systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
# Stop the scope explicitly to reap any leftover test subprocesses.
578 systemctl stop "${unit}" &> /dev/null
# Install exit/signal handlers: per-section summary when config sections are
# in use, otherwise the single-run wrapup.
588 if $OPTIONS_HAVE_SECTIONS; then
589 trap "_summary; exit \$status" 0 1 2 3 15
591 trap "_wrapup; exit \$status" 0 1 2 3 15
# run_section -- configure the devices for one config section, then run every
# selected test in it.  NOTE(review): heavily sampled fragment; many lines
# between the statements below are omitted from this view.
594 function run_section()
599 OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
600 get_next_config $section
602 # Do we need to run only some sections ?
603 if [ ! -z "$RUN_SECTION" ]; then
605 for s in $RUN_SECTION; do
606 if [ $section == $s ]; then
616 # Did this section get excluded?
617 if [ ! -z "$EXCLUDE_SECTION" ]; then
619 for s in $EXCLUDE_SECTION; do
620 if [ $section == $s ]; then
630 mkdir -p $RESULT_BASE
631 if [ ! -d $RESULT_BASE ]; then
632 echo "failed to create results directory $RESULT_BASE"
637 if $OPTIONS_HAVE_SECTIONS; then
638 echo "SECTION -- $section"
641 sect_start=`_wallclock`
# Recreate TEST_DEV when asked to, or when this section changes the fstype.
642 if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
643 echo "RECREATING -- $FSTYP on $TEST_DEV"
644 _test_unmount 2> /dev/null
645 if ! _test_mkfs >$tmp.err 2>&1
647 echo "our local _test_mkfs routine ..."
649 echo "check: failed to mkfs \$TEST_DEV using specified options"
655 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
659 # TEST_DEV has been recreated, previous FSTYP derived from
660 # TEST_DEV could be changed, source common/rc again with
661 # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
664 elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
665 _test_unmount 2> /dev/null
668 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
677 check="$RESULT_BASE/check"
679 # don't leave old full output behind on a clean run
682 [ -f $check.time ] || touch $check.time
684 # print out our test configuration
685 echo "FSTYP -- `_full_fstyp_details`"
686 echo "PLATFORM -- `_full_platform_details`"
687 if [ ! -z "$SCRATCH_DEV" ]; then
688 echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
689 echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
# Pre-flight the scratch device: mkfs and mount it once with the configured
# options so option errors surface before any test runs.
694 if [ ! -z "$SCRATCH_DEV" ]; then
695 _scratch_unmount 2> /dev/null
696 # call the overridden mkfs - make sure the FS is built
697 # the same as we'll create it later.
699 if ! _scratch_mkfs >$tmp.err 2>&1
701 echo "our local _scratch_mkfs routine ..."
703 echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
708 # call the overridden mount - make sure the FS mounts with
709 # the same options that we'll mount with later.
710 if ! _try_scratch_mount >$tmp.err 2>&1
712 echo "our local mount routine ..."
714 echo "check: failed to mount \$SCRATCH_DEV using specified options"
# Main per-test loop for this section.
728 for seq in $list ; do
729 # Run report for previous test!
732 n_bad=`expr $n_bad + 1`
735 if $do_report && ! $first_test ; then
736 if [ $tc_status != "expunge" ] ; then
737 _make_testcase_report "$prev_seq" "$tc_status"
744 if [ ! -f $seq ]; then
745 # Try to get full name in case the user supplied only
746 # seq id and the test has a name. A bit of hassle to
747 # find really the test and not its sample output or
749 bname=$(basename $seq)
750 full_seq=$(find $(dirname $seq) -name $bname* -executable |
751 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
752 END { print shortest }')
753 if [ -f $full_seq ] && \
754 [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
759 # the filename for the test and the name output are different.
760 # we don't include the tests/ directory in the name output.
761 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
763 # Similarly, the result directory needs to replace the tests/
764 # part of the test location.
766 if $OPTIONS_HAVE_SECTIONS; then
767 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
768 REPORT_DIR="$RESULT_BASE/$section"
770 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
771 REPORT_DIR="$RESULT_BASE"
773 seqres="$REPORT_DIR/$seqnum"
# Clear stale per-test requirement markers before running.
776 rm -f ${RESULT_DIR}/require_scratch*
777 rm -f ${RESULT_DIR}/require_test*
781 _expunge_test $seqnum
782 if [ $? -eq 1 ]; then
790 n_notrun=`expr $n_notrun + 1`
795 if [ ! -f $seq ]; then
796 echo " - no such test?"
800 # really going to try and run this one
801 rm -f $seqres.out.bad
803 # check if we really should run it
804 _expunge_test $seqnum
805 if [ $? -eq 1 ]; then
810 # record that we really tried to run this test.
812 n_try=`expr $n_try + 1`
814 # slashes now in names, sed barfs on them so use grep
815 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
816 if [ "X$lasttime" != X ]; then
817 echo -n " ${lasttime}s ... "
819 echo -n " " # prettier output with timestamps.
821 rm -f core $seqres.notrun
824 $timestamp && echo -n " ["`date "+%T"`"]"
825 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
826 $LOGGER_PROG "run xfstest $seqnum"
# Announce the test in the kernel log so dmesg extraction can be bounded.
827 if [ -w /dev/kmsg ]; then
828 export date_time=`date +"%F %T"`
829 echo "run fstests $seqnum at $date_time" > /dev/kmsg
830 # _check_dmesg depends on this log in dmesg
831 touch ${RESULT_DIR}/check_dmesg
833 _try_wipe_scratch_devs > /dev/null 2>&1
835 # clear the WARN_ONCE state to allow a potential problem
836 # to be reported for each test
837 (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
839 if [ "$DUMP_OUTPUT" = true ]; then
840 _run_seq 2>&1 | tee $tmp.out
841 # Because $? would get tee's return code
844 _run_seq >$tmp.out 2>&1
# A core file means the test crashed; keep the core for post-mortem.
849 _dump_err_cont "[dumped core]"
850 mv core $RESULT_BASE/$seqnum.core
854 if [ -f $seqres.notrun ]; then
855 $timestamp && _timestamp
857 $timestamp || echo -n "[not run] "
858 $timestamp && echo " [not run]" && \
859 echo -n " $seqnum -- "
861 notrun="$notrun $seqnum"
862 n_notrun=`expr $n_notrun + 1`
# Non-zero test exit: record failure and tear down both filesystems.
867 if [ $sts -ne 0 ]; then
868 _dump_err_cont "[failed, exit status $sts]"
869 _test_unmount 2> /dev/null
870 _scratch_unmount 2> /dev/null
871 rm -f ${RESULT_DIR}/require_test*
872 rm -f ${RESULT_DIR}/require_scratch*
875 # the test apparently passed, so check for corruption
876 # and log messages that shouldn't be there.
878 _check_dmesg || err=true
881 # Reload the module after each test to check for leaks or
883 if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
884 _test_unmount 2> /dev/null
885 _scratch_unmount 2> /dev/null
886 modprobe -r fs-$FSTYP
890 # Scan for memory leaks after every test so that associating
891 # a leak to a particular test will be as accurate as possible.
892 _check_kmemleak || err=true
894 # test ends after all checks are done.
895 $timestamp && _timestamp
898 if [ ! -f $seq.out ]; then
899 _dump_err "no qualified output"
904 # coreutils 8.16+ changed quote formats in error messages
905 # from `foo' to 'foo'. Filter old versions to match the new
907 sed -i "s/\`/\'/g" $tmp.out
# Golden-output comparison: a clean diff means the test passed.
908 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
910 echo "$seqnum `expr $stop - $start`" >>$tmp.time
911 echo -n " `expr $stop - $start`s"
915 _dump_err "- output mismatch (see $seqres.out.bad)"
916 mv $tmp.out $seqres.out.bad
917 $diff $seq.out $seqres.out.bad | {
918 if test "$DIFF_LENGTH" -le 0; then
921 head -n "$DIFF_LENGTH"
923 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
924 " to see the entire diff)"
925 fi; } | sed -e 's/^\(.\)/ \1/'
930 # make sure we record the status of the last test we ran.
933 n_bad=`expr $n_bad + 1`
936 if $do_report && ! $first_test ; then
937 if [ $tc_status != "expunge" ] ; then
938 _make_testcase_report "$prev_seq" "$tc_status"
942 sect_stop=`_wallclock`
948 _test_unmount 2> /dev/null
949 _scratch_unmount 2> /dev/null
# Top level: iterate the full list of config sections $iterations times.
952 for ((iters = 0; iters < $iterations; iters++)) do
953 for section in $HOST_OPTIONS_SECTIONS; do
# Overall exit status: non-zero iff any test failed in any section.
959 status=`expr $sum_bad != 0`