2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
5 # Control script for QA
# --- global defaults / initialisation fragment ---
# NOTE(review): every line in this chunk carries a stray leading integer
# (the original file's line numbers fused in by extraction) — the script is
# not runnable shell until those are stripped; confirm against upstream.
# Gaps in that numbering show interior lines are missing from this view.
27 brief_test_summary=false
33 # This is a global variable used to pass test failure text to reporting gunk
36 # start the initialisation work now
39 export MSGVERB="text:action"
# ${VAR:=default} honours values already set in the caller's environment
40 export QA_CHECK_FS=${QA_CHECK_FS:=true}
42 # number of diff lines from a failed test, 0 for whole output
43 export DIFF_LENGTH=${DIFF_LENGTH:=10}
45 # by default don't output timestamps
46 timestamp=${TIMESTAMP:=false}
# remove leftovers of a previous run; $tmp/$here/$iam are set on lines not
# visible in this chunk — TODO confirm
48 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
# directories under $SRC_DIR searched for tests in addition to $FSTYP
50 SRC_GROUPS="generic shared"
51 export SRC_DIR="tests"
# Fragment of the usage() help text. The trailing quote on the echo line
# opens a long single-quoted string; every line below is literal output, so
# no comments may be inserted inside it (they would become help text).
# The closing quote is on a line not visible in this chunk.
55 echo "Usage: $0 [options] [testlist]"'
59 -glusterfs test GlusterFS
62 -virtiofs test virtiofs
68 -udiff show unified diff (default)
69 -n show me, do not run tests
71 -r randomize test order
72 --exact-order run tests in the exact order specified
73 -i <n> iterate the test list <n> times
74 -I <n> iterate the test list <n> times, but stops iterating further in case of any test failure
75 -d dump test output to stdout
77 -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
78 --large-fs optimise scratch device for large filesystems
79 -s section run only specified section from config file
80 -S section exclude the specified section from the config file
83 -g group[,group...] include tests from these groups
84 -x group[,group...] exclude tests from these groups
85 -X exclude_file exclude individual tests
86 -e testlist exclude a specific list of tests
87 -E external_file exclude individual tests
88 [testlist] include tests matching names in testlist
90 testlist argument is a list of tests in the form of <test dir>/<test name>.
92 <test dir> is a directory under tests that contains a group file,
93 with a list of the names of the tests in that directory.
95 <test name> may be either a specific test file name (e.g. xfs/001) or
96 a test file name match pattern (e.g. xfs/*).
98 group argument is either a name of a tests group to collect from all
99 the test dirs (e.g. quick) or a name of a tests group to collect from
100 a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
101 If you want to run all the tests in the test suite, use "-g all" to specify all
104 exclude_file argument refers to a name of a file inside each test directory.
105 for every test dir where this file is found, the listed test names are
106 excluded from the list of tests to run from that test dir.
108 external_file argument is a path to a single file containing a list of tests
109 to exclude in the form of <test dir>/<test name>.
115 check -x stress xfs/*
116 check -X .exclude -g auto
117 check -E ~/.xfstests.exclude
# Fragments of get_sub_group_list() (maps a group name to the tests listed
# for it in one dir's group.list) and get_group_list() (dispatches on a
# plain group vs. <dir>/<group> form). Function headers and several interior
# lines are missing from this view — do not restructure.
53 127 test -s "$SRC_DIR/$d/group.list" || return 1
# sed prints "$SRC_DIR/$d/<test>" for each group.list line naming $grp
54 129 local grpl=$(sed -n < $SRC_DIR/$d/group.list \
55 132 -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
56 140 local sub=$(dirname $grp)
57 141 local fsgroup="$FSTYP"
58 143 if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
59 144 # group is given as <subdir>/<group> (e.g. xfs/quick)
60 146 get_sub_group_list $sub $grp
# presumably ext2/ext3 share the ext4 test dir — the assignment that
# handles it sits on a missing line; TODO confirm
61 150 if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
62 153 for d in $SRC_GROUPS $fsgroup; do
63 154 if ! test -d "$SRC_DIR/$d" ; then
64 157 grpl="$grpl $(get_sub_group_list $d $grp)"
162 # Find all tests, excluding files that are test metadata such as group files.
163 # It matches test names against $VALID_TEST_NAME defined in common/rc
# Iterates every known test dir; the command feeding the grep pipeline
# (an ls of the dir, presumably) is on a missing line — TODO confirm.
167 for d in $SRC_GROUPS $FSTYP; do
168 if ! test -d "$SRC_DIR/$d" ; then
173 grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
174 grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
178 # takes the list of tests to run in $tmp.list, and removes the tests passed to
179 # the function from that list.
# Patterns accumulate in $tmp.grep; every 100 patterns the grep -v is
# flushed so a huge exclude list doesn't build one enormous pattern file.
188 if [ $numsed -gt 100 ]; then
189 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
190 mv $tmp.tmp $tmp.list
# anchor each test name so xfs/001 does not also remove xfs/0011
194 echo "^$t\$" >>$tmp.grep
195 numsed=`expr $numsed + 1`
# final flush of any remaining patterns
197 grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
198 mv $tmp.tmp $tmp.list
# Assembles the final run list in $tmp.list from: explicit CLI tests
# ($tmp.arglist), included groups ($GROUP_LIST), minus excluded groups
# ($XGROUP_LIST); then sorts (or shuffles with -r). Interior lines missing.
217 # Tests specified on the command line
218 if [ -s $tmp.arglist ]; then
219 cat $tmp.arglist > $tmp.list
224 # Specified groups to include
225 # Note that the CLI processing adds a leading space to the first group
226 # parameter, so we have to catch that here checking for "all"
227 if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
228 # no test numbers, do everything
231 for group in $GROUP_LIST; do
232 list=$(get_group_list $group)
233 if [ -z "$list" ]; then
234 echo "Group \"$group\" is empty or not defined?"
# de-duplicate: only append tests not already in the list
239 grep -s "^$t\$" $tmp.list >/dev/null || \
240 echo "$t" >>$tmp.list
245 # Specified groups to exclude
246 for xgroup in $XGROUP_LIST; do
247 list=$(get_group_list $xgroup)
248 if [ -z "$list" ]; then
249 echo "Group \"$xgroup\" is empty or not defined?"
256 # sort the list of tests into numeric order unless we're running tests
257 # in the exact order specified
258 if ! $exact_order; then
# prefer shuf for -r; fall back to the awk randomizer when absent
260 if type shuf >& /dev/null; then
263 sorter="awk -v seed=$RANDOM -f randomize.awk"
268 list=`sort -n $tmp.list | uniq | $sorter`
# CLI option-parsing loop (fragment of a while/case; several case arms and
# the esac live on lines missing from this view — do not restructure).
# Fix applied: "Cannnot" -> "Cannot" in the --exact-order/-r conflict
# message; everything else is byte-identical to the original fragment.
275 # Process command arguments first.
276 while [ $# -gt 0 ]; do
278 -\? | -h | --help) usage ;;
281 -glusterfs) FSTYP=glusterfs ;;
284 -virtiofs) FSTYP=virtiofs ;;
285 -overlay) FSTYP=overlay; export OVERLAY=true ;;
286 -pvfs2) FSTYP=pvfs2 ;;
287 -tmpfs) FSTYP=tmpfs ;;
288 -ubifs) FSTYP=ubifs ;;
# -g/-x/-R accept comma- or space-separated lists; ${v//,/ } flattens them
290 -g) group=$2 ; shift ;
291 GROUP_LIST="$GROUP_LIST ${group//,/ }"
294 -x) xgroup=$2 ; shift ;
295 XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
298 -X) subdir_xfile=$2; shift ;
302 echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
305 -E) xfile=$2; shift ;
306 if [ -f $xfile ]; then
# strip comments from the external exclude file as it is appended
307 sed "s/#.*$//" "$xfile" >> $tmp.xlist
310 -s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
311 -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
313 -udiff) diff="$diff -u" ;;
# -r and --exact-order are mutually exclusive, rejected in both orders
317 if $exact_order; then
318 echo "Cannot specify -r and --exact-order."
325 echo "Cannot specify --exact-order and -r."
330 -i) iterations=$2; shift ;;
331 -I) iterations=$2; istop=true; shift ;;
332 -T) timestamp=true ;;
333 -d) DUMP_OUTPUT=true ;;
334 -b) brief_test_summary=true;;
335 -R) report_fmt=$2 ; shift ;
336 REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
339 --large-fs) export LARGE_SCRATCH_DEV=yes ;;
# NOTE(review): ${r#*=} reads a variable $r not set anywhere visible in
# this chunk — confirm $r is assigned (e.g. r="$1") on a missing line
340 --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;;
343 *) # not an argument, we've got tests now.
344 have_test_arg=true ;;
347 # if we've found a test specification, the break out of the processing
348 # loop before we shift the arguments so that this is the first argument
349 # that we process in the test arg loop below.
350 if $have_test_arg; then
357 # we need common/rc, that also sources common/config. We need to source it
358 # after processing args, overlay needs FSTYP set before sourcing common/config
359 if ! . ./common/rc; then
360 echo "check: failed to source common/rc"
# expand per-directory exclude files (-X) into $tmp.xlist, comments stripped
364 if [ -n "$subdir_xfile" ]; then
365 for d in $SRC_GROUPS $FSTYP; do
366 [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
367 for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
368 echo $d/$f >> $tmp.xlist
373 # Process tests from command line now.
374 if $have_test_arg; then
375 while [ $# -gt 0 ]; do
377 -*) echo "Arguments before tests, please!"
381 *) # Expand test pattern (e.g. xfs/???, *fs/001)
# relies on the shell globbing $1 inside $SRC_DIR — deliberately unquoted
382 list=$(cd $SRC_DIR; echo $1)
384 test_dir=`dirname $t`
385 test_dir=${test_dir#$SRC_DIR/*}
386 test_name=`basename $t`
387 group_file=$SRC_DIR/$test_dir/group.list
# only accept names that appear in the dir's group.list
389 if egrep -q "^$test_name" $group_file; then
390 # in group file ... OK
391 echo $SRC_DIR/$test_dir/$test_name \
395 echo "$t - unknown test, ignored"
403 elif [ -z "$GROUP_LIST" ]; then
404 # default group list is the auto group. If any other group or test is
405 # specified, we use that instead.
411 echo "check: QA must be run as root"
# Fragments of _global_log() (write to $check.log, and to the per-section
# report log when sections are in use) and _wrapup() (summary/accounting at
# the end of a section or run). Many interior lines are missing.
424 echo "$1" >> $check.log
425 if $OPTIONS_HAVE_SECTIONS; then
426 echo "$1" >> ${REPORT_DIR}/check.log
433 check="$RESULT_BASE/check"
# merge this run's per-test times into the accumulated $check.time
443 if [ -f $check.time -a -f $tmp.time ]; then
444 cat $check.time $tmp.time \
449 for (i in t) print i " " t[i]
453 mv $tmp.out $check.time
454 if $OPTIONS_HAVE_SECTIONS; then
455 cp $check.time ${REPORT_DIR}/check.time
460 _global_log "$(date)"
462 echo "SECTION -- $section" >>$tmp.summary
463 echo "=========================" >>$tmp.summary
464 if [ ! -z "$n_try" -a $n_try != 0 ]; then
# brief mode (-b) suppresses the per-test lists from the summary
465 if [ $brief_test_summary == "false" ]; then
467 echo "Ran:$try" >>$tmp.summary
469 _global_log "Ran:$try"
472 $interrupt && echo "Interrupted!" | tee -a $check.log
473 if $OPTIONS_HAVE_SECTIONS; then
474 $interrupt && echo "Interrupted!" | tee -a \
475 ${REPORT_DIR}/check.log
478 if [ ! -z "$notrun" ]; then
479 if [ $brief_test_summary == "false" ]; then
480 echo "Not run:$notrun"
481 echo "Not run:$notrun" >>$tmp.summary
483 _global_log "Not run:$notrun"
# failure/pass totals go to stdout, the global log, and the summary alike
486 if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
488 echo "Failed $n_bad of $n_try tests"
489 _global_log "Failures:$bad"
490 _global_log "Failed $n_bad of $n_try tests"
491 echo "Failures:$bad" >>$tmp.summary
492 echo "Failed $n_bad of $n_try tests" >>$tmp.summary
494 echo "Passed all $n_try tests"
495 _global_log "Passed all $n_try tests"
496 echo "Passed all $n_try tests" >>$tmp.summary
498 echo "" >>$tmp.summary
# accumulate per-section failures into the run-wide total
505 sum_bad=`expr $sum_bad + $n_bad`
507 rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
508 if ! $OPTIONS_HAVE_SECTIONS; then
# wc -L (GNU-only longest-line) sizes the summary banner — TODO confirm
519 count=`wc -L $tmp.summary | cut -f1 -d" "`
# Fragment of _check_filesystems(): fsck the test/scratch fs only when the
# just-run test flagged that it used them (require_test / require_scratch
# marker files), then remove the markers and unmount.
530 if [ -f ${RESULT_DIR}/require_test ]; then
531 _check_test_fs || ret=1
532 rm -f ${RESULT_DIR}/require_test*
534 _test_unmount 2> /dev/null
536 if [ -f ${RESULT_DIR}/require_scratch ]; then
537 _check_scratch_fs || ret=1
538 rm -f ${RESULT_DIR}/require_scratch*
540 _scratch_unmount 2> /dev/null
# Fragment of _expunge_test() plus top-level setup: probe for systemd scope
# support, shield check from the OOM killer, and install exit traps.
547 if [ -s $tmp.xlist ]; then
548 if grep -q $TEST_ID $tmp.xlist; then
556 # Can we run systemd scopes?
# a scope that can run "exit 77" and return 77 proves scopes work here
558 systemctl reset-failed "fstests-check" &>/dev/null
559 systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
560 test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
562 # Make the check script unattractive to the OOM killer...
563 OOM_SCORE_ADJ="/proc/self/oom_score_adj"
564 function _adjust_oom_score() {
565 test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
567 _adjust_oom_score -500
569 # ...and make the tests themselves somewhat more attractive to it, so that if
570 # the system runs out of memory it'll be the test that gets killed and not the
571 # test framework. The test is run in a separate process without any of our
572 # functions, so we open-code adjusting the OOM score.
574 # If systemd is available, run the entire test script in a scope so that we can
575 # kill all subprocesses of the test if it fails to clean up after itself. This
576 # is essential for ensuring that the post-test unmount succeeds. Note that
577 # systemd doesn't automatically remove transient scopes that fail to terminate
578 # when systemd tells them to terminate (e.g. programs stuck in D state when
579 # systemd sends SIGKILL), so we use reset-failed to tear down the scope.
# ('local' here implies this sits inside a _run_seq-style function whose
# header is on a missing line — TODO confirm)
581 local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
583 if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
584 local unit="$(systemd-escape "fs$seq").scope"
585 systemctl reset-failed "${unit}" &> /dev/null
586 systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
588 systemctl stop "${unit}" &> /dev/null
# ensure summary/wrapup runs on exit and on HUP/INT/QUIT/TERM
598 if $OPTIONS_HAVE_SECTIONS; then
599 trap "_summary; exit \$status" 0 1 2 3 15
601 trap "_wrapup; exit \$status" 0 1 2 3 15
# run_section(): per-section setup — load the section's config, honour
# -s/-S include/exclude, (re)create and mount the test device if the fs
# type or mount options changed, sanity-check the scratch device, and print
# the configuration banner. Interior lines are missing throughout.
604 function run_section()
609 OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
610 get_next_config $section
612 # Do we need to run only some sections ?
613 if [ ! -z "$RUN_SECTION" ]; then
615 for s in $RUN_SECTION; do
616 if [ $section == $s ]; then
626 # Did this section get excluded?
627 if [ ! -z "$EXCLUDE_SECTION" ]; then
629 for s in $EXCLUDE_SECTION; do
630 if [ $section == $s ]; then
640 mkdir -p $RESULT_BASE
641 if [ ! -d $RESULT_BASE ]; then
642 echo "failed to create results directory $RESULT_BASE"
647 if $OPTIONS_HAVE_SECTIONS; then
648 echo "SECTION -- $section"
651 sect_start=`_wallclock`
# remake TEST_DEV when asked to, or when the fs type changed between sections
652 if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
653 echo "RECREATING -- $FSTYP on $TEST_DEV"
654 _test_unmount 2> /dev/null
655 if ! _test_mkfs >$tmp.err 2>&1
657 echo "our local _test_mkfs routine ..."
659 echo "check: failed to mkfs \$TEST_DEV using specified options"
665 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
669 # TEST_DEV has been recreated, previous FSTYP derived from
670 # TEST_DEV could be changed, source common/rc again with
671 # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
# remount is also needed when only the mount options changed
674 elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
675 _test_unmount 2> /dev/null
678 echo "check: failed to mount $TEST_DEV on $TEST_DIR"
687 check="$RESULT_BASE/check"
689 # don't leave old full output behind on a clean run
692 [ -f $check.time ] || touch $check.time
694 # print out our test configuration
695 echo "FSTYP -- `_full_fstyp_details`"
696 echo "PLATFORM -- `_full_platform_details`"
697 if [ ! -z "$SCRATCH_DEV" ]; then
698 echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
699 echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
704 if [ ! -z "$SCRATCH_DEV" ]; then
705 _scratch_unmount 2> /dev/null
706 # call the overridden mkfs - make sure the FS is built
707 # the same as we'll create it later.
709 if ! _scratch_mkfs >$tmp.err 2>&1
711 echo "our local _scratch_mkfs routine ..."
713 echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
718 # call the overridden mount - make sure the FS mounts with
719 # the same options that we'll mount with later.
720 if ! _try_scratch_mount >$tmp.err 2>&1
722 echo "our local mount routine ..."
724 echo "check: failed to mount \$SCRATCH_DEV using specified options"
# Main per-test execution loop (fragment): for each $seq — report the
# previous test, resolve the test path, set up result dirs, honour expunge,
# run the test (optionally in a systemd scope), then classify the result
# (notrun / failed / output mismatch / pass) and run post-test fs checks.
738 for seq in $list ; do
739 # Run report for previous test!
742 n_bad=`expr $n_bad + 1`
745 if $do_report && ! $first_test ; then
746 if [ $tc_status != "expunge" ] ; then
747 _make_testcase_report "$prev_seq" "$tc_status"
754 if [ ! -f $seq ]; then
755 # Try to get full name in case the user supplied only
756 # seq id and the test has a name. A bit of hassle to
757 # find really the test and not its sample output or
759 bname=$(basename $seq)
# shortest matching executable wins, so xfs/001 beats xfs/001-something.out
760 full_seq=$(find $(dirname $seq) -name $bname* -executable |
761 awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
762 END { print shortest }')
763 if [ -f $full_seq ] && \
764 [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
769 # the filename for the test and the name output are different.
770 # we don't include the tests/ directory in the name output.
771 export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
773 # Similarly, the result directory needs to replace the tests/
774 # part of the test location.
776 if $OPTIONS_HAVE_SECTIONS; then
777 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
778 REPORT_DIR="$RESULT_BASE/$section"
780 export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
781 REPORT_DIR="$RESULT_BASE"
783 seqres="$REPORT_DIR/$seqnum"
# clear stale fs-check markers from a previous run of this test
786 rm -f ${RESULT_DIR}/require_scratch*
787 rm -f ${RESULT_DIR}/require_test*
791 _expunge_test $seqnum
792 if [ $? -eq 1 ]; then
800 n_notrun=`expr $n_notrun + 1`
805 if [ ! -f $seq ]; then
806 echo " - no such test?"
810 # really going to try and run this one
811 rm -f $seqres.out.bad
813 # check if we really should run it
814 _expunge_test $seqnum
815 if [ $? -eq 1 ]; then
820 # record that we really tried to run this test.
822 n_try=`expr $n_try + 1`
824 # slashes now in names, sed barfs on them so use grep
825 lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
826 if [ "X$lasttime" != X ]; then
827 echo -n " ${lasttime}s ... "
829 echo -n " " # prettier output with timestamps.
831 rm -f core $seqres.notrun
834 $timestamp && echo -n " ["`date "+%T"`"]"
835 [ ! -x $seq ] && chmod u+x $seq # ensure we can run it
836 $LOGGER_PROG "run xfstest $seqnum"
# announce the test in the kernel log so dmesg can be split per test
837 if [ -w /dev/kmsg ]; then
838 export date_time=`date +"%F %T"`
839 echo "run fstests $seqnum at $date_time" > /dev/kmsg
840 # _check_dmesg depends on this log in dmesg
841 touch ${RESULT_DIR}/check_dmesg
843 _try_wipe_scratch_devs > /dev/null 2>&1
845 # clear the WARN_ONCE state to allow a potential problem
846 # to be reported for each test
847 (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
849 if [ "$DUMP_OUTPUT" = true ]; then
850 _run_seq 2>&1 | tee $tmp.out
851 # Because $? would get tee's return code
854 _run_seq >$tmp.out 2>&1
859 _dump_err_cont "[dumped core]"
860 mv core $RESULT_BASE/$seqnum.core
864 if [ -f $seqres.notrun ]; then
865 $timestamp && _timestamp
867 $timestamp || echo -n "[not run] "
868 $timestamp && echo " [not run]" && \
869 echo -n " $seqnum -- "
871 notrun="$notrun $seqnum"
872 n_notrun=`expr $n_notrun + 1`
875 # Unmount the scratch fs so that we can wipe the scratch
876 # dev state prior to the next test run.
877 _scratch_unmount 2> /dev/null
881 if [ $sts -ne 0 ]; then
882 _dump_err_cont "[failed, exit status $sts]"
883 _test_unmount 2> /dev/null
884 _scratch_unmount 2> /dev/null
885 rm -f ${RESULT_DIR}/require_test*
886 rm -f ${RESULT_DIR}/require_scratch*
889 # The test apparently passed, so check for corruption
890 # and log messages that shouldn't be there. Run the
891 # checking tools from a subshell with adjusted OOM
892 # score so that the OOM killer will target them instead
893 # of the check script itself.
894 (_adjust_oom_score 250; _check_filesystems) || err=true
895 _check_dmesg || err=true
898 # Reload the module after each test to check for leaks or
900 if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
901 _test_unmount 2> /dev/null
902 _scratch_unmount 2> /dev/null
903 modprobe -r fs-$FSTYP
907 # Scan for memory leaks after every test so that associating
908 # a leak to a particular test will be as accurate as possible.
909 _check_kmemleak || err=true
911 # test ends after all checks are done.
912 $timestamp && _timestamp
915 if [ ! -f $seq.out ]; then
916 _dump_err "no qualified output"
921 # coreutils 8.16+ changed quote formats in error messages
922 # from `foo' to 'foo'. Filter old versions to match the new
924 sed -i "s/\`/\'/g" $tmp.out
# golden-output comparison: record timing on match, keep .out.bad on mismatch
925 if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
927 echo "$seqnum `expr $stop - $start`" >>$tmp.time
928 echo -n " `expr $stop - $start`s"
932 _dump_err "- output mismatch (see $seqres.out.bad)"
933 mv $tmp.out $seqres.out.bad
934 $diff $seq.out $seqres.out.bad | {
935 if test "$DIFF_LENGTH" -le 0; then
938 head -n "$DIFF_LENGTH"
940 echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
941 " to see the entire diff)"
942 fi; } | sed -e 's/^\(.\)/ \1/'
947 # make sure we record the status of the last test we ran.
950 n_bad=`expr $n_bad + 1`
953 if $do_report && ! $first_test ; then
954 if [ $tc_status != "expunge" ] ; then
955 _make_testcase_report "$prev_seq" "$tc_status"
959 sect_stop=`_wallclock`
965 _test_unmount 2> /dev/null
966 _scratch_unmount 2> /dev/null
# Top-level driver: run every configured section, $iterations (-i/-I) times;
# -I (istop) aborts further iterations on the first failure. Final status
# is 1 if any test failed, 0 otherwise.
969 for ((iters = 0; iters < $iterations; iters++)) do
970 for section in $HOST_OPTIONS_SECTIONS; do
972 if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
974 status=`expr $sum_bad != 0`
981 status=`expr $sum_bad != 0`