# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
# Control script for QA
brief_test_summary=false
# This is a global variable used to pass test failure text to reporting gunk
# start the initialisation work now
export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}
# number of diff lines from a failed test, 0 for whole output
export DIFF_LENGTH=${DIFF_LENGTH:=10}
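# Illustrative invocation (not part of this script): to see the entire
# diff of a failing test instead of the first 10 lines, run e.g.
#	DIFF_LENGTH=0 ./check generic/001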
# by default don't output timestamps
timestamp=${TIMESTAMP:=false}
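# Illustrative: timestamps can be enabled either with the -T option or by
# running TIMESTAMP=true ./check ...; each test line then carries a
# wall-clock time (see the `date "+%T"` call further down).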
rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*

SRC_GROUPS="generic shared"
export SRC_DIR="tests"
echo "Usage: $0 [options] [testlist]"'

-glusterfs		test GlusterFS
-virtiofs		test virtiofs
-udiff			show unified diff (default)
-n			show me, do not run tests
-r			randomize test order
--exact-order		run tests in the exact order specified
-i <n>			iterate the test list <n> times
-I <n>			iterate the test list <n> times, but stop iterating on any test failure
-d			dump test output to stdout
-R fmt[,fmt]		generate report in the formats specified. Supported formats: [xunit]
--large-fs		optimise scratch device for large filesystems
-s section		run only specified section from config file
-S section		exclude the specified section from the config file
-g group[,group...]	include tests from these groups
-x group[,group...]	exclude tests from these groups
-X exclude_file		exclude individual tests
-e testlist		exclude a specific list of tests
-E external_file	exclude individual tests
[testlist]		include tests matching names in testlist

testlist argument is a list of tests in the form of <test dir>/<test name>.

<test dir> is a directory under tests that contains a group file,
with a list of the names of the tests in that directory.

<test name> may be either a specific test file name (e.g. xfs/001) or
a test file name match pattern (e.g. xfs/*).

group argument is either a name of a tests group to collect from all
the test dirs (e.g. quick) or a name of a tests group to collect from
a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
If you want to run all the tests in the test suite, use "-g all" to specify all
groups.

exclude_file argument refers to a name of a file inside each test directory.
For every test dir where this file is found, the listed test names are
excluded from the list of tests to run from that test dir.

external_file argument is a path to a single file containing a list of tests
to exclude in the form of <test dir>/<test name>.

check -x stress xfs/*
check -X .exclude -g auto
check -E ~/.xfstests.exclude
test -s "$SRC_DIR/$d/group.list" || return 1

local grpl=$(sed -n < $SRC_DIR/$d/group.list \
	-e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
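# Illustrative example, assuming the usual group.list format of
# "<test name> <group> <group>...": for grp=quick, a line
# "001 auto quick rw" in tests/xfs/group.list yields "tests/xfs/001".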
local sub=$(dirname $grp)
local fsgroup="$FSTYP"

if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
	# group is given as <subdir>/<group> (e.g. xfs/quick)
	get_sub_group_list $sub $grp

if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
for d in $SRC_GROUPS $fsgroup; do
	if ! test -d "$SRC_DIR/$d" ; then
	grpl="$grpl $(get_sub_group_list $d $grp)"
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc.
for d in $SRC_GROUPS $FSTYP; do
	if ! test -d "$SRC_DIR/$d" ; then
	grep "^$SRC_DIR/$d/$VALID_TEST_NAME" | \
		grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
if [ $numsed -gt 100 ]; then
	grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
	mv $tmp.tmp $tmp.list
echo "^$t\$" >>$tmp.grep
numsed=`expr $numsed + 1`
grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
mv $tmp.tmp $tmp.list
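# Note (illustrative sketch of the behaviour): the anchored patterns are
# applied in batches, so excluding e.g. 250 tests runs the grep/mv pair
# roughly three times, keeping $tmp.grep to at most ~100 patterns per pass.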
# Tests specified on the command line
if [ -s $tmp.arglist ]; then
	cat $tmp.arglist > $tmp.list

# Specified groups to include
# Note that the CLI processing adds a leading space to the first group
# parameter, so we have to catch that here checking for "all"
if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then
	# no test numbers, do everything
for group in $GROUP_LIST; do
	list=$(get_group_list $group)
	if [ -z "$list" ]; then
		echo "Group \"$group\" is empty or not defined?"
	grep -s "^$t\$" $tmp.list >/dev/null || \
		echo "$t" >>$tmp.list

# Specified groups to exclude
for xgroup in $XGROUP_LIST; do
	list=$(get_group_list $xgroup)
	if [ -z "$list" ]; then
		echo "Group \"$xgroup\" is empty or not defined?"
# sort the list of tests into numeric order unless we're running tests
# in the exact order specified
if ! $exact_order; then
	if type shuf >& /dev/null; then
	sorter="awk -v seed=$RANDOM -f randomize.awk"
	list=`sort -n $tmp.list | uniq | $sorter`
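	# Illustrative: the final list is numerically sorted and
	# de-duplicated; with -r, $sorter is shuf(1) when available,
	# otherwise the bundled randomize.awk seeded from $RANDOM.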
# Process command arguments first.
while [ $# -gt 0 ]; do
	-\? | -h | --help) usage ;;
	-nfs|-glusterfs|-cifs|-9p|-virtiofs|-pvfs2|-tmpfs|-ubifs)
	-g) group=$2 ; shift ;
		GROUP_LIST="$GROUP_LIST ${group//,/ }"
	-x) xgroup=$2 ; shift ;
		XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
	-X) subdir_xfile=$2; shift ;
	echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
	-E) xfile=$2; shift ;
		if [ -f $xfile ]; then
			sed "s/#.*$//" "$xfile" >> $tmp.xlist
	-s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
	-S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
	-udiff) diff="$diff -u" ;;
	if $exact_order; then
		echo "Cannot specify -r and --exact-order."
	echo "Cannot specify --exact-order and -r."
	-i) iterations=$2; shift ;;
	-I) iterations=$2; istop=true; shift ;;
	-T) timestamp=true ;;
	-d) DUMP_OUTPUT=true ;;
	-b) brief_test_summary=true ;;
	-R) report_fmt=$2 ; shift ;
		REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }"
	--large-fs) export LARGE_SCRATCH_DEV=yes ;;
	--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
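	# Illustrative expansion: "${1#*=}" strips everything through the
	# first "=", so --extra-space=16g sets SCRATCH_DEV_EMPTY_SPACE
	# to "16g".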
	*) # not an argument, we've got tests now.
		have_test_arg=true ;;

# if we've found a test specification, then break out of the processing
# loop before we shift the arguments so that this is the first argument
# that we process in the test arg loop below.
if $have_test_arg; then
# we need common/rc, which also sources common/config. We need to source it
# after processing args, as overlay needs FSTYP set before sourcing common/config.
if ! . ./common/rc; then
	echo "check: failed to source common/rc"
if [ -n "$subdir_xfile" ]; then
	for d in $SRC_GROUPS $FSTYP; do
		[ -f $SRC_DIR/$d/$subdir_xfile ] || continue
		for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
			echo $d/$f >> $tmp.xlist
# Process tests from command line now.
if $have_test_arg; then
	while [ $# -gt 0 ]; do
		-*) echo "Arguments before tests, please!"
		*) # Expand test pattern (e.g. xfs/???, *fs/001)
			list=$(cd $SRC_DIR; echo $1)
			test_dir=`dirname $t`
			test_dir=${test_dir#$SRC_DIR/*}
			test_name=`basename $t`
			group_file=$SRC_DIR/$test_dir/group.list

			if egrep -q "^$test_name" $group_file; then
				# in group file ... OK
				echo $SRC_DIR/$test_dir/$test_name \
			echo "$t - unknown test, ignored"
elif [ -z "$GROUP_LIST" ]; then
	# default group list is the auto group. If any other group or test is
	# specified, we use that instead.
echo "check: QA must be run as root"

echo "$1" >> $check.log
if $OPTIONS_HAVE_SECTIONS; then
	echo "$1" >> ${REPORT_DIR}/check.log
check="$RESULT_BASE/check"

if $showme && $needwrap; then
	# $showme = all selected tests are notrun (no tries)
	_make_section_report "${#notrun[*]}" "0" "${#notrun[*]}"

if [ -f $check.time -a -f $tmp.time ]; then
	cat $check.time $tmp.time \
		for (i in t) print i " " t[i]
	mv $tmp.out $check.time
	if $OPTIONS_HAVE_SECTIONS; then
		cp $check.time ${REPORT_DIR}/check.time
_global_log "$(date)"

echo "SECTION -- $section" >>$tmp.summary
echo "=========================" >>$tmp.summary
if ((${#try[*]} > 0)); then
	if [ $brief_test_summary == "false" ]; then
		echo "Ran: ${try[*]}"
		echo "Ran: ${try[*]}" >>$tmp.summary
	_global_log "Ran: ${try[*]}"

$interrupt && echo "Interrupted!" | tee -a $check.log
if $OPTIONS_HAVE_SECTIONS; then
	$interrupt && echo "Interrupted!" | tee -a \
		${REPORT_DIR}/check.log

if ((${#notrun[*]} > 0)); then
	if [ $brief_test_summary == "false" ]; then
		echo "Not run: ${notrun[*]}"
		echo "Not run: ${notrun[*]}" >>$tmp.summary
	_global_log "Not run: ${notrun[*]}"

if ((${#bad[*]} > 0)); then
	echo "Failures: ${bad[*]}"
	echo "Failed ${#bad[*]} of ${#try[*]} tests"
	_global_log "Failures: ${bad[*]}"
	_global_log "Failed ${#bad[*]} of ${#try[*]} tests"
	echo "Failures: ${bad[*]}" >>$tmp.summary
	echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary
echo "Passed all ${#try[*]} tests"
_global_log "Passed all ${#try[*]} tests"
echo "Passed all ${#try[*]} tests" >>$tmp.summary
echo "" >>$tmp.summary

_make_section_report "${#try[*]}" "${#bad[*]}" "${#notrun[*]}"
sum_bad=`expr $sum_bad + ${#bad[*]}`
rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
if ! $OPTIONS_HAVE_SECTIONS; then

count=`wc -L $tmp.summary | cut -f1 -d" "`
if [ -f ${RESULT_DIR}/require_test ]; then
	_check_test_fs || ret=1
	rm -f ${RESULT_DIR}/require_test*
	_test_unmount 2> /dev/null

if [ -f ${RESULT_DIR}/require_scratch ]; then
	_check_scratch_fs || ret=1
	rm -f ${RESULT_DIR}/require_scratch*
	_scratch_unmount 2> /dev/null

if [ -s $tmp.xlist ]; then
	if grep -q $TEST_ID $tmp.xlist; then
# Can we run systemd scopes?
systemctl reset-failed "fstests-check" &>/dev/null
systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
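# This probe works because "systemd-run --scope" runs its payload in the
# foreground and propagates the payload's exit status, so reading back the
# sentinel value 77 confirms that scopes are usable on this system.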
# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
function _adjust_oom_score() {
	test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
_adjust_oom_score -500
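# For reference: oom_score_adj accepts values from -1000 (never OOM-kill)
# to 1000 (kill first), so -500 deprioritizes the framework without
# exempting it entirely.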
# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
# test framework. The test is run in a separate process without any of our
# functions, so we open-code adjusting the OOM score.
#
# If systemd is available, run the entire test script in a scope so that we can
# kill all subprocesses of the test if it fails to clean up after itself. This
# is essential for ensuring that the post-test unmount succeeds. Note that
# systemd doesn't automatically remove transient scopes that fail to terminate
# when systemd tells them to terminate (e.g. programs stuck in D state when
# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")

if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
	local unit="$(systemd-escape "fs$seq").scope"
	systemctl reset-failed "${unit}" &> /dev/null
	systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
	systemctl stop "${unit}" &> /dev/null
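	# Sketch of the non-systemd fallback (assumed, not shown here):
	# run the command directly, e.g. "${cmd[@]}", accepting that stray
	# subprocesses will not be reaped for us.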
if $OPTIONS_HAVE_SECTIONS; then
	trap "_summary; exit \$status" 0 1 2 3 15
trap "_wrapup; exit \$status" 0 1 2 3 15
function run_section()
OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
get_next_config $section

# Do we need to run only some sections?
if [ ! -z "$RUN_SECTION" ]; then
	for s in $RUN_SECTION; do
		if [ $section == $s ]; then

# Did this section get excluded?
if [ ! -z "$EXCLUDE_SECTION" ]; then
	for s in $EXCLUDE_SECTION; do
		if [ $section == $s ]; then
mkdir -p $RESULT_BASE
if [ ! -d $RESULT_BASE ]; then
	echo "failed to create results directory $RESULT_BASE"

if $OPTIONS_HAVE_SECTIONS; then
	echo "SECTION -- $section"

sect_start=`_wallclock`
if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
	echo "RECREATING -- $FSTYP on $TEST_DEV"
	_test_unmount 2> /dev/null
	if ! _test_mkfs >$tmp.err 2>&1
		echo "our local _test_mkfs routine ..."
		echo "check: failed to mkfs \$TEST_DEV using specified options"
	echo "check: failed to mount $TEST_DEV on $TEST_DIR"
	# TEST_DEV has been recreated, so the FSTYP previously derived from
	# TEST_DEV may have changed; source common/rc again with the correct
	# FSTYP to pick up FSTYP-specific configs, e.g. common/xfs
elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
	_test_unmount 2> /dev/null
	echo "check: failed to mount $TEST_DEV on $TEST_DIR"
check="$RESULT_BASE/check"

# don't leave old full output behind on a clean run
[ -f $check.time ] || touch $check.time

# print out our test configuration
echo "FSTYP -- `_full_fstyp_details`"
echo "PLATFORM -- `_full_platform_details`"
if [ ! -z "$SCRATCH_DEV" ]; then
	echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
	echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
if [ ! -z "$SCRATCH_DEV" ]; then
	_scratch_unmount 2> /dev/null
	# call the overridden mkfs - make sure the FS is built
	# the same as we'll create it later.
	if ! _scratch_mkfs >$tmp.err 2>&1
		echo "our local _scratch_mkfs routine ..."
		echo "check: failed to mkfs \$SCRATCH_DEV using specified options"

	# call the overridden mount - make sure the FS mounts with
	# the same options that we'll mount with later.
	if ! _try_scratch_mount >$tmp.err 2>&1
		echo "our local mount routine ..."
		echo "check: failed to mount \$SCRATCH_DEV using specified options"
local tc_status="init"

for seq in $list ; do
	# Run report for previous test!
	if [ "$tc_status" == "fail" ]; then
	if $do_report && [[ ! $tc_status =~ ^(init|expunge)$ ]]; then
		_make_testcase_report "$prev_seq" "$tc_status"
	if [ ! -f $seq ]; then
		# Try to get the full name in case the user supplied only the
		# seq id and the test has a name. It's a bit of a hassle to
		# really find the test and not its sample output or
		bname=$(basename $seq)
		full_seq=$(find $(dirname $seq) -name $bname* -executable |
			awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
			END { print shortest }')
		if [ -f $full_seq ] && \
		   [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
	# the filename for the test and the name output are different.
	# we don't include the tests/ directory in the name output.
	export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
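	# Illustrative: seq=tests/xfs/001 becomes seqnum=xfs/001.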
	# Similarly, the result directory needs to replace the tests/
	# part of the test location.
	if $OPTIONS_HAVE_SECTIONS; then
		export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
		REPORT_DIR="$RESULT_BASE/$section"
	export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
	REPORT_DIR="$RESULT_BASE"
	seqres="$REPORT_DIR/$seqnum"
	rm -f ${RESULT_DIR}/require_scratch*
	rm -f ${RESULT_DIR}/require_test*

	_expunge_test $seqnum
	if [ $? -eq 1 ]; then

	if [ ! -f $seq ]; then
		echo " - no such test?"
	# really going to try and run this one
	rm -f $seqres.out.bad $seqres.hints

	# check if we really should run it
	_expunge_test $seqnum
	if [ $? -eq 1 ]; then

	# record that we really tried to run this test.
	awk 'BEGIN {lasttime=" "} \
		$1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \
		END {printf "%s", lasttime}' "$check.time"
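	# Illustrative: if check.time contains a line "generic/001 3", this
	# prints " 3s ... " as a runtime estimate before the test starts.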
	rm -f core $seqres.notrun

	$timestamp && echo -n " ["`date "+%T"`"]"
	[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
	$LOGGER_PROG "run xfstest $seqnum"
	if [ -w /dev/kmsg ]; then
		export date_time=`date +"%F %T"`
		echo "run fstests $seqnum at $date_time" > /dev/kmsg
		# _check_dmesg depends on this log in dmesg
		touch ${RESULT_DIR}/check_dmesg
	_try_wipe_scratch_devs > /dev/null 2>&1

	# clear the WARN_ONCE state to allow a potential problem
	# to be reported for each test
	(echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
830 if [ "$DUMP_OUTPUT" = true ]; then
831 _run_seq 2>&1 | tee $tmp.out
832 # Because $? would get tee's return code
835 _run_seq >$tmp.out 2>&1
840 _dump_err_cont "[dumped core]"
841 mv core $RESULT_BASE/$seqnum.core
	if [ -f $seqres.notrun ]; then
		$timestamp && _timestamp
		$timestamp || echo -n "[not run] "
		$timestamp && echo " [not run]" && \
			echo -n " $seqnum -- "

	# Unmount the scratch fs so that we can wipe the scratch
	# dev state prior to the next test run.
	_scratch_unmount 2> /dev/null
	if [ $sts -ne 0 ]; then
		_dump_err_cont "[failed, exit status $sts]"
		_test_unmount 2> /dev/null
		_scratch_unmount 2> /dev/null
		rm -f ${RESULT_DIR}/require_test*
		rm -f ${RESULT_DIR}/require_scratch*
	# The test apparently passed, so check for corruption
	# and log messages that shouldn't be there. Run the
	# checking tools from a subshell with adjusted OOM
	# score so that the OOM killer will target them instead
	# of the check script itself.
	(_adjust_oom_score 250; _check_filesystems) || tc_status="fail"
	_check_dmesg || tc_status="fail"

	# Reload the module after each test to check for leaks or
	if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
		_test_unmount 2> /dev/null
		_scratch_unmount 2> /dev/null
		modprobe -r fs-$FSTYP

	# Scan for memory leaks after every test so that associating
	# a leak to a particular test will be as accurate as possible.
	_check_kmemleak || tc_status="fail"
	# test ends after all checks are done.
	$timestamp && _timestamp

	if [ ! -f $seq.out ]; then
		_dump_err "no qualified output"
	# coreutils 8.16+ changed quote formats in error messages
	# from `foo' to 'foo'. Filter old versions to match the new format.
	sed -i "s/\`/\'/g" $tmp.out
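	# Illustrative: "cannot remove `foo'" becomes "cannot remove 'foo'",
	# matching golden output produced with newer coreutils.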
	if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
		if [ "$tc_status" != "fail" ]; then
			echo "$seqnum `expr $stop - $start`" >>$tmp.time
			echo -n " `expr $stop - $start`s"
	_dump_err "- output mismatch (see $seqres.out.bad)"
	mv $tmp.out $seqres.out.bad
	$diff $seq.out $seqres.out.bad | {
		if test "$DIFF_LENGTH" -le 0; then
			head -n "$DIFF_LENGTH"
			echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
				" to see the entire diff)"
		fi; } | sed -e 's/^\(.\)/ \1/'
	if [ -f $seqres.hints ]; then
		if [ "$tc_status" == "fail" ]; then

# make sure we record the status of the last test we ran.
if [ "$tc_status" == "fail" ]; then
if $do_report && [[ ! $tc_status =~ ^(init|expunge)$ ]]; then
	_make_testcase_report "$prev_seq" "$tc_status"
sect_stop=`_wallclock`

_test_unmount 2> /dev/null
_scratch_unmount 2> /dev/null
for ((iters = 0; iters < $iterations; iters++)) do
	for section in $HOST_OPTIONS_SECTIONS; do
		if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
			status=`expr $sum_bad != 0`

status=`expr $sum_bad != 0`