status=0
needwrap=true
needsum=true
-n_try=0
-try=""
-n_bad=0
+try=()
sum_bad=0
-bad=""
-n_notrun=0
-notrun=""
+bad=()
+notrun=()
interrupt=true
diff="diff -u"
showme=false
DUMP_OUTPUT=false
iterations=1
istop=false
+loop_on_fail=0
+exclude_tests=()
# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""
# start the initialisation work now
iam=check
+# mkfs.xfs uses the presence of both of these variables to enable formerly
+# supported tiny filesystem configurations that fstests use for fuzz testing
+# in a controlled environment
export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}
# by default don't output timestamps
timestamp=${TIMESTAMP:=false}
-rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
+rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.report.* $tmp.arglist
SRC_GROUPS="generic shared"
export SRC_DIR="tests"
check options
-nfs test NFS
+ -afs test AFS
-glusterfs test GlusterFS
-cifs test CIFS
-9p test 9p
+ -fuse test fuse
-virtiofs test virtiofs
-overlay test overlay
-pvfs2 test PVFS2
-I <n> iterate the test list <n> times, stopping at the first test failure
-d dump test output to stdout
-b brief test summary
- -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
+ -R fmt[,fmt] generate report in formats specified. Supported formats: xunit, xunit-quiet
--large-fs optimise scratch device for large filesystems
-s section run only specified section from config file
-S section exclude the specified section from the config file
+ -L <n> loop tests <n> times following a failure, measuring aggregate pass/fail metrics
testlist options
-g group[,group...] include tests from these groups
# the function from that list.
trim_test_list()
{
- test_list="$*"
+ local test_list="$*"
rm -f $tmp.grep
- numsed=0
+ local numsed=0
for t in $test_list
do
if [ $numsed -gt 100 ]; then
_timestamp()
{
- now=`date "+%T"`
+ local now=`date "+%T"`
echo -n " [$now]"
}
case "$1" in
-\? | -h | --help) usage ;;
- -nfs) FSTYP=nfs ;;
- -glusterfs) FSTYP=glusterfs ;;
- -cifs) FSTYP=cifs ;;
- -9p) FSTYP=9p ;;
- -virtiofs) FSTYP=virtiofs ;;
- -overlay) FSTYP=overlay; export OVERLAY=true ;;
- -pvfs2) FSTYP=pvfs2 ;;
- -tmpfs) FSTYP=tmpfs ;;
- -ubifs) FSTYP=ubifs ;;
+ -nfs|-afs|-glusterfs|-cifs|-9p|-fuse|-virtiofs|-pvfs2|-tmpfs|-ubifs)
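+		# ${1:1} strips the leading dash, e.g. "-nfs" selects FSTYP=nfs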
+ FSTYP="${1:1}"
+ ;;
+ -overlay)
+ [ "$FSTYP" == overlay ] || export OVL_BASE_FSTYP="$FSTYP"
+ FSTYP=overlay
+ export OVERLAY=true
+ ;;
-g) group=$2 ; shift ;
GROUP_LIST="$GROUP_LIST ${group//,/ }"
;;
-e)
xfile=$2; shift ;
- echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
+ readarray -t -O "${#exclude_tests[@]}" exclude_tests < \
+ <(echo "$xfile" | tr ', ' '\n\n')
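+		# e.g. "-e generic/001,generic/002" appends both ids to exclude_tests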
;;
-E) xfile=$2; shift ;
if [ -f $xfile ]; then
- sed "s/#.*$//" "$xfile" >> $tmp.xlist
- fi
+ readarray -t -O ${#exclude_tests[@]} exclude_tests < \
+ <(sed "s/#.*$//" $xfile)
+ fi
;;
-s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
-S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
;;
--large-fs) export LARGE_SCRATCH_DEV=yes ;;
--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
+ -L) [[ $2 =~ ^[0-9]+$ ]] || usage
+ loop_on_fail=$2; shift
+ ;;
-*) usage ;;
*) # not an argument, we've got tests now.
exit 1
fi
+# If the test config specified a soak test duration, see if there are any
+# unit suffixes that need converting to an integer seconds count.
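+# For example, assuming src/soak_duration.awk understands s/m/h/d suffixes,
+# "30m" converts to 1800 and "2.5h" to 9000.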
+if [ -n "$SOAK_DURATION" ]; then
+ SOAK_DURATION="$(echo "$SOAK_DURATION" | \
+ sed -e 's/^\([.0-9]*\)\([a-z]\)*/\1 \2/g' | \
+ $AWK_PROG -f $here/src/soak_duration.awk)"
+ if [ $? -ne 0 ]; then
+ status=1
+ exit 1
+ fi
+fi
+
if [ -n "$subdir_xfile" ]; then
for d in $SRC_GROUPS $FSTYP; do
[ -f $SRC_DIR/$d/$subdir_xfile ] || continue
for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
- echo $d/$f >> $tmp.xlist
+ exclude_tests+=($d/$f)
done
done
fi
*) # Expand test pattern (e.g. xfs/???, *fs/001)
list=$(cd $SRC_DIR; echo $1)
for t in $list; do
- test_dir=`dirname $t`
- test_dir=${test_dir#$SRC_DIR/*}
- test_name=`basename $t`
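+			# e.g. t="xfs/001" (or "tests/xfs/001") yields
+			# test_dir=xfs, test_name=001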
+ t=${t#$SRC_DIR/}
+ test_dir=${t%%/*}
+ test_name=${t##*/}
group_file=$SRC_DIR/$test_dir/group.list
- if egrep -q "^$test_name" $group_file; then
+ if grep -Eq "^$test_name" $group_file; then
# in group file ... OK
echo $SRC_DIR/$test_dir/$test_name \
>>$tmp.arglist
_wipe_counters()
{
- n_try="0"
- n_bad="0"
- n_notrun="0"
- unset try notrun bad
+ try=()
+ notrun=()
+ bad=()
}
_global_log() {
fi
}
+if [ -n "$REPORT_GCOV" ]; then
+ . ./common/gcov
+ _gcov_check_report_gcov
+fi
+
_wrapup()
{
seq="check"
check="$RESULT_BASE/check"
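+	# an interrupt skips the normal end of the section, so record the
+	# stop time now for the duration reported below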
+ $interrupt && sect_stop=`_wallclock`
- if $showme; then
- if $needwrap; then
- if $do_report; then
- _make_section_report
- fi
- needwrap=false
+ if $showme && $needwrap; then
+ if $do_report; then
+ # $showme = all selected tests are notrun (no tries)
+ _make_section_report "$section" "${#notrun[*]}" "0" \
+ "${#notrun[*]}" \
+ "$((sect_stop - sect_start))"
fi
+ needwrap=false
elif $needwrap; then
if [ -f $check.time -a -f $tmp.time ]; then
cat $check.time $tmp.time \
echo "SECTION -- $section" >>$tmp.summary
echo "=========================" >>$tmp.summary
- if [ ! -z "$n_try" -a $n_try != 0 ]; then
+ if ((${#try[*]} > 0)); then
if [ $brief_test_summary == "false" ]; then
- echo "Ran:$try"
- echo "Ran:$try" >>$tmp.summary
+ echo "Ran: ${try[*]}"
+ echo "Ran: ${try[*]}" >>$tmp.summary
fi
- _global_log "Ran:$try"
+ _global_log "Ran: ${try[*]}"
fi
$interrupt && echo "Interrupted!" | tee -a $check.log
${REPORT_DIR}/check.log
fi
- if [ ! -z "$notrun" ]; then
+ if ((${#notrun[*]} > 0)); then
if [ $brief_test_summary == "false" ]; then
- echo "Not run:$notrun"
- echo "Not run:$notrun" >>$tmp.summary
+ echo "Not run: ${notrun[*]}"
+ echo "Not run: ${notrun[*]}" >>$tmp.summary
fi
- _global_log "Not run:$notrun"
+ _global_log "Not run: ${notrun[*]}"
fi
- if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
- echo "Failures:$bad"
- echo "Failed $n_bad of $n_try tests"
- _global_log "Failures:$bad"
- _global_log "Failed $n_bad of $n_try tests"
- echo "Failures:$bad" >>$tmp.summary
- echo "Failed $n_bad of $n_try tests" >>$tmp.summary
+ if ((${#bad[*]} > 0)); then
+ echo "Failures: ${bad[*]}"
+ echo "Failed ${#bad[*]} of ${#try[*]} tests"
+ _global_log "Failures: ${bad[*]}"
+ _global_log "Failed ${#bad[*]} of ${#try[*]} tests"
+ echo "Failures: ${bad[*]}" >>$tmp.summary
+ echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary
else
- echo "Passed all $n_try tests"
- _global_log "Passed all $n_try tests"
- echo "Passed all $n_try tests" >>$tmp.summary
+ echo "Passed all ${#try[*]} tests"
+ _global_log "Passed all ${#try[*]} tests"
+ echo "Passed all ${#try[*]} tests" >>$tmp.summary
fi
echo "" >>$tmp.summary
if $do_report; then
- _make_section_report
+ _make_section_report "$section" "${#try[*]}" \
+ "${#bad[*]}" "${#notrun[*]}" \
+ "$((sect_stop - sect_start))"
+ fi
+
+ # Generate code coverage report
+ if [ -n "$REPORT_GCOV" ]; then
+ # don't trigger multiple times if caller hits ^C
+ local gcov_report_dir="$REPORT_GCOV"
+ test "$gcov_report_dir" = "1" && \
+ gcov_report_dir="$REPORT_DIR/gcov"
+ unset REPORT_GCOV
+
+ _gcov_generate_report "$gcov_report_dir"
fi
+
needwrap=false
fi
- sum_bad=`expr $sum_bad + $n_bad`
+ sum_bad=`expr $sum_bad + ${#bad[*]}`
_wipe_counters
rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
if ! $OPTIONS_HAVE_SECTIONS; then
local ret=0
if [ -f ${RESULT_DIR}/require_test ]; then
- _check_test_fs || ret=1
+ if ! _check_test_fs ; then
+ ret=1
+ echo "Trying to repair broken TEST_DEV file system"
+ _repair_test_fs
+ _test_mount
+ fi
rm -f ${RESULT_DIR}/require_test*
else
_test_unmount 2> /dev/null
_expunge_test()
{
local TEST_ID="$1"
- if [ -s $tmp.xlist ]; then
- if grep -q $TEST_ID $tmp.xlist; then
+
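+	# returns 0 (and prints " [expunged]") when $TEST_ID matches an
+	# exclude list entry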
+ for f in "${exclude_tests[@]}"; do
+		# $f may contain trailing spaces and comments
+ local id_regex="^${TEST_ID}\b"
+ if [[ "$f" =~ ${id_regex} ]]; then
echo " [expunged]"
- return 1
+ return 0
fi
+ done
+ return 1
+}
+
+# retain files which would be overwritten in subsequent reruns of the same test
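+# (e.g. the initial failure's generic/013.out.bad is preserved as
+# generic/013.out.bad.rerun0 before rerun 1 overwrites the original)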
+_stash_fail_loop_files() {
+ local seq_prefix="${REPORT_DIR}/${1}"
+ local cp_suffix="$2"
+
+ for i in ".full" ".dmesg" ".out.bad" ".notrun" ".core" ".hints"; do
+ rm -f "${seq_prefix}${i}${cp_suffix}"
+ if [ -f "${seq_prefix}${i}" ]; then
+ cp "${seq_prefix}${i}" "${seq_prefix}${i}${cp_suffix}"
+ fi
+ done
+}
+
+# Retain in @bad / @notrun the result of the just-run @test_seq. @try array
+# entries are added prior to execution.
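+# With -L 3, for example, an initial failure is followed by three reruns and
+# an aggregate summary line such as:
+#   generic/013 aggregate results across 4 runs: pass=3 (75.0%), fail=1 (25.0%)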
+_stash_test_status() {
+ local test_seq="$1"
+ local test_status="$2"
+
+ if $do_report && [[ $test_status != "expunge" ]]; then
+ _make_testcase_report "$section" "$test_seq" \
+ "$test_status" "$((stop - start))"
+ fi
+
+ if ((${#loop_status[*]} > 0)); then
+ # continuing or completing rerun-on-failure loop
+ _stash_fail_loop_files "$test_seq" ".rerun${#loop_status[*]}"
+ loop_status+=("$test_status")
+ if ((${#loop_status[*]} > loop_on_fail)); then
+ printf "%s aggregate results across %d runs: " \
+ "$test_seq" "${#loop_status[*]}"
+ awk "BEGIN {
+ n=split(\"${loop_status[*]}\", arr);"'
+ for (i = 1; i <= n; i++)
+ stats[arr[i]]++;
+ for (x in stats)
+ printf("%s=%d (%.1f%%)",
+ (i-- > n ? x : ", " x),
+ stats[x], 100 * stats[x] / n);
+ }'
+ echo
+ loop_status=()
+ fi
+ return # only stash @bad result for initial failure in loop
fi
- return 0
+
+ case "$test_status" in
+ fail)
+ if ((loop_on_fail > 0)); then
+ # initial failure, start rerun-on-failure loop
+ _stash_fail_loop_files "$test_seq" ".rerun0"
+ loop_status+=("$test_status")
+ fi
+ bad+=("$test_seq")
+ ;;
+ list|notrun)
+ notrun+=("$test_seq")
+ ;;
+ pass|expunge)
+ ;;
+ *)
+ echo "Unexpected test $test_seq status: $test_status"
+ ;;
+ esac
}
# Can we run systemd scopes?
function _adjust_oom_score() {
test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
}
-_adjust_oom_score -1000
+_adjust_oom_score -500
# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
_detect_kmemleak
_prepare_test_list
+fstests_start_time="$(date +"%F %T")"
if $OPTIONS_HAVE_SECTIONS; then
trap "_summary; exit \$status" 0 1 2 3 15
function run_section()
{
- local section=$1
+ local section=$1 skip
OLD_FSTYP=$FSTYP
OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
- get_next_config $section
# Do we need to run only some sections ?
if [ ! -z "$RUN_SECTION" ]; then
fi
fi
+ get_next_config $section
+ _canonicalize_devices
+
mkdir -p $RESULT_BASE
if [ ! -d $RESULT_BASE ]; then
echo "failed to create results directory $RESULT_BASE"
echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
fi
echo
+ test -n "$REPORT_GCOV" && _gcov_reset
needwrap=true
if [ ! -z "$SCRATCH_DEV" ]; then
seqres="$check"
_check_test_fs
- err=false
- first_test=true
- prev_seq=""
- for seq in $list ; do
- # Run report for previous test!
- if $err ; then
- bad="$bad $seqnum"
- n_bad=`expr $n_bad + 1`
- tc_status="fail"
- fi
- if $do_report && ! $first_test ; then
- if [ $tc_status != "expunge" ] ; then
- _make_testcase_report "$prev_seq" "$tc_status"
- fi
- fi
- first_test=false
+ loop_status=() # track rerun-on-failure state
+ local tc_status ix
+ local -a _list=( $list )
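	# ix advances only while loop_status is empty, so a test that entered
	# the rerun-on-failure loop is run again until the loop completes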
+ for ((ix = 0; ix < ${#_list[*]}; !${#loop_status[*]} && ix++)); do
+ seq="${_list[$ix]}"
- err=false
- prev_seq="$seq"
if [ ! -f $seq ]; then
# Try to get full name in case the user supplied only
# seq id and the test has a name. A bit of hassle to
# the filename for the test and the name output are different.
# we don't include the tests/ directory in the name output.
- export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
-
- # Similarly, the result directory needs to replace the tests/
- # part of the test location.
- group=`dirname $seq`
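+		# e.g. seq="tests/generic/001" gives seqnum=generic/001 and
+		# group=generic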
+ export seqnum=${seq#$SRC_DIR/}
+ group=${seqnum%%/*}
if $OPTIONS_HAVE_SECTIONS; then
- export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
REPORT_DIR="$RESULT_BASE/$section"
else
- export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
REPORT_DIR="$RESULT_BASE"
fi
+ export RESULT_DIR="$REPORT_DIR/$group"
seqres="$REPORT_DIR/$seqnum"
- mkdir -p $RESULT_DIR
- rm -f ${RESULT_DIR}/require_scratch*
- rm -f ${RESULT_DIR}/require_test*
+ # Generate the entire section report with whatever test results
+ # we have so far. Leave the $sect_time parameter empty so that
+ # it's a little more obvious that this test run is incomplete.
+ if $do_report; then
+ _make_section_report "$section" "${#try[*]}" \
+ "${#bad[*]}" "${#notrun[*]}" \
+ "" &> /dev/null
+ fi
+
echo -n "$seqnum"
if $showme; then
- _expunge_test $seqnum
- if [ $? -eq 1 ]; then
- tc_status="expunge"
- continue
+ if _expunge_test $seqnum; then
+ tc_status="expunge"
+ else
+ echo
+ start=0
+ stop=0
+ tc_status="list"
fi
- echo
- start=0
- stop=0
- tc_status="list"
- n_notrun=`expr $n_notrun + 1`
+ _stash_test_status "$seqnum" "$tc_status"
continue
fi
tc_status="pass"
if [ ! -f $seq ]; then
echo " - no such test?"
+ _stash_test_status "$seqnum" "$tc_status"
continue
fi
# really going to try and run this one
- rm -f $seqres.out.bad
+ mkdir -p $RESULT_DIR
+ rm -f ${RESULT_DIR}/require_scratch*
+ rm -f ${RESULT_DIR}/require_test*
+ rm -f $seqres.out.bad $seqres.hints
# check if we really should run it
- _expunge_test $seqnum
- if [ $? -eq 1 ]; then
+ if _expunge_test $seqnum; then
tc_status="expunge"
+ _stash_test_status "$seqnum" "$tc_status"
continue
fi
# record that we really tried to run this test.
- try="$try $seqnum"
- n_try=`expr $n_try + 1`
-
- # slashes now in names, sed barfs on them so use grep
- lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
- if [ "X$lasttime" != X ]; then
- echo -n " ${lasttime}s ... "
- else
- echo -n " " # prettier output with timestamps.
+ if ((!${#loop_status[*]})); then
+ try+=("$seqnum")
fi
+
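+	# print the runtime recorded for this test's previous run, e.g.
+	# " 12s ... ", or just padding when $check.time has no entry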
+ awk 'BEGIN {lasttime=" "} \
+ $1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \
+ END {printf "%s", lasttime}' "$check.time"
rm -f core $seqres.notrun
start=`_wallclock`
- $timestamp && echo -n " ["`date "+%T"`"]"
+ $timestamp && _timestamp
[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
$LOGGER_PROG "run xfstest $seqnum"
if [ -w /dev/kmsg ]; then
echo "run fstests $seqnum at $date_time" > /dev/kmsg
# _check_dmesg depends on this log in dmesg
touch ${RESULT_DIR}/check_dmesg
+ rm -f ${RESULT_DIR}/dmesg_filter
fi
_try_wipe_scratch_devs > /dev/null 2>&1
# to be reported for each test
(echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
+ test_start_time="$(date +"%F %T")"
if [ "$DUMP_OUTPUT" = true ]; then
_run_seq 2>&1 | tee $tmp.out
# Because $? would get tee's return code
sts=$?
fi
- if [ -f core ]; then
- _dump_err_cont "[dumped core]"
- mv core $RESULT_BASE/$seqnum.core
- err=true
- fi
+ # If someone sets kernel.core_pattern or kernel.core_uses_pid,
+ # coredumps generated by fstests might have a longer name than
+ # just "core". Use globbing to find the most common patterns,
+ # assuming there are no other coredump capture packages set up.
+ local cores=0
+ for i in core core.*; do
+ test -f "$i" || continue
+ if ((cores++ == 0)); then
+ _dump_err_cont "[dumped core]"
+ fi
+ (_adjust_oom_score 250; _save_coredump "$i")
+ tc_status="fail"
+ done
if [ -f $seqres.notrun ]; then
$timestamp && _timestamp
$timestamp && echo " [not run]" && \
echo -n " $seqnum -- "
cat $seqres.notrun
- notrun="$notrun $seqnum"
- n_notrun=`expr $n_notrun + 1`
tc_status="notrun"
+ _stash_test_status "$seqnum" "$tc_status"
# Unmount the scratch fs so that we can wipe the scratch
# dev state prior to the next test run.
_scratch_unmount 2> /dev/null
rm -f ${RESULT_DIR}/require_test*
rm -f ${RESULT_DIR}/require_scratch*
- err=true
+ # Even though we failed, there may be something interesting in
+ # dmesg which can help debugging.
+ _check_dmesg
+ (_adjust_oom_score 250; _check_filesystems)
+ tc_status="fail"
else
# The test apparently passed, so check for corruption
# and log messages that shouldn't be there. Run the
# checking tools from a subshell with adjusted OOM
# score so that the OOM killer will target them instead
# of the check script itself.
- (_adjust_oom_score 250; _check_filesystems) || err=true
- _check_dmesg || err=true
+ (_adjust_oom_score 250; _check_filesystems) || tc_status="fail"
+ _check_dmesg || tc_status="fail"
+
+ # Save any coredumps from the post-test fs checks
+ for i in core core.*; do
+ test -f "$i" || continue
+ if ((cores++ == 0)); then
+ _dump_err_cont "[dumped core]"
+ fi
+ (_adjust_oom_score 250; _save_coredump "$i")
+ tc_status="fail"
+ done
fi
# Reload the module after each test to check for leaks or
# Scan for memory leaks after every test so that associating
# a leak to a particular test will be as accurate as possible.
- _check_kmemleak || err=true
+ _check_kmemleak || tc_status="fail"
# test ends after all checks are done.
$timestamp && _timestamp
if [ ! -f $seq.out ]; then
_dump_err "no qualified output"
- err=true
+ tc_status="fail"
+ _stash_test_status "$seqnum" "$tc_status"
continue;
fi
# version.
sed -i "s/\`/\'/g" $tmp.out
if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
- if ! $err ; then
+ if [ "$tc_status" != "fail" ]; then
echo "$seqnum `expr $stop - $start`" >>$tmp.time
echo -n " `expr $stop - $start`s"
fi
echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
" to see the entire diff)"
fi; } | sed -e 's/^\(.\)/ \1/'
- err=true
+ tc_status="fail"
fi
- done
-
- # make sure we record the status of the last test we ran.
- if $err ; then
- bad="$bad $seqnum"
- n_bad=`expr $n_bad + 1`
- tc_status="fail"
- fi
- if $do_report && ! $first_test ; then
- if [ $tc_status != "expunge" ] ; then
- _make_testcase_report "$prev_seq" "$tc_status"
+ if [ -f $seqres.hints ]; then
+ if [ "$tc_status" == "fail" ]; then
+ echo
+ cat $seqres.hints
+ else
+ rm -f $seqres.hints
+ fi
fi
- fi
+ _stash_test_status "$seqnum" "$tc_status"
+ done
sect_stop=`_wallclock`
interrupt=false