#!/bin/bash
-#
-# Control script for QA
-#
+# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it would be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-#
+# Control script for QA
#
-
tmp=/tmp/$$
status=0
needwrap=true
needsum=true
-n_try=0
-try=""
-n_bad=0
+try=()
sum_bad=0
-bad=""
-n_notrun=0
-notrun=""
+bad=()
+notrun=()
interrupt=true
diff="diff -u"
showme=false
have_test_arg=false
randomize=false
+exact_order=false
export here=`pwd`
xfile=""
+subdir_xfile=""
brief_test_summary=false
-_err_msg=""
do_report=false
DUMP_OUTPUT=false
+iterations=1
+istop=false
+loop_on_fail=0
+exclude_tests=()
+
+# This is a global variable used to pass test failure text to reporting gunk
+_err_msg=""
# start the initialisation work now
iam=check
+# mkfs.xfs uses the presence of both of these variables to enable formerly
+# supported tiny filesystem configurations that fstests use for fuzz testing
+# in a controlled environment
export MSGVERB="text:action"
export QA_CHECK_FS=${QA_CHECK_FS:=true}
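# (A sketch of that handshake, not actual mkfs.xfs code: a tool could
# detect the fstests environment with roughly
#	[ -n "$MSGVERB" ] && [ -n "$QA_CHECK_FS" ] && allow_tiny_fs=yes
# The real detection logic lives in xfsprogs.)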
# by default don't output timestamps
timestamp=${TIMESTAMP:=false}
-rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
+rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.report.* $tmp.arglist
SRC_GROUPS="generic shared"
export SRC_DIR="tests"
echo "Usage: $0 [options] [testlist]"'
check options
- -nfs test NFS
- -glusterfs test GlusterFS
- -cifs test CIFS
+ -nfs test NFS
+ -afs test AFS
+ -glusterfs test GlusterFS
+ -cifs test CIFS
-9p test 9p
+ -fuse test fuse
+ -virtiofs test virtiofs
-overlay test overlay
- -pvfs2 test PVFS2
- -tmpfs test TMPFS
- -ubifs test ubifs
+ -pvfs2 test PVFS2
+ -tmpfs test TMPFS
+ -ubifs test ubifs
-l line mode diff
-udiff show unified diff (default)
-n show me, do not run tests
-T output timestamps
-r randomize test order
+ --exact-order run tests in the exact order specified
+ -i <n> iterate the test list <n> times
+ -I <n>	iterate the test list <n> times, but stop iterating on any test failure
-d dump test output to stdout
-b brief test summary
- -R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
+ -R fmt[,fmt] generate report in formats specified. Supported formats: xunit, xunit-quiet
--large-fs optimise scratch device for large filesystems
-s section run only specified section from config file
-S section exclude the specified section from the config file
+ -L <n> loop tests <n> times following a failure, measuring aggregate pass/fail metrics
testlist options
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
-X exclude_file exclude individual tests
+ -e testlist exclude a specific list of tests
-E external_file exclude individual tests
[testlist] include tests matching names in testlist
check -X .exclude -g auto
check -E ~/.xfstests.exclude
'
- exit 0
+ exit 1
}
get_sub_group_list()
local d=$1
local grp=$2
- test -s "$SRC_DIR/$d/group" || return 1
+ test -s "$SRC_DIR/$d/group.list" || return 1
- local grpl=$(sed -n < $SRC_DIR/$d/group \
+ local grpl=$(sed -n < $SRC_DIR/$d/group.list \
-e 's/#.*//' \
-e 's/$/ /' \
-e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
local grp=$1
local grpl=""
local sub=$(dirname $grp)
+ local fsgroup="$FSTYP"
if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
# group is given as <subdir>/<group> (e.g. xfs/quick)
return
fi
- for d in $SRC_GROUPS $FSTYP; do
+ if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
+ fsgroup=ext4
+ fi
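+	# e.g. with FSTYP=ext3 the loop below searches tests/ext4, since
+	# the ext2/ext3 tests are maintained in the ext4 test directory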
+ for d in $SRC_GROUPS $fsgroup; do
if ! test -d "$SRC_DIR/$d" ; then
continue
fi
# the function from that list.
trim_test_list()
{
- test_list="$*"
+ local test_list="$*"
rm -f $tmp.grep
- numsed=0
+ local numsed=0
for t in $test_list
do
if [ $numsed -gt 100 ]; then
_timestamp()
{
- now=`date "+%T"`
+ local now=`date "+%T"`
echo -n " [$now]"
}
list=$(get_group_list $xgroup)
if [ -z "$list" ]; then
echo "Group \"$xgroup\" is empty or not defined?"
- exit 1
+ continue
fi
trim_test_list $list
done
- # sort the list of tests into numeric order
- list=`sort -n $tmp.list | uniq`
- rm -f $tmp.list
-
- if $randomize
- then
- list=`echo $list | awk -f randomize.awk`
+ # sort the list of tests into numeric order unless we're running tests
+ # in the exact order specified
+ if ! $exact_order; then
+ if $randomize; then
+ if type shuf >& /dev/null; then
+ sorter="shuf"
+ else
+ sorter="awk -v seed=$RANDOM -f randomize.awk"
+ fi
+ else
+ sorter="cat"
+ fi
+ list=`sort -n $tmp.list | uniq | $sorter`
+ else
+ list=`cat $tmp.list`
fi
+ rm -f $tmp.list
}
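# For example, if $tmp.list held generic/001 generic/002 xfs/001, a -r
# run might emit xfs/001 generic/001 generic/002 (via shuf, or the awk
# fallback seeded from $RANDOM), while --exact-order preserves the order
# in which the tests were specified.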
# Process command arguments first.
case "$1" in
-\? | -h | --help) usage ;;
- -nfs) FSTYP=nfs ;;
- -glusterfs) FSTYP=glusterfs ;;
- -cifs) FSTYP=cifs ;;
- -9p) FSTYP=9p ;;
- -overlay) FSTYP=overlay; export OVERLAY=true ;;
- -pvfs2) FSTYP=pvfs2 ;;
- -tmpfs) FSTYP=tmpfs ;;
- -ubifs) FSTYP=ubifs ;;
+ -nfs|-afs|-glusterfs|-cifs|-9p|-fuse|-virtiofs|-pvfs2|-tmpfs|-ubifs)
+ FSTYP="${1:1}"
+ ;;
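+	# ${1:1} strips the leading dash, e.g. "check -nfs" selects FSTYP=nfs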
+ -overlay)
+ [ "$FSTYP" == overlay ] || export OVL_BASE_FSTYP="$FSTYP"
+ FSTYP=overlay
+ export OVERLAY=true
+ ;;
-g) group=$2 ; shift ;
GROUP_LIST="$GROUP_LIST ${group//,/ }"
XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
;;
- -X) xfile=$2; shift ;
- for d in $SRC_GROUPS $FSTYP; do
- [ -f $SRC_DIR/$d/$xfile ] || continue
- for f in `sed "s/#.*$//" $SRC_DIR/$d/$xfile`; do
- echo $d/$f >> $tmp.xlist
- done
- done
+ -X) subdir_xfile=$2; shift ;
+ ;;
+ -e)
+ xfile=$2; shift ;
+ readarray -t -O "${#exclude_tests[@]}" exclude_tests < \
+ <(echo "$xfile" | tr ', ' '\n\n')
;;
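+	# e.g. "check -e generic/001,generic/002" excludes both tests; tr
+	# maps commas and spaces alike to newlines, and readarray -O keeps
+	# appending so repeated -e options accumulate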
+
-E) xfile=$2; shift ;
if [ -f $xfile ]; then
- sed "s/#.*$//" "$xfile" >> $tmp.xlist
- fi
+ readarray -t -O ${#exclude_tests[@]} exclude_tests < \
+ <(sed "s/#.*$//" $xfile)
+ fi
;;
-s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
-S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
-udiff) diff="$diff -u" ;;
-n) showme=true ;;
- -r) randomize=true ;;
-
+ -r)
+ if $exact_order; then
+ echo "Cannot specify -r and --exact-order."
+ exit 1
+ fi
+ randomize=true
+ ;;
+ --exact-order)
+ if $randomize; then
+ echo "Cannnot specify --exact-order and -r."
+ exit 1
+ fi
+ exact_order=true
+ ;;
+ -i) iterations=$2; shift ;;
+ -I) iterations=$2; istop=true; shift ;;
-T) timestamp=true ;;
-d) DUMP_OUTPUT=true ;;
-b) brief_test_summary=true;;
;;
--large-fs) export LARGE_SCRATCH_DEV=yes ;;
--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
+ -L) [[ $2 =~ ^[0-9]+$ ]] || usage
+ loop_on_fail=$2; shift
+ ;;
-*) usage ;;
*) # not an argument, we've got tests now.
shift
done
-# we need common/config, source it after processing args, overlay needs FSTYP
-# set before sourcing common/config
-if ! . ./common/config; then
- echo "$iam: failed to source common/config"
+# we need common/rc, which also sources common/config. Source it after
+# processing args, since overlay needs FSTYP set before common/config runs
+if ! . ./common/rc; then
+ echo "check: failed to source common/rc"
exit 1
fi
+# If the test config specified a soak test duration, see if there are any
+# unit suffixes that need converting to an integer seconds count.
+if [ -n "$SOAK_DURATION" ]; then
+ SOAK_DURATION="$(echo "$SOAK_DURATION" | \
+ sed -e 's/^\([.0-9]*\)\([a-z]\)*/\1 \2/g' | \
+ $AWK_PROG -f $here/src/soak_duration.awk)"
+ if [ $? -ne 0 ]; then
+ status=1
+ exit 1
+ fi
+fi
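+# For example, assuming soak_duration.awk accepts the usual s/m/h/d/w
+# suffixes, SOAK_DURATION=30m becomes 1800 and SOAK_DURATION=2h becomes
+# 7200 before any test sees the value:
+#	SOAK_DURATION=30m ./check <some soak-aware test>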
+
+if [ -n "$subdir_xfile" ]; then
+ for d in $SRC_GROUPS $FSTYP; do
+ [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
+ for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
+ exclude_tests+=($d/$f)
+ done
+ done
+fi
+
# Process tests from command line now.
if $have_test_arg; then
while [ $# -gt 0 ]; do
*) # Expand test pattern (e.g. xfs/???, *fs/001)
list=$(cd $SRC_DIR; echo $1)
for t in $list; do
- test_dir=`dirname $t`
- test_dir=${test_dir#$SRC_DIR/*}
- test_name=`basename $t`
- group_file=$SRC_DIR/$test_dir/group
+ t=${t#$SRC_DIR/}
+ test_dir=${t%%/*}
+ test_name=${t##*/}
+ group_file=$SRC_DIR/$test_dir/group.list
- if egrep -q "^$test_name" $group_file; then
+ if grep -Eq "^$test_name" $group_file; then
# in group file ... OK
echo $SRC_DIR/$test_dir/$test_name \
>>$tmp.arglist
GROUP_LIST="auto"
fi
-# we need common/rc
-if ! . ./common/rc
-then
- echo "check: failed to source common/rc"
- exit 1
-fi
-
if [ `id -u` -ne 0 ]
then
echo "check: QA must be run as root"
_wipe_counters()
{
- n_try="0"
- n_bad="0"
- n_notrun="0"
- unset try notrun bad
+ try=()
+ notrun=()
+ bad=()
}
+_global_log() {
+ echo "$1" >> $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ echo "$1" >> ${REPORT_DIR}/check.log
+ fi
+}
+
+if [ -n "$REPORT_GCOV" ]; then
+ . ./common/gcov
+ _gcov_check_report_gcov
+fi
+
_wrapup()
{
seq="check"
check="$RESULT_BASE/check"
+ $interrupt && sect_stop=`_wallclock`
- if $showme; then
- if $needwrap; then
- if $do_report; then
- _make_section_report
- fi
- needwrap=false
+ if $showme && $needwrap; then
+ if $do_report; then
+ # $showme = all selected tests are notrun (no tries)
+ _make_section_report "$section" "${#notrun[*]}" "0" \
+ "${#notrun[*]}" \
+ "$((sect_stop - sect_start))"
fi
+ needwrap=false
elif $needwrap; then
if [ -f $check.time -a -f $tmp.time ]; then
cat $check.time $tmp.time \
}' \
| sort -n >$tmp.out
mv $tmp.out $check.time
+ if $OPTIONS_HAVE_SECTIONS; then
+ cp $check.time ${REPORT_DIR}/check.time
+ fi
fi
- echo "" >>$check.log
- date >>$check.log
+ _global_log ""
+ _global_log "$(date)"
echo "SECTION -- $section" >>$tmp.summary
echo "=========================" >>$tmp.summary
- if [ ! -z "$n_try" -a $n_try != 0 ]; then
+ if ((${#try[*]} > 0)); then
if [ $brief_test_summary == "false" ]; then
- echo "Ran:$try"
- echo "Ran:$try" >>$tmp.summary
+ echo "Ran: ${try[*]}"
+ echo "Ran: ${try[*]}" >>$tmp.summary
fi
- echo "Ran:$try" >>$check.log
+ _global_log "Ran: ${try[*]}"
fi
$interrupt && echo "Interrupted!" | tee -a $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ $interrupt && echo "Interrupted!" | tee -a \
+ ${REPORT_DIR}/check.log
+ fi
- if [ ! -z "$notrun" ]; then
+ if ((${#notrun[*]} > 0)); then
if [ $brief_test_summary == "false" ]; then
- echo "Not run:$notrun"
- echo "Not run:$notrun" >>$tmp.summary
+ echo "Not run: ${notrun[*]}"
+ echo "Not run: ${notrun[*]}" >>$tmp.summary
fi
- echo "Not run:$notrun" >>$check.log
+ _global_log "Not run: ${notrun[*]}"
fi
- if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
- echo "Failures:$bad"
- echo "Failed $n_bad of $n_try tests"
- echo "Failures:$bad" >>$check.log
- echo "Failed $n_bad of $n_try tests" >>$check.log
- echo "Failures:$bad" >>$tmp.summary
- echo "Failed $n_bad of $n_try tests" >>$tmp.summary
+ if ((${#bad[*]} > 0)); then
+ echo "Failures: ${bad[*]}"
+ echo "Failed ${#bad[*]} of ${#try[*]} tests"
+ _global_log "Failures: ${bad[*]}"
+ _global_log "Failed ${#bad[*]} of ${#try[*]} tests"
+ echo "Failures: ${bad[*]}" >>$tmp.summary
+ echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary
else
- echo "Passed all $n_try tests"
- echo "Passed all $n_try tests" >>$check.log
- echo "Passed all $n_try tests" >>$tmp.summary
+ echo "Passed all ${#try[*]} tests"
+ _global_log "Passed all ${#try[*]} tests"
+ echo "Passed all ${#try[*]} tests" >>$tmp.summary
fi
echo "" >>$tmp.summary
if $do_report; then
- _make_section_report
+ _make_section_report "$section" "${#try[*]}" \
+ "${#bad[*]}" "${#notrun[*]}" \
+ "$((sect_stop - sect_start))"
fi
+
+ # Generate code coverage report
+ if [ -n "$REPORT_GCOV" ]; then
+ # don't trigger multiple times if caller hits ^C
+ local gcov_report_dir="$REPORT_GCOV"
+ test "$gcov_report_dir" = "1" && \
+ gcov_report_dir="$REPORT_DIR/gcov"
+ unset REPORT_GCOV
+
+ _gcov_generate_report "$gcov_report_dir"
+ fi
+
needwrap=false
fi
- sum_bad=`expr $sum_bad + $n_bad`
+ sum_bad=`expr $sum_bad + ${#bad[*]}`
_wipe_counters
rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
if ! $OPTIONS_HAVE_SECTIONS; then
_check_filesystems()
{
+ local ret=0
+
if [ -f ${RESULT_DIR}/require_test ]; then
- _check_test_fs || err=true
+ if ! _check_test_fs ; then
+ ret=1
+ echo "Trying to repair broken TEST_DEV file system"
+ _repair_test_fs
+ _test_mount
+ fi
rm -f ${RESULT_DIR}/require_test*
else
_test_unmount 2> /dev/null
fi
if [ -f ${RESULT_DIR}/require_scratch ]; then
- _check_scratch_fs || err=true
+ _check_scratch_fs || ret=1
rm -f ${RESULT_DIR}/require_scratch*
- else
- _scratch_unmount 2> /dev/null
fi
+ _scratch_unmount 2> /dev/null
+ return $ret
}
_expunge_test()
{
local TEST_ID="$1"
- if [ -s $tmp.xlist ]; then
- if grep -q $TEST_ID $tmp.xlist; then
+
+ for f in "${exclude_tests[@]}"; do
+	# $f may contain trailing spaces and comments
+ local id_regex="^${TEST_ID}\b"
+ if [[ "$f" =~ ${id_regex} ]]; then
echo " [expunged]"
- return 1
+ return 0
+ fi
+ done
+ return 1
+}
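+# e.g. with an exclude entry "generic/001  # broken on this box",
+# "_expunge_test generic/001" matches the ^generic/001\b regex and
+# reports the test as expunged, while generic/0011 stays runnable
+# because of the \b word boundary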
+
+# retain files which would be overwritten in subsequent reruns of the same test
+_stash_fail_loop_files() {
+ local seq_prefix="${REPORT_DIR}/${1}"
+ local cp_suffix="$2"
+
+ for i in ".full" ".dmesg" ".out.bad" ".notrun" ".core" ".hints"; do
+ rm -f "${seq_prefix}${i}${cp_suffix}"
+ if [ -f "${seq_prefix}${i}" ]; then
+ cp "${seq_prefix}${i}" "${seq_prefix}${i}${cp_suffix}"
+ fi
+ done
+}
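+# e.g. a "check -L 2" rerun loop for a failing generic/001 leaves
+# generic/001.out.bad.rerun0 (the initial failure) plus .rerun1 and
+# .rerun2 snapshots next to the files from the final run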
+
+# Retain in @bad / @notrun the result of the just-run @test_seq. @try array
+# entries are added prior to execution.
+_stash_test_status() {
+ local test_seq="$1"
+ local test_status="$2"
+
+ if $do_report && [[ $test_status != "expunge" ]]; then
+ _make_testcase_report "$section" "$test_seq" \
+ "$test_status" "$((stop - start))"
+ fi
+
+ if ((${#loop_status[*]} > 0)); then
+ # continuing or completing rerun-on-failure loop
+ _stash_fail_loop_files "$test_seq" ".rerun${#loop_status[*]}"
+ loop_status+=("$test_status")
+ if ((${#loop_status[*]} > loop_on_fail)); then
+ printf "%s aggregate results across %d runs: " \
+ "$test_seq" "${#loop_status[*]}"
+ awk "BEGIN {
+ n=split(\"${loop_status[*]}\", arr);"'
+ for (i = 1; i <= n; i++)
+ stats[arr[i]]++;
+ for (x in stats)
+ printf("%s=%d (%.1f%%)",
+ (i-- > n ? x : ", " x),
+ stats[x], 100 * stats[x] / n);
+ }'
+ echo
+ loop_status=()
fi
+ return # only stash @bad result for initial failure in loop
fi
- return 0
+
+ case "$test_status" in
+ fail)
+ if ((loop_on_fail > 0)); then
+ # initial failure, start rerun-on-failure loop
+ _stash_fail_loop_files "$test_seq" ".rerun0"
+ loop_status+=("$test_status")
+ fi
+ bad+=("$test_seq")
+ ;;
+ list|notrun)
+ notrun+=("$test_seq")
+ ;;
+ pass|expunge)
+ ;;
+ *)
+ echo "Unexpected test $test_seq status: $test_status"
+ ;;
+ esac
}
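+# Sample of the aggregate line printed when a -L rerun loop completes,
+# e.g. "check -L 2" with the initial failure plus two reruns:
+#	generic/001 aggregate results across 3 runs: fail=2 (66.7%), pass=1 (33.3%)
+# (the order of the status=count pairs follows awk hash ordering)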
-_init_kmemleak
+# Can we run systemd scopes?
+HAVE_SYSTEMD_SCOPES=
+systemctl reset-failed "fstests-check" &>/dev/null
+systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
+test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
+
+# Make the check script unattractive to the OOM killer...
+OOM_SCORE_ADJ="/proc/self/oom_score_adj"
+function _adjust_oom_score() {
+ test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
+}
+_adjust_oom_score -500
+
+# ...and make the tests themselves somewhat more attractive to it, so that if
+# the system runs out of memory it'll be the test that gets killed and not the
+# test framework. The test is run in a separate process without any of our
+# functions, so we open-code adjusting the OOM score.
+#
+# If systemd is available, run the entire test script in a scope so that we can
+# kill all subprocesses of the test if it fails to clean up after itself. This
+# is essential for ensuring that the post-test unmount succeeds. Note that
+# systemd doesn't automatically remove transient scopes that fail to terminate
+# when systemd tells them to terminate (e.g. programs stuck in D state when
+# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
+_run_seq() {
+ local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
+
+ if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
+ local unit="$(systemd-escape "fs$seq").scope"
+ systemctl reset-failed "${unit}" &> /dev/null
+ systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
+ res=$?
+ systemctl stop "${unit}" &> /dev/null
+ return "${res}"
+ else
+ "${cmd[@]}"
+ fi
+}
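+# e.g. for seq tests/generic/001 the transient unit is named
+# "fstests-generic-001.scope" (systemd-escape maps "/" to "-"), and
+# stopping the scope reaps any subprocesses the test left behind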
+
+_detect_kmemleak
_prepare_test_list
+fstests_start_time="$(date +"%F %T")"
if $OPTIONS_HAVE_SECTIONS; then
trap "_summary; exit \$status" 0 1 2 3 15
trap "_wrapup; exit \$status" 0 1 2 3 15
fi
-for section in $HOST_OPTIONS_SECTIONS; do
+function run_section()
+{
+ local section=$1 skip
+
OLD_FSTYP=$FSTYP
OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
- get_next_config $section
# Do we need to run only some sections ?
if [ ! -z "$RUN_SECTION" ]; then
fi
done
if $skip; then
- continue
+ return
fi
fi
fi
done
if $skip; then
- continue
+ return
fi
fi
+ get_next_config $section
+ _canonicalize_devices
+
mkdir -p $RESULT_BASE
if [ ! -d $RESULT_BASE ]; then
echo "failed to create results directory $RESULT_BASE"
status=1
exit
fi
+	# TEST_DEV has been recreated, so the FSTYP previously derived
+	# from TEST_DEV may have changed; source common/rc again with the
+	# correct FSTYP to pick up FSTYP-specific configs, e.g. common/xfs
+ . common/rc
_prepare_test_list
elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
_test_unmount 2> /dev/null
echo "MOUNT_OPTIONS -- `_scratch_mount_options`"
fi
echo
+ test -n "$REPORT_GCOV" && _gcov_reset
needwrap=true
if [ ! -z "$SCRATCH_DEV" ]; then
# call the overridden mount - make sure the FS mounts with
# the same options that we'll mount with later.
- if ! _scratch_mount >$tmp.err 2>&1
+ if ! _try_scratch_mount >$tmp.err 2>&1
then
echo "our local mount routine ..."
cat $tmp.err
echo "check: failed to mount \$SCRATCH_DEV using specified options"
status=1
exit
+ else
+ _scratch_unmount
fi
fi
seqres="$check"
_check_test_fs
- for seq in $list
- do
- err=false
- _err_msg=""
- if [ ! -f $seq ]; then
- # Try to get full name in case the user supplied only seq id
- # and the test has a name. A bit of hassle to find really
- # the test and not its sample output or helping files.
- bname=$(basename $seq)
- full_seq=$(find $(dirname $seq) -name $bname* -executable |
- awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
- END { print shortest }')
- if [ -f $full_seq ] \
- && [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
- seq=$full_seq
- fi
- fi
+ loop_status=() # track rerun-on-failure state
+ local tc_status ix
+ local -a _list=( $list )
+ for ((ix = 0; ix < ${#_list[*]}; !${#loop_status[*]} && ix++)); do
+ seq="${_list[$ix]}"
+
+ if [ ! -f $seq ]; then
+ # Try to get full name in case the user supplied only
+ # seq id and the test has a name. A bit of hassle to
+ # find really the test and not its sample output or
+ # helping files.
+ bname=$(basename $seq)
+ full_seq=$(find $(dirname $seq) -name $bname* -executable |
+ awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
+ END { print shortest }')
+ if [ -f $full_seq ] && \
+ [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then
+ seq=$full_seq
+ fi
+ fi
- # the filename for the test and the name output are different.
- # we don't include the tests/ directory in the name output.
- export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`
-
- # Similarly, the result directory needs to replace the tests/
- # part of the test location.
- group=`dirname $seq`
- if $OPTIONS_HAVE_SECTIONS; then
- export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
- REPORT_DIR="$RESULT_BASE/$section"
- else
- export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
- REPORT_DIR="$RESULT_BASE"
- fi
- seqres="$REPORT_DIR/$seqnum"
+ # the filename for the test and the name output are different.
+ # we don't include the tests/ directory in the name output.
+ export seqnum=${seq#$SRC_DIR/}
+ group=${seqnum%%/*}
+ if $OPTIONS_HAVE_SECTIONS; then
+ REPORT_DIR="$RESULT_BASE/$section"
+ else
+ REPORT_DIR="$RESULT_BASE"
+ fi
+ export RESULT_DIR="$REPORT_DIR/$group"
+ seqres="$REPORT_DIR/$seqnum"
- mkdir -p $RESULT_DIR
+ # Generate the entire section report with whatever test results
+ # we have so far. Leave the $sect_time parameter empty so that
+ # it's a little more obvious that this test run is incomplete.
+ if $do_report; then
+ _make_section_report "$section" "${#try[*]}" \
+ "${#bad[*]}" "${#notrun[*]}" \
+ "" &> /dev/null
+ fi
- echo -n "$seqnum"
+ echo -n "$seqnum"
- if $showme; then
- _expunge_test $seqnum
- if [ $? -eq 1 ]; then
+ if $showme; then
+ if _expunge_test $seqnum; then
+ tc_status="expunge"
+ else
+ echo
+ start=0
+ stop=0
+ tc_status="list"
+ fi
+ _stash_test_status "$seqnum" "$tc_status"
continue
fi
- echo
- start=0
- stop=0
- n_notrun=`expr $n_notrun + 1`
- if $do_report; then
- _make_testcase_report "list"
+
+ tc_status="pass"
+ if [ ! -f $seq ]; then
+ echo " - no such test?"
+ _stash_test_status "$seqnum" "$tc_status"
+ continue
fi
- continue
- fi
- tc_status="pass"
- if [ ! -f $seq ]; then
- echo " - no such test?"
- else
+
# really going to try and run this one
- #
- rm -f $seqres.out.bad
+ mkdir -p $RESULT_DIR
+ rm -f ${RESULT_DIR}/require_scratch*
+ rm -f ${RESULT_DIR}/require_test*
+ rm -f $seqres.out.bad $seqres.hints
# check if we really should run it
- _expunge_test $seqnum
- if [ $? -eq 1 ]; then
+ if _expunge_test $seqnum; then
+ tc_status="expunge"
+ _stash_test_status "$seqnum" "$tc_status"
continue
fi
- # slashes now in names, sed barfs on them so use grep
- lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
- if [ "X$lasttime" != X ]; then
- echo -n " ${lasttime}s ..."
- else
- echo -n " " # prettier output with timestamps.
+ # record that we really tried to run this test.
+ if ((!${#loop_status[*]})); then
+ try+=("$seqnum")
fi
+
+ awk 'BEGIN {lasttime=" "} \
+ $1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \
+ END {printf "%s", lasttime}' "$check.time"
rm -f core $seqres.notrun
start=`_wallclock`
- $timestamp && echo -n " ["`date "+%T"`"]"
+ $timestamp && _timestamp
[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
$LOGGER_PROG "run xfstest $seqnum"
if [ -w /dev/kmsg ]; then
echo "run fstests $seqnum at $date_time" > /dev/kmsg
# _check_dmesg depends on this log in dmesg
touch ${RESULT_DIR}/check_dmesg
+ rm -f ${RESULT_DIR}/dmesg_filter
fi
+ _try_wipe_scratch_devs > /dev/null 2>&1
+
+ # clear the WARN_ONCE state to allow a potential problem
+ # to be reported for each test
+ (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
+
+ test_start_time="$(date +"%F %T")"
if [ "$DUMP_OUTPUT" = true ]; then
- ./$seq 2>&1 | tee $tmp.out
+ _run_seq 2>&1 | tee $tmp.out
# Because $? would get tee's return code
sts=${PIPESTATUS[0]}
else
- ./$seq >$tmp.out 2>&1
+ _run_seq >$tmp.out 2>&1
sts=$?
fi
- $timestamp && _timestamp
- stop=`_wallclock`
- if [ -f core ]
- then
- _err_msg="[dumped core]"
- echo -n " $_err_msg"
- mv core $RESULT_BASE/$seqnum.core
- err=true
+ # If someone sets kernel.core_pattern or kernel.core_uses_pid,
+ # coredumps generated by fstests might have a longer name than
+ # just "core". Use globbing to find the most common patterns,
+ # assuming there are no other coredump capture packages set up.
+ local cores=0
+ for i in core core.*; do
+ test -f "$i" || continue
+ if ((cores++ == 0)); then
+ _dump_err_cont "[dumped core]"
+ fi
+ (_adjust_oom_score 250; _save_coredump "$i")
+ tc_status="fail"
+ done
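+	# e.g. with sysctl kernel.core_pattern=core.%p a crashing test
+	# leaves "core.<pid>" behind, which the core.* glob above still
+	# catches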
+
+ if [ -f $seqres.notrun ]; then
+ $timestamp && _timestamp
+ stop=`_wallclock`
+ $timestamp || echo -n "[not run] "
+ $timestamp && echo " [not run]" && \
+ echo -n " $seqnum -- "
+ cat $seqres.notrun
+ tc_status="notrun"
+ _stash_test_status "$seqnum" "$tc_status"
+
+ # Unmount the scratch fs so that we can wipe the scratch
+ # dev state prior to the next test run.
+ _scratch_unmount 2> /dev/null
+ continue;
fi
- if [ -f $seqres.notrun ]
- then
- $timestamp || echo -n " [not run] "
- $timestamp && echo " [not run]" && echo -n " $seqnum -- "
- cat $seqres.notrun
- notrun="$notrun $seqnum"
- n_notrun=`expr $n_notrun + 1`
- tc_status="notrun"
+ if [ $sts -ne 0 ]; then
+ _dump_err_cont "[failed, exit status $sts]"
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+ rm -f ${RESULT_DIR}/require_test*
+ rm -f ${RESULT_DIR}/require_scratch*
+ # Even though we failed, there may be something interesting in
+ # dmesg which can help debugging.
+ _check_dmesg
+ (_adjust_oom_score 250; _check_filesystems)
+ tc_status="fail"
else
- if [ $sts -ne 0 ]
- then
- _err_msg="[failed, exit status $sts]"
- echo -n " $_err_msg"
- err=true
- fi
- if [ ! -f $seq.out ]
- then
+ # The test apparently passed, so check for corruption
+ # and log messages that shouldn't be there. Run the
+ # checking tools from a subshell with adjusted OOM
+ # score so that the OOM killer will target them instead
+ # of the check script itself.
+ (_adjust_oom_score 250; _check_filesystems) || tc_status="fail"
+ _check_dmesg || tc_status="fail"
+
+ # Save any coredumps from the post-test fs checks
+ for i in core core.*; do
+ test -f "$i" || continue
+ if ((cores++ == 0)); then
+ _dump_err_cont "[dumped core]"
+ fi
+ (_adjust_oom_score 250; _save_coredump "$i")
+ tc_status="fail"
+ done
+ fi
+
+ # Reload the module after each test to check for leaks or
+ # other problems.
+ if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+ modprobe -r fs-$FSTYP
+ modprobe fs-$FSTYP
+ fi
+
+ # Scan for memory leaks after every test so that associating
+ # a leak to a particular test will be as accurate as possible.
+ _check_kmemleak || tc_status="fail"
+
+ # test ends after all checks are done.
+ $timestamp && _timestamp
+ stop=`_wallclock`
+
+ if [ ! -f $seq.out ]; then
_dump_err "no qualified output"
- err=true
- else
-
- # coreutils 8.16+ changed quote formats in error messages from
- # `foo' to 'foo'. Filter old versions to match the new version.
- sed -i "s/\`/\'/g" $tmp.out
- if diff $seq.out $tmp.out >/dev/null 2>&1
- then
- if $err
- then
- :
- else
+ tc_status="fail"
+ _stash_test_status "$seqnum" "$tc_status"
+ continue;
+ fi
+
+ # coreutils 8.16+ changed quote formats in error messages
+ # from `foo' to 'foo'. Filter old versions to match the new
+ # version.
+ sed -i "s/\`/\'/g" $tmp.out
+ if diff $seq.out $tmp.out >/dev/null 2>&1 ; then
+ if [ "$tc_status" != "fail" ]; then
echo "$seqnum `expr $stop - $start`" >>$tmp.time
echo -n " `expr $stop - $start`s"
- fi
- echo ""
+ fi
+ echo ""
+ else
+ _dump_err "- output mismatch (see $seqres.out.bad)"
+ mv $tmp.out $seqres.out.bad
+ $diff $seq.out $seqres.out.bad | {
+ if test "$DIFF_LENGTH" -le 0; then
+ cat
else
- echo " - output mismatch (see $seqres.out.bad)"
- mv $tmp.out $seqres.out.bad
- $diff $seq.out $seqres.out.bad | {
- if test "$DIFF_LENGTH" -le 0; then
- cat
- else
- head -n "$DIFF_LENGTH"
- echo "..."
- echo "(Run '$diff $seq.out $seqres.out.bad'" \
- " to see the entire diff)"
- fi; } | \
- sed -e 's/^\(.\)/ \1/'
- _err_msg="output mismatch (see $diff $seq.out $seqres.out.bad)"
- err=true
+ head -n "$DIFF_LENGTH"
+ echo "..."
+ echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
+ " to see the entire diff)"
+ fi; } | sed -e 's/^\(.\)/ \1/'
+ tc_status="fail"
+ fi
+ if [ -f $seqres.hints ]; then
+ if [ "$tc_status" == "fail" ]; then
+ echo
+ cat $seqres.hints
+ else
+ rm -f $seqres.hints
fi
- fi
- try="$try $seqnum"
- n_try=`expr $n_try + 1`
- _check_filesystems
- _check_dmesg || err=true
- _check_kmemleak || err=true
fi
-
- fi
-
- # come here for each test, except when $showme is true
- #
- if $err
- then
- bad="$bad $seqnum"
- n_bad=`expr $n_bad + 1`
- tc_status="fail"
- fi
- if $do_report; then
- _make_testcase_report "$tc_status"
- fi
- seq="after_$seqnum"
+ _stash_test_status "$seqnum" "$tc_status"
done
+
sect_stop=`_wallclock`
interrupt=false
_wrapup
_test_unmount 2> /dev/null
_scratch_unmount 2> /dev/null
+}
+
+for ((iters = 0; iters < $iterations; iters++)) do
+ for section in $HOST_OPTIONS_SECTIONS; do
+ run_section $section
+ if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
+ interrupt=false
+ status=`expr $sum_bad != 0`
+ exit
+ fi
+ done
done
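# e.g. "check -I 3 -g quick" runs the quick group up to three times but
# stops after the first section that finishes with failures, whereas
# "check -i 3 -g quick" always completes all three iterations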
interrupt=false