showme=false
have_test_arg=false
randomize=false
+exact_order=false
export here=`pwd`
xfile=""
subdir_xfile=""
brief_test_summary=false
do_report=false
DUMP_OUTPUT=false
+iterations=1
+istop=false
# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""
echo "Usage: $0 [options] [testlist]"'
check options
- -nfs test NFS
- -glusterfs test GlusterFS
- -cifs test CIFS
+ -nfs test NFS
+ -glusterfs test GlusterFS
+ -cifs test CIFS
-9p test 9p
+ -virtiofs test virtiofs
-overlay test overlay
- -pvfs2 test PVFS2
- -tmpfs test TMPFS
- -ubifs test ubifs
+ -pvfs2 test PVFS2
+ -tmpfs test TMPFS
+ -ubifs test ubifs
-l line mode diff
-udiff show unified diff (default)
-n show me, do not run tests
-T output timestamps
-r randomize test order
+ --exact-order run tests in the exact order specified
+ -i <n> iterate the test list <n> times
+ -I <n> iterate the test list <n> times, but stop iterating if any test fails
-d dump test output to stdout
-b brief test summary
-R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
-X exclude_file exclude individual tests
+ -e testlist exclude a specific list of tests
-E external_file exclude individual tests
[testlist] include tests matching names in testlist
check -X .exclude -g auto
check -E ~/.xfstests.exclude
'
- exit 0
+ exit 1
}
get_sub_group_list()
local d=$1
local grp=$2
- test -s "$SRC_DIR/$d/group" || return 1
+ test -s "$SRC_DIR/$d/group.list" || return 1
- local grpl=$(sed -n < $SRC_DIR/$d/group \
+ local grpl=$(sed -n < $SRC_DIR/$d/group.list \
-e 's/#.*//' \
-e 's/$/ /' \
-e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
local grp=$1
local grpl=""
local sub=$(dirname $grp)
+ local fsgroup="$FSTYP"
if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
# group is given as <subdir>/<group> (e.g. xfs/quick)
return
fi
- for d in $SRC_GROUPS $FSTYP; do
+ if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
+ fsgroup=ext4
+ fi
+ for d in $SRC_GROUPS $fsgroup; do
if ! test -d "$SRC_DIR/$d" ; then
continue
fi
list=$(get_group_list $xgroup)
if [ -z "$list" ]; then
echo "Group \"$xgroup\" is empty or not defined?"
- exit 1
+ continue
fi
trim_test_list $list
done
- # sort the list of tests into numeric order
- if $randomize; then
- if type shuf >& /dev/null; then
- sorter="shuf"
+ # sort the list of tests into numeric order unless we're running tests
+ # in the exact order specified
+ if ! $exact_order; then
+ if $randomize; then
+ if type shuf >& /dev/null; then
+ sorter="shuf"
+ else
+ sorter="awk -v seed=$RANDOM -f randomize.awk"
+ fi
else
- sorter="awk -v seed=$RANDOM -f randomize.awk"
+ sorter="cat"
fi
+ list=`sort -n $tmp.list | uniq | $sorter`
else
- sorter="cat"
+ list=`cat $tmp.list`
fi
- list=`sort -n $tmp.list | uniq | $sorter`
rm -f $tmp.list
}
-glusterfs) FSTYP=glusterfs ;;
-cifs) FSTYP=cifs ;;
-9p) FSTYP=9p ;;
+ -virtiofs) FSTYP=virtiofs ;;
-overlay) FSTYP=overlay; export OVERLAY=true ;;
-pvfs2) FSTYP=pvfs2 ;;
-tmpfs) FSTYP=tmpfs ;;
-X) subdir_xfile=$2; shift ;
;;
+ -e)
+ xfile=$2; shift ;
+ echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
+ ;;
+
-E) xfile=$2; shift ;
if [ -f $xfile ]; then
sed "s/#.*$//" "$xfile" >> $tmp.xlist
-udiff) diff="$diff -u" ;;
-n) showme=true ;;
- -r) randomize=true ;;
-
+ -r)
+ if $exact_order; then
+ echo "Cannot specify -r and --exact-order."
+ exit 1
+ fi
+ randomize=true
+ ;;
+ --exact-order)
+ if $randomize; then
+ echo "Cannot specify --exact-order and -r."
+ exit 1
+ fi
+ exact_order=true
+ ;;
+ -i) iterations=$2; shift ;;
+ -I) iterations=$2; istop=true; shift ;;
-T) timestamp=true ;;
-d) DUMP_OUTPUT=true ;;
-b) brief_test_summary=true;;
test_dir=`dirname $t`
test_dir=${test_dir#$SRC_DIR/*}
test_name=`basename $t`
- group_file=$SRC_DIR/$test_dir/group
+ group_file=$SRC_DIR/$test_dir/group.list
if egrep -q "^$test_name" $group_file; then
# in group file ... OK
unset try notrun bad
}
+# _global_log MESSAGE -- append MESSAGE to the global $check.log, and,
+# when multi-section config files are in use (OPTIONS_HAVE_SECTIONS),
+# mirror it into the per-section ${REPORT_DIR}/check.log as well, so each
+# section's report directory carries a complete copy of the summary.
+_global_log() {
+ echo "$1" >> $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ echo "$1" >> ${REPORT_DIR}/check.log
+ fi
+}
+
_wrapup()
{
seq="check"
}' \
| sort -n >$tmp.out
mv $tmp.out $check.time
+ if $OPTIONS_HAVE_SECTIONS; then
+ cp $check.time ${REPORT_DIR}/check.time
+ fi
fi
- echo "" >>$check.log
- date >>$check.log
+ _global_log ""
+ _global_log "$(date)"
echo "SECTION -- $section" >>$tmp.summary
echo "=========================" >>$tmp.summary
echo "Ran:$try"
echo "Ran:$try" >>$tmp.summary
fi
- echo "Ran:$try" >>$check.log
+ _global_log "Ran:$try"
fi
$interrupt && echo "Interrupted!" | tee -a $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ $interrupt && echo "Interrupted!" | tee -a \
+ ${REPORT_DIR}/check.log
+ fi
if [ ! -z "$notrun" ]; then
if [ $brief_test_summary == "false" ]; then
echo "Not run:$notrun"
echo "Not run:$notrun" >>$tmp.summary
fi
- echo "Not run:$notrun" >>$check.log
+ _global_log "Not run:$notrun"
fi
if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
echo "Failures:$bad"
echo "Failed $n_bad of $n_try tests"
- echo "Failures:$bad" >>$check.log
- echo "Failed $n_bad of $n_try tests" >>$check.log
+ _global_log "Failures:$bad"
+ _global_log "Failed $n_bad of $n_try tests"
echo "Failures:$bad" >>$tmp.summary
echo "Failed $n_bad of $n_try tests" >>$tmp.summary
else
echo "Passed all $n_try tests"
- echo "Passed all $n_try tests" >>$check.log
+ _global_log "Passed all $n_try tests"
echo "Passed all $n_try tests" >>$tmp.summary
fi
echo "" >>$tmp.summary
_check_filesystems()
{
+ local ret=0
+
if [ -f ${RESULT_DIR}/require_test ]; then
- _check_test_fs || err=true
+ _check_test_fs || ret=1
rm -f ${RESULT_DIR}/require_test*
else
_test_unmount 2> /dev/null
fi
if [ -f ${RESULT_DIR}/require_scratch ]; then
- _check_scratch_fs || err=true
+ _check_scratch_fs || ret=1
rm -f ${RESULT_DIR}/require_scratch*
- else
- _scratch_unmount 2> /dev/null
fi
+ _scratch_unmount 2> /dev/null
+ return $ret
}
_expunge_test()
return 0
}
+# Can we run systemd scopes?
+# Probe by launching a trivial scope that exits with the sentinel value 77;
+# seeing 77 back proves both systemd-run and scope creation work here.
+# reset-failed first clears any stale failed unit of the same name, which
+# would otherwise make systemd-run refuse to reuse it.
+HAVE_SYSTEMD_SCOPES=
+systemctl reset-failed "fstests-check" &>/dev/null
+systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
+test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
+
+# Make the check script unattractive to the OOM killer...
+# (writes are skipped silently when oom_score_adj is not writable,
+# e.g. unprivileged runs or kernels without it)
+OOM_SCORE_ADJ="/proc/self/oom_score_adj"
+function _adjust_oom_score() {
+ test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}"
+}
+_adjust_oom_score -1000
+
+# ...and make the tests themselves somewhat more attractive to it, so that if
+# the system runs out of memory it'll be the test that gets killed and not the
+# test framework. The test is run in a separate process without any of our
+# functions, so we open-code adjusting the OOM score.
+#
+# If systemd is available, run the entire test script in a scope so that we can
+# kill all subprocesses of the test if it fails to clean up after itself. This
+# is essential for ensuring that the post-test unmount succeeds. Note that
+# systemd doesn't automatically remove transient scopes that fail to terminate
+# when systemd tells them to terminate (e.g. programs stuck in D state when
+# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
+#
+# Returns the test script's own exit status in either branch: captured in
+# $res around the scope teardown when running under systemd, or passed
+# through directly when exec'ing the test without a scope.
+_run_seq() {
+ local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
+
+ if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
+ local unit="$(systemd-escape "fs$seq").scope"
+ systemctl reset-failed "${unit}" &> /dev/null
+ systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
+ res=$?
+ systemctl stop "${unit}" &> /dev/null
+ return "${res}"
+ else
+ "${cmd[@]}"
+ fi
+}
+
_detect_kmemleak
_prepare_test_list
trap "_wrapup; exit \$status" 0 1 2 3 15
fi
-for section in $HOST_OPTIONS_SECTIONS; do
+function run_section()
+{
+ local section=$1
+
OLD_FSTYP=$FSTYP
OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
get_next_config $section
fi
done
if $skip; then
- continue
+ return
fi
fi
fi
done
if $skip; then
- continue
+ return
fi
fi
status=1
exit
fi
+ # TEST_DEV has been recreated, previous FSTYP derived from
+ # TEST_DEV could be changed, source common/rc again with
+ # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
+ . common/rc
_prepare_test_list
elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
_test_unmount 2> /dev/null
echo "check: failed to mount \$SCRATCH_DEV using specified options"
status=1
exit
+ else
+ _scratch_unmount
fi
fi
seqres="$REPORT_DIR/$seqnum"
mkdir -p $RESULT_DIR
+ rm -f ${RESULT_DIR}/require_scratch*
+ rm -f ${RESULT_DIR}/require_test*
echo -n "$seqnum"
if $showme; then
# _check_dmesg depends on this log in dmesg
touch ${RESULT_DIR}/check_dmesg
fi
+ _try_wipe_scratch_devs > /dev/null 2>&1
+
+ # clear the WARN_ONCE state to allow a potential problem
+ # to be reported for each test
+ (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
+
if [ "$DUMP_OUTPUT" = true ]; then
- ./$seq 2>&1 | tee $tmp.out
+ _run_seq 2>&1 | tee $tmp.out
# Because $? would get tee's return code
sts=${PIPESTATUS[0]}
else
- ./$seq >$tmp.out 2>&1
+ _run_seq >$tmp.out 2>&1
sts=$?
fi
notrun="$notrun $seqnum"
n_notrun=`expr $n_notrun + 1`
tc_status="notrun"
+
+ # Unmount the scratch fs so that we can wipe the scratch
+ # dev state prior to the next test run.
+ _scratch_unmount 2> /dev/null
continue;
fi
rm -f ${RESULT_DIR}/require_scratch*
err=true
else
- # the test apparently passed, so check for corruption
- # and log messages that shouldn't be there.
- _check_filesystems
+ # The test apparently passed, so check for corruption
+ # and log messages that shouldn't be there. Run the
+ # checking tools from a subshell with adjusted OOM
+ # score so that the OOM killer will target them instead
+ # of the check script itself.
+ (_adjust_oom_score 250; _check_filesystems) || err=true
_check_dmesg || err=true
fi
+ # Reload the module after each test to check for leaks or
+ # other problems.
+ if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+ modprobe -r fs-$FSTYP
+ modprobe fs-$FSTYP
+ fi
+
# Scan for memory leaks after every test so that associating
# a leak to a particular test will be as accurate as possible.
_check_kmemleak || err=true
_test_unmount 2> /dev/null
_scratch_unmount 2> /dev/null
+}
+
+# Top-level driver: repeat the full pass over every configured section
+# $iterations times (-i/-I).  With -I (istop=true), stop iterating as soon
+# as a pass has accumulated any failures, propagating a non-zero status.
+# NOTE(review): assumes $sum_bad is maintained as a numeric failure count
+# by run_section/_wrapup (not visible in this hunk) -- confirm it is
+# initialized before the first comparison.
+for ((iters = 0; iters < $iterations; iters++)) do
+ for section in $HOST_OPTIONS_SECTIONS; do
+ run_section $section
+ if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
+ interrupt=false
+ status=`expr $sum_bad != 0`
+ exit
+ fi
+ done
done
interrupt=false