#! /bin/sh
-#
-# XFS QA Test No. 001
+# FS QA Test No. 001
#
# Random file copier to produce chains of identical files so the head
-# and the tail cna be diff'd at then end of each iteration.
+# and the tail can be diff'd at the end of each iteration.
#
# Exercises creat, write and unlink for a variety of directory sizes, and
# checks for data corruption.
trap "_cleanup; rm -f $tmp.*; exit \$status" 0 1 2 3 15
# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
verbose=true
+verify=$here/verify_fill
if [ $# -eq 0 ]
then
fi
ncopy=200 # number of file copies in the chain step
+udf_fsize=20240 # number of sectors for UDF
_setup()
{
- if mkdir -p $TEST_DIR/$$
+ if mkdir -p $testdir/$$
then
:
else
- echo "Error: cannot mkdir \"$TEST_DIR/$$\""
+ echo "Error: cannot mkdir \"$testdir/$$\""
exit 1
fi
- cd $TEST_DIR/$$
+ cd $testdir/$$
$verbose && echo -n "setup "
sed -e '/^#/d' $tmp.config \
#
_chain()
{
- $AWK_PROG <$tmp.config '
+ $AWK_PROG -v full_file=$here/$seq.full -v verify=$verify <$tmp.config '
BEGIN { nfile = 0 }
/^\#/ { next }
{ file[nfile] = $1
+ size[nfile] = $2
link[nfile] = 0
nfile++
+ total_size += $2
}
END { srand('$iter')
for (i=0; i < '$ncopy'; i++) {
while (j < 0 || j >= nfile)
j = int(rand() * nfile)
if (link[j] == 0) {
+ # previous should already exist and next one should not exist
printf "if [ ! -f %s ]; then echo \"%s missing!\"; exit; fi\n",file[j],file[j]
printf "if [ -f %s.0 ]; then echo \"%s.0 already present!\"; exit; fi\n",file[j],file[j]
- printf "cp %s %s.0\n",file[j],file[j]
+ printf "cp %s %s.0 || exit 1\n",file[j],file[j]
+ printf "ls -i %s.0\n", file[j] >full_file;
+ total_size += size[j]
+ printf "# total size = %d\n", total_size
}
else {
+ # previous should already exist and next one should not exist
printf "if [ ! -f %s.%d ]; then echo \"%s.%d missing!\"; exit; fi\n",file[j],link[j]-1,file[j],link[j]-1
printf "if [ -f %s.%d ]; then echo \"%s.%d already present!\"; exit; fi\n",file[j],link[j],file[j],link[j]
- printf "cp %s.%d %s.%d\n",file[j],link[j]-1,file[j],link[j]
+ printf "cp %s.%d %s.%d || exit 1\n",file[j],link[j]-1,file[j],link[j]
+ printf "ls -i %s.%d\n", file[j], link[j] >full_file;
+ total_size += size[j]
+ printf "# total size = %d\n", total_size
}
link[j]++
}
- # close all the chains, and remove all of the files except
- # the head of the chain
+ # close all the chains,
+ # if have at least one copy then move the last copy to "file[j].last"
+ # and remove all of the other files except the head of the chain
for (j=0; j<nfile; j++) {
- if (link[j] > 0)
+ if (link[j] > 0) {
printf "mv %s.%d %s.last\n",file[j],link[j]-1,file[j]
+ printf "ls -i %s.last\n", file[j] >full_file;
+ }
for (i=0; i<link[j]-1; i++) {
printf "rm -f %s.%d\n",file[j],i
}
}
}' \
- | sh
+ | tee -a $here/$seq.full | sh
}
_check()
sed -e '/^#/d' $tmp.config \
| while read file nbytes
do
+ # the file is never removed so it should exist
if [ ! -f $file ]
then
$verbose && echo
touch $tmp.bad
continue
fi
+ # checks that the file and its last copy are the same
if [ -f $file.last ]
then
if cmp $file $file.last >/dev/null 2>&1
then
$verbose && echo "cleanup"
cd /
- rm -rf $TEST_DIR/$$
+ rm -rf $testdir/$$
+ _cleanup_testdir
done_cleanup=true
fi
}
+rm -f $here/$seq.full
status=0
_cleanup
status=1
done_cleanup=false
+_setup_testdir
_setup
# do the test
for iter in 1 2 3 4 5
do
echo -n "iter $iter chain ... "
+ echo "iter $iter" >> $here/$seq.full
_chain
_check
if [ -f $tmp.bad ]
#! /bin/sh
#
-# XFS QA Test No. 002
+# FS QA Test No. 002
#
# simple inode link count test for a regular file
#
tmp=/tmp/$$
here=`pwd`
status=0 # success is the default!
-trap "rm -f $tmp.*; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_cleanup()
+{
+ rm -f $tmp.*
+ _cleanup_testdir
+}
# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
+
+_setup_testdir
echo "Silence is goodness ..."
# ensure target directory exists
-mkdir `dirname $TEST_DIR/$tmp` 2>/dev/null
+mkdir `dirname $testdir/$tmp` 2>/dev/null
-touch $TEST_DIR/$tmp.1
+touch $testdir/$tmp.1
for l in 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
do
- ln $TEST_DIR/$tmp.1 $TEST_DIR/$tmp.$l
- x=`src/lstat64 $TEST_DIR/$tmp.1 | sed -n -e '/ Links: /s/.*Links: *//p'`
+ ln $testdir/$tmp.1 $testdir/$tmp.$l
+ x=`src/lstat64 $testdir/$tmp.1 | sed -n -e '/ Links: /s/.*Links: *//p'`
if [ "$l" -ne $x ]
then
echo "Arrgh, created link #$l and lstat64 looks like ..."
- src/lstat64 $TEST_DIR/$tmp.1
+ src/lstat64 $testdir/$tmp.1
status=1
fi
done
for l in 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1
do
- x=`src/lstat64 $TEST_DIR/$tmp.1 | sed -n -e '/ Links: /s/.*Links: *//p'`
+ x=`src/lstat64 $testdir/$tmp.1 | sed -n -e '/ Links: /s/.*Links: *//p'`
if [ "$l" -ne $x ]
then
echo "Arrgh, about to remove link #$l and lstat64 looks like ..."
- src/lstat64 $TEST_DIR/$tmp.1
+ src/lstat64 $testdir/$tmp.1
status=1
fi
- rm -f $TEST_DIR/$tmp.$l
+ rm -f $testdir/$tmp.$l
done
# success, all done
#! /bin/sh
#
-# XFS QA Test No. 003
+# FS QA Test No. 003
#
# exercise xfs_db bug #784078
#
_need_to_be_root
# real QA test starts here
-
-PATH=".:$PATH"
+_supported_fs xfs
+_supported_os IRIX Linux
[ -f core ] && rm -f core
[ -f core ] && echo "Warning: can't nuke existing core file!"
#! /bin/sh
-# XFS QA Test No. 004
+# FS QA Test No. 004
#
# exercise xfs_db bug #789674 and other freesp functionality
#
{
echo "=== mkfs output ===" >>$seq.full
_scratch_mkfs_xfs | tee -a $seq.full | _filter_mkfs 2>$tmp.mkfs
- source $tmp.mkfs
+ . $tmp.mkfs
_scratch_mount
dd if=/dev/zero of=$SCRATCH_MNT/foo count=200 bs=4096 >/dev/null 2>&1 &
dd if=/dev/zero of=$SCRATCH_MNT/goo count=400 bs=4096 >/dev/null 2>&1 &
. ./common.rc
. ./common.filter
+# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
_need_to_be_root
_require_scratch
_require_nobigloopfs
-# real QA test starts here
rm -f $seq.full
_populate_scratch
-eval `df -P -T --block-size=512 $SCRATCH_MNT 2>&1 \
- | $AWK_PROG 'END { printf "blocks=%u used=%u avail=%u\n", $3, $4, $5 }'`
+[ "$HOSTOS" = "Linux" ] && DF_PROG="$DF_PROG -P --block-size=512"
+
+eval `$DF_PROG $SCRATCH_MNT 2>&1 \
+ | tail -1 | $AWK_PROG '{ printf "blocks=%u used=%u avail=%u\n", $3, $4, $5 }'`
echo "df gave: blocks=$blocks used=$used avail=$avail" >>$seq.full
echo "blocksize from mkfs is '$dbsize'" >>$seq.full
#! /bin/sh
-# XFS QA Test No. 005
+# FS QA Test No. 005
#
# Test symlinks & ELOOP
#
owner=dxm@sgi.com
#
-# note ELOOP limit used to be 32 but changed to 8. Who know what
+# note ELOOP limit used to be 32 but changed to 8. Who knows what
# it might be next.
#
_cleanup()
{
- cd $TEST_DIR
+ cd $testdir
rm -f symlink_{0,1,2,3}{0,1,2,3,4,5,6,7,8,9} symlink_self empty_file
+ cd /
+ _cleanup_testdir
}
_touch()
. ./common.rc
. ./common.filter
+# link correct .out file
+_link_out_file $seq.out
+
# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
+
+_setup_testdir
-cd $TEST_DIR
+cd $testdir
o=empty_file
_touch $o
echo ""
_touch symlink_self
+
+
exit
-QA output created by 005
-*** touch deep symlinks
-
-touch: symlink_05: Too many levels of symbolic links
-touch: symlink_06: Too many levels of symbolic links
-touch: symlink_07: Too many levels of symbolic links
-touch: symlink_08: Too many levels of symbolic links
-touch: symlink_09: Too many levels of symbolic links
-touch: symlink_10: Too many levels of symbolic links
-touch: symlink_11: Too many levels of symbolic links
-touch: symlink_12: Too many levels of symbolic links
-touch: symlink_13: Too many levels of symbolic links
-touch: symlink_14: Too many levels of symbolic links
-touch: symlink_15: Too many levels of symbolic links
-touch: symlink_16: Too many levels of symbolic links
-touch: symlink_17: Too many levels of symbolic links
-touch: symlink_18: Too many levels of symbolic links
-touch: symlink_19: Too many levels of symbolic links
-touch: symlink_20: Too many levels of symbolic links
-touch: symlink_21: Too many levels of symbolic links
-touch: symlink_22: Too many levels of symbolic links
-touch: symlink_23: Too many levels of symbolic links
-touch: symlink_24: Too many levels of symbolic links
-touch: symlink_25: Too many levels of symbolic links
-touch: symlink_26: Too many levels of symbolic links
-touch: symlink_27: Too many levels of symbolic links
-touch: symlink_28: Too many levels of symbolic links
-touch: symlink_29: Too many levels of symbolic links
-touch: symlink_30: Too many levels of symbolic links
-touch: symlink_31: Too many levels of symbolic links
-touch: symlink_32: Too many levels of symbolic links
-touch: symlink_33: Too many levels of symbolic links
-touch: symlink_34: Too many levels of symbolic links
-touch: symlink_35: Too many levels of symbolic links
-touch: symlink_36: Too many levels of symbolic links
-touch: symlink_37: Too many levels of symbolic links
-touch: symlink_38: Too many levels of symbolic links
-touch: symlink_39: Too many levels of symbolic links
-
-*** touch recusive symlinks
-
-touch: symlink_self: Too many levels of symbolic links
#! /bin/sh
-# XFS QA Test No. 006
+# FS QA Test No. 006
#
# permname
#
_cleanup()
{
- rm -rf $TEST_DIR/permname.$$
+ cd /
+ rm -f $tmp.*
+ rm -rf $testdir/permname.$$
+ _cleanup_testdir
}
_count()
. ./common.filter
# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
+_setup_testdir
-mkdir $TEST_DIR/permname.$$
+mkdir $testdir/permname.$$
echo ""
echo "single thread permname"
echo "----------------------"
-mkdir $TEST_DIR/permname.$$/a
-cd $TEST_DIR/permname.$$/a
+mkdir $testdir/permname.$$/a
+cd $testdir/permname.$$/a
$here/src/permname -c 4 -l 6 -p 1 || echo "permname returned $?"
find . | _count
echo ""
echo "multi thread permname"
echo "----------------------"
-mkdir $TEST_DIR/permname.$$/b
-cd $TEST_DIR/permname.$$/b
+mkdir $testdir/permname.$$/b
+cd $testdir/permname.$$/b
$here/src/permname -c 4 -l 6 -p 4 || echo "permname returned $?"
find . | _count
#! /bin/sh
-# XFS QA Test No. 007
+# FS QA Test No. 007
#
# drive the src/nametest program
# which does a heap of open(create)/unlink/stat
_cleanup()
{
+ cd /
rm -f $tmp.*
- rm -rf $TEST_DIR/$seq
+ rm -rf $testdir/$seq
+ _cleanup_testdir
}
# get standard environment, filters and checks
. ./common.filter
# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
+
+_setup_testdir
+
status=1 # default failure
sourcefile=$tmp.nametest
seed=1
i=`expr $i + 1`
done
-mkdir $TEST_DIR/$seq
-cd $TEST_DIR/$seq
+mkdir $testdir/$seq
+cd $testdir/$seq
$here/src/nametest -l $sourcefile -s $seed -i $iterations -z
-
-#optional stuff if your test has verbose output to help resolve problems
-#echo
-#echo "If failure, check $seq.full (this) and $seq.full.ok (reference)"
-
-
# success, all done
status=0
exit
#! /bin/sh
-# XFS QA Test No. 008
+# FS QA Test No. 008
#
# randholes test
#
tmp=/tmp/$$
status=0 # success is the default!
pgsize=`$here/src/feature -p`
-trap "rm -f $tmp.*; _cleanup; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
- rm -rf $TEST_DIR/randholes.$$.*
+ rm -f $tmp.*
+ rm -rf $testdir/randholes.$$.*
+ _cleanup_testdir
}
_filter()
_holes="$2"
_param="$3"
- out=$TEST_DIR/randholes.$$.$_n
+ out=$testdir/randholes.$$.$_n
echo ""
echo "randholes.$_n : $_param" | _filter
echo "------------------------------------------"
}
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
+_setup_testdir
rm -f $here/$seq.out.full
#! /bin/sh
-# XFS QA Test No. 009
+# FS QA Test No. 009
#
# alloc test
#
. ./common.filter
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_require_scratch
#! /bin/sh
-# XFS QA Test No. 010
+# FS QA Test No. 010
#
# dbtest
#
here=`pwd`
tmp=/tmp/$$
status=0 # success is the default!
+trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
- rm -f $TEST_DIR/DBtest*.{pag,dir}
+ cd /
+ rm -f $tmp.*
+ rm -f $testdir/DBtest*.{pag,dir}
+ _cleanup_testdir
}
-trap "_cleanup; rm -f $tmp.*; exit \$status" 0 1 2 3 15
# filter random number output from dbtest
#
. ./common.rc
. ./common.filter
+# link correct .out file
+_link_out_file $seq.out
+
+# put operating system in $os
+_get_os
+
[ -x $here/src/dbtest ] || _notrun "dbtest was not built for this platform"
# real QA test starts here
-cd $TEST_DIR
-$here/src/dbtest -l 5 -n 3000 | _filter_dbtest
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
+
+_setup_testdir
+
+rm -f $seq.full
+
+cd $testdir
+
+if [ $os == 'irix' ]; then
+ $here/src/dbtest -l 5 -n 500 2>&1 | tee -a $here/$seq.full | _filter_dbtest
+elif [ $os == 'linux' ]; then
+ $here/src/dbtest -l 5 -n 3000 2>&1 | tee -a $here/$seq.full | _filter_dbtest
+fi
# success, all done
exit
-QA output created by 010
-dbtest v1.0
-
-Creating database containing 3000 records...
- performing lookups for 5 iterations...
- using BLEEP as seed for srandom()...
-
-
-There were BLEEP duplicate checksums generated
-
-Performing lookups on database...
-
- Sequential lookups...
-
-
- Random lookups...
-
-Lookups succeeded...
-
-Performing lookups on database...
-
- Sequential lookups...
-
-
- Random lookups...
-
-Lookups succeeded...
-
-Performing lookups on database...
-
- Sequential lookups...
-
-
- Random lookups...
-
-Lookups succeeded...
-
-Performing lookups on database...
-
- Sequential lookups...
-
-
- Random lookups...
-
-Lookups succeeded...
-
-Performing lookups on database...
-
- Sequential lookups...
-
-
- Random lookups...
-
-Lookups succeeded...
-
-Cleaning up database...
-
-There were BLEEP duplicate checksums generated
#! /bin/sh
-# XFS QA Test No. 011
+# FS QA Test No. 011
#
# dirstress
#
seq=`basename $0`
echo "QA output created by $seq"
-# get standard environment, filters and checks
-. ./common.rc
-. ./common.filter
-
+out=""
here=`pwd`
tmp=/tmp/$$
status=0 # success is the default!
-out=$TEST_DIR/dirstress.$$
-trap "rm -f $tmp.*; _cleanup; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
- rm -rf $out
+ cd /
+ rm -f $tmp.*
+ [ -n "$out" ] && rm -rf $out
+ _cleanup_testdir
}
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
-count=1000
-if ! mkdir $out
-then
- echo "!! couldn't mkdir $out"
- status=1
- exit
-fi
+# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
+
+_setup_testdir
+
+out=$testdir/dirstress.$$
-rm -f $seq.out.full
+rm -f $seq.full
_test()
{
if ! $here/src/dirstress -d $out -f $count $args >$tmp.out 2>&1
then
echo " dirstress failed"
- echo "*** TEST $test -d $out -f $count $args" >>$seq.out.full
- cat $tmp.out >>$seq.out.full
+ echo "*** TEST $test -d $out -f $count $args" >>$seq.full
+ cat $tmp.out >>$seq.full
status=1
fi
}
# dirstress doesn't check returns - this is a crash & burn test.
+if ! mkdir $out
+then
+ echo "!! couldn't mkdir $out"
+ status=1
+ exit
+fi
count=1000
_test 1 "-p 1 -n 1" $count
#! /bin/sh
-# XFS QA Test No. 012
+# FS QA Test No. 012
#
# holes
#
here=`pwd`
tmp=/tmp/$$
status=0 # success is the default!
-trap "rm -f $tmp.*; _cleanup; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
- rm -rf $TEST_DIR/holes.$$.*
+ cd /
+ rm -f $tmp.*
+ rm -rf $testdir/holes.$$.*
+ _cleanup_testdir
}
_filesize()
failed=0
- out=$TEST_DIR/holes.$$.$_n
+ out=$testdir/holes.$$.$_n
echo ""
echo "holes.$_n : $_param"
echo "-----------------------------------------------"
}
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
+_setup_testdir
rm -f $here/$seq.out.full
#! /bin/sh
-# XFS QA Test No. 013
+# FS QA Test No. 013
#
# fsstress
#
_cleanup()
{
+ cd /
# we might get here with a RO FS
mount -o remount,rw $TEST_DEV >/dev/null 2>&1
# now kill!
- rm -rf $TEST_DIR/fsstress.$$.*
+ rm -rf $testdir/fsstress.$$.*
+ _cleanup_testdir
}
_filesize()
. ./common.rc
. ./common.filter
+# put operating system in $os
+_get_os
+
_do_test()
{
_n="$1"
_param="$2"
_count="$3"
- out=$TEST_DIR/fsstress.$$.$_n
+ out=$testdir/fsstress.$$.$_n
rm -rf $out
if ! mkdir $out
then
# real QA test starts here
+_supported_fs xfs udf
+_supported_os IRIX Linux
+
+_setup_testdir
rm -f $here/$seq.full
echo "brevity is wit..."
-count=2000
+if [ $os == 'irix' ]; then
+ count=1024
+elif [ $os == 'linux' ]; then
+ count=2000
+else
+ echo Error test $seq does not run on the operating system: `uname`
+ exit
+fi
procs=20
_check_test_fs
#! /bin/sh
-# XFS QA Test No. 014
+# FS QA Test No. 014
#
# truncfile
#
here=`pwd`
tmp=/tmp/$$
status=0 # success is the default!
-trap "rm -f $tmp.*; _cleanup; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
- rm -rf $TEST_DIR/truncfile.$$.*
+ cd /
+ rm -f $tmp.*
+ rm -rf $testdir/truncfile.$$.*
+ _cleanup_testdir
}
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
-echo "berevity is wit..."
+_setup_testdir
+
+echo "brevity is wit..."
echo "------"
echo "test 1"
echo "------"
-if ! src/truncfile -c 10000 $TEST_DIR/truncfile.$$.0 >$tmp.out 2>&1
+if ! src/truncfile -c 10000 $testdir/truncfile.$$.0 >$tmp.out 2>&1
then
out=`cat $tmp.out`
echo "truncfile returned $? : \"$out\""
QA output created by 014
-berevity is wit...
+brevity is wit...
------
test 1
------
#! /bin/sh
-# XFS QA Test No. 015
+# FS QA Test No. 015
#
# check out-of-space behaviour
#
}
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
_require_scratch
_require_nobigloopfs
#! /bin/sh
-# XFS QA Test No. 016
+# FS QA Test No. 016
#
# test end of log overwrite bug #796141
#
_cleanup()
{
+ cd /
rm -f $tmp.*
echo "*** unmount"
umount $SCRATCH_MNT 2>/dev/null
[ $? -ne 0 ] && \
_notrun "Cannot mkfs for this test using MKFS_OPTIONS specified"
_filter_mkfs <$tmp.mkfs0 >/dev/null 2>$tmp.mkfs
- source $tmp.mkfs
+ . $tmp.mkfs
}
_log_traffic()
f="c6c6c6c6"
echo "*** check for corruption"
echo "expect $f..." >>$seq.full
- xfs_db -r $1 -c "fsblock $2" -c "print" | head | tee -a $seq.full | \
+ xfs_db -r -c "fsblock $2" -c "print" $1 | head | tee -a $seq.full | \
grep -q -v "$f $f $f $f $f $f $f $f" && \
_fail "!!! block $2 corrupted!"
}
. ./common.filter
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
rm -f $seq.full
#! /bin/sh
-# XFS QA Test No. 017
+# FS QA Test No. 017
#
# test remount ro - pv 795642
#
here=`pwd`
tmp=/tmp/$$
status=1
+trap "_cleanup; rm -f $tmp.*; exit \$status" 0 1 2 3 15
_cleanup()
{
echo "*** unmount"
umount $SCRATCH_MNT 2>/dev/null
}
-trap "_cleanup; rm -f $tmp.*; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
_require_scratch
#! /bin/sh
-# XFS QA Test No. 018
+# FS QA Test No. 018
#
# xfs_logprint test - test v2 logs of different LR sizes
#
_cleanup()
{
+ cd /
_cleanup_logfiles
rm -f $tmp.*
echo "*** unmount"
trap "_cleanup; exit \$status" 0 1 2 3 15
# real QA test starts here
-
+_supported_fs xfs
+_supported_os IRIX Linux
# prelim
rm -f $seq.full $tmp.*
_require_scratch
+
echo "*** init FS"
umount $SCRATCH_DEV >/dev/null 2>&1
fi
export MKFS_OPTIONS=$mkfs
export MOUNT_OPTIONS=$mnt
+
_mkfs_log
_create_log
_check_log
#! /bin/sh
-# XFS QA Test No. 020
+# FS QA Test No. 020
#
# extended attributes
#
here=`pwd`
tmp=/tmp/$$
status=0 # success is the default!
-trap "rm -f $tmp.* $testfile; exit \$status" 0 1 2 3 15
+trap "_cleanup; rm -f $tmp.* $testfile; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
+_cleanup()
+{
+ _cleanup_testdir
+}
+
_filter()
{
- sed "s#$TEST_DIR[^ :]*#<TESTFILE>#g;
+ sed "s#$testdir[^ :]*#<TESTFILE>#g;
s#$tmp[^ :]*#<TMPFILE>#g;
s#/proc[^ :]*#<PROCFILE>#g" $1
}
fi
}
+
+# real QA test starts here
+_supported_fs xfs udf
+_supported_os Linux
+
[ -x /usr/bin/attr ] || _notrun "attr is not installed"
[ -x /usr/bin/getfattr ] || _notrun "getfattr is not installed"
-# real QA test starts here
+_setup_testdir
rm -f $seq.full
-testfile=$TEST_DIR/attribute_$$
+testfile=$testdir/attribute_$$
echo "*** list non-existant file"
_attr_list $testfile
#! /bin/sh
-# XFS QA Test No. 022
+# FS QA Test No. 022
#
# Test out a level 0 dump/restore to a tape of a subdir
# i.e. it is testing out drive_scsitape.c
. ./common.rc
. ./common.dump
+# link correct .out file
+_link_out_file $seq.out
+
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_require_tape $TAPE_DEV
_create_dumpdir_stress
+++ /dev/null
-QA output created by 022
-Put scsi tape driver into variable block size mode
-Creating directory system to dump using fsstress.
-
------------------------------------------------
-fsstress : -f link=10 -f creat=10 -f mkdir=10 -f truncate=5 -f symlink=10
------------------------------------------------
-Erasing tape
-Dumping to tape...
-xfsdump -s DUMP_SUBDIR -f TAPE_DEV -M stress_tape_media -L stress_022 SCRATCH_MNT
-xfsdump: using scsi tape (drive_scsitape) strategy
-xfsdump: level 0 dump of HOSTNAME:SCRATCH_MNT
-xfsdump: dump date: DATE
-xfsdump: session id: ID
-xfsdump: session label: "stress_022"
-xfsdump: ino map phase 1: parsing subtree selections
-xfsdump: ino map phase 2: constructing initial dump list
-xfsdump: ino map phase 3: pruning unneeded subtrees
-xfsdump: ino map phase 4: estimating dump size
-xfsdump: ino map phase 5: skipping (only one dump stream)
-xfsdump: ino map construction complete
-xfsdump: estimated dump size: NUM bytes
-xfsdump: /var/xfsdump/inventory created
-xfsdump: preparing drive
-xfsdump: creating dump session media file 0 (media 0, file 0)
-xfsdump: dumping ino map
-xfsdump: dumping directories
-xfsdump: dumping non-directory files
-xfsdump: ending media file
-xfsdump: media file size NUM bytes
-xfsdump: dumping session inventory
-xfsdump: beginning inventory media file
-xfsdump: media file 1 (media 0, file 1)
-xfsdump: ending inventory media file
-xfsdump: inventory media file size NUM bytes
-xfsdump: writing stream terminator
-xfsdump: beginning media stream terminator
-xfsdump: media file 2 (media 0, file 2)
-xfsdump: ending media stream terminator
-xfsdump: media stream terminator size BLOCKSZ bytes
-xfsdump: dump size (non-dir files) : NUM bytes
-xfsdump: dump complete: SECS seconds elapsed
-xfsdump: Dump Status: SUCCESS
-Rewinding tape
-Restoring from tape...
-xfsrestore -f TAPE_DEV -L stress_022 RESTORE_DIR
-xfsrestore: using scsi tape (drive_scsitape) strategy
-xfsrestore: using online session inventory
-xfsrestore: searching media for directory dump
-xfsrestore: preparing drive
-xfsrestore: examining media file 0
-xfsrestore: reading directories
-xfsrestore: 34 directories and 94 entries processed
-xfsrestore: directory post-processing
-xfsrestore: restoring non-directory files
-xfsrestore: restore complete: SECS seconds elapsed
-xfsrestore: Restore Status: SUCCESS
-Comparing listing of dump directory with restore directory
-Files TMP.dump_dir and TMP.restore_dir are identical
#! /bin/sh
-# XFS QA Test No. 023
+# FS QA Test No. 023
#
# To test xfsdump/restore to tape using a directory with
# files with data created by src/fill.
. ./common.rc
. ./common.dump
+# link correct .out file
+_link_out_file $seq.out
+
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_require_tape $TAPE_DEV
_create_dumpdir_fill
+++ /dev/null
-QA output created by 023
-Put scsi tape driver into variable block size mode
-Creating directory system to dump using src/fill.
-Setup ....................................
-Erasing tape
-Dumping to tape...
-xfsdump -s DUMP_SUBDIR -f TAPE_DEV -M stress_tape_media -L stress_023 SCRATCH_MNT
-xfsdump: using scsi tape (drive_scsitape) strategy
-xfsdump: level 0 dump of HOSTNAME:SCRATCH_MNT
-xfsdump: dump date: DATE
-xfsdump: session id: ID
-xfsdump: session label: "stress_023"
-xfsdump: ino map phase 1: parsing subtree selections
-xfsdump: ino map phase 2: constructing initial dump list
-xfsdump: ino map phase 3: pruning unneeded subtrees
-xfsdump: ino map phase 4: estimating dump size
-xfsdump: ino map phase 5: skipping (only one dump stream)
-xfsdump: ino map construction complete
-xfsdump: estimated dump size: NUM bytes
-xfsdump: /var/xfsdump/inventory created
-xfsdump: preparing drive
-xfsdump: creating dump session media file 0 (media 0, file 0)
-xfsdump: dumping ino map
-xfsdump: dumping directories
-xfsdump: dumping non-directory files
-xfsdump: ending media file
-xfsdump: media file size NUM bytes
-xfsdump: dumping session inventory
-xfsdump: beginning inventory media file
-xfsdump: media file 1 (media 0, file 1)
-xfsdump: ending inventory media file
-xfsdump: inventory media file size NUM bytes
-xfsdump: writing stream terminator
-xfsdump: beginning media stream terminator
-xfsdump: media file 2 (media 0, file 2)
-xfsdump: ending media stream terminator
-xfsdump: media stream terminator size BLOCKSZ bytes
-xfsdump: dump size (non-dir files) : NUM bytes
-xfsdump: dump complete: SECS seconds elapsed
-xfsdump: Dump Status: SUCCESS
-Rewinding tape
-Restoring from tape...
-xfsrestore -f TAPE_DEV -L stress_023 RESTORE_DIR
-xfsrestore: using scsi tape (drive_scsitape) strategy
-xfsrestore: using online session inventory
-xfsrestore: searching media for directory dump
-xfsrestore: preparing drive
-xfsrestore: examining media file 0
-xfsrestore: reading directories
-xfsrestore: 3 directories and 38 entries processed
-xfsrestore: directory post-processing
-xfsrestore: restoring non-directory files
-xfsrestore: restore complete: SECS seconds elapsed
-xfsrestore: Restore Status: SUCCESS
-Comparing dump directory with restore directory
-Files DUMP_DIR/big and RESTORE_DIR/DUMP_SUBDIR/big are identical
-Files DUMP_DIR/small and RESTORE_DIR/DUMP_SUBDIR/small are identical
-Files DUMP_DIR/sub/a and RESTORE_DIR/DUMP_SUBDIR/sub/a are identical
-Files DUMP_DIR/sub/a00 and RESTORE_DIR/DUMP_SUBDIR/sub/a00 are identical
-Files DUMP_DIR/sub/a000 and RESTORE_DIR/DUMP_SUBDIR/sub/a000 are identical
-Files DUMP_DIR/sub/b and RESTORE_DIR/DUMP_SUBDIR/sub/b are identical
-Files DUMP_DIR/sub/b00 and RESTORE_DIR/DUMP_SUBDIR/sub/b00 are identical
-Files DUMP_DIR/sub/big and RESTORE_DIR/DUMP_SUBDIR/sub/big are identical
-Files DUMP_DIR/sub/c and RESTORE_DIR/DUMP_SUBDIR/sub/c are identical
-Files DUMP_DIR/sub/c00 and RESTORE_DIR/DUMP_SUBDIR/sub/c00 are identical
-Files DUMP_DIR/sub/d and RESTORE_DIR/DUMP_SUBDIR/sub/d are identical
-Files DUMP_DIR/sub/d00 and RESTORE_DIR/DUMP_SUBDIR/sub/d00 are identical
-Files DUMP_DIR/sub/e and RESTORE_DIR/DUMP_SUBDIR/sub/e are identical
-Files DUMP_DIR/sub/e00 and RESTORE_DIR/DUMP_SUBDIR/sub/e00 are identical
-Files DUMP_DIR/sub/e000 and RESTORE_DIR/DUMP_SUBDIR/sub/e000 are identical
-Files DUMP_DIR/sub/f and RESTORE_DIR/DUMP_SUBDIR/sub/f are identical
-Files DUMP_DIR/sub/f00 and RESTORE_DIR/DUMP_SUBDIR/sub/f00 are identical
-Files DUMP_DIR/sub/g and RESTORE_DIR/DUMP_SUBDIR/sub/g are identical
-Files DUMP_DIR/sub/g00 and RESTORE_DIR/DUMP_SUBDIR/sub/g00 are identical
-Files DUMP_DIR/sub/h and RESTORE_DIR/DUMP_SUBDIR/sub/h are identical
-Files DUMP_DIR/sub/h00 and RESTORE_DIR/DUMP_SUBDIR/sub/h00 are identical
-Files DUMP_DIR/sub/h000 and RESTORE_DIR/DUMP_SUBDIR/sub/h000 are identical
-Files DUMP_DIR/sub/i and RESTORE_DIR/DUMP_SUBDIR/sub/i are identical
-Files DUMP_DIR/sub/i00 and RESTORE_DIR/DUMP_SUBDIR/sub/i00 are identical
-Files DUMP_DIR/sub/j and RESTORE_DIR/DUMP_SUBDIR/sub/j are identical
-Files DUMP_DIR/sub/j00 and RESTORE_DIR/DUMP_SUBDIR/sub/j00 are identical
-Files DUMP_DIR/sub/k and RESTORE_DIR/DUMP_SUBDIR/sub/k are identical
-Files DUMP_DIR/sub/k00 and RESTORE_DIR/DUMP_SUBDIR/sub/k00 are identical
-Files DUMP_DIR/sub/k000 and RESTORE_DIR/DUMP_SUBDIR/sub/k000 are identical
-Files DUMP_DIR/sub/l and RESTORE_DIR/DUMP_SUBDIR/sub/l are identical
-Files DUMP_DIR/sub/l00 and RESTORE_DIR/DUMP_SUBDIR/sub/l00 are identical
-Files DUMP_DIR/sub/m and RESTORE_DIR/DUMP_SUBDIR/sub/m are identical
-Files DUMP_DIR/sub/m00 and RESTORE_DIR/DUMP_SUBDIR/sub/m00 are identical
-Files DUMP_DIR/sub/n and RESTORE_DIR/DUMP_SUBDIR/sub/n are identical
-Files DUMP_DIR/sub/n00 and RESTORE_DIR/DUMP_SUBDIR/sub/n00 are identical
-Files DUMP_DIR/sub/small and RESTORE_DIR/DUMP_SUBDIR/sub/small are identical
-Comparing listing of dump directory with restore directory
-Files TMP.dump_dir and TMP.restore_dir are identical
#! /bin/sh
-# XFS QA Test No. 024
+# FS QA Test No. 024
#
# Test out incremental dumps
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
_require_tape $TAPE_DEV
_create_dumpdir_fill
#! /bin/sh
-# XFS QA Test No. 025
+# FS QA Test No. 025
#
# Test dump/restore using -m option (min strategy)
#
. ./common.rc
. ./common.dump
+# link correct .out file
+_link_out_file $seq.out
+
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_require_tape $TAPE_DEV
_create_dumpdir_fill
-QA output created by 025
-Put scsi tape driver into variable block size mode
-Creating directory system to dump using src/fill.
-Setup ....................................
-Erasing tape
-Dumping to tape...
-xfsdump -m -b 1048576 -l0 -f TAPE_DEV -M stress_tape_media -L stress_025 SCRATCH_MNT
-xfsdump: using minimum scsi tape (drive_minrmt) strategy
-xfsdump: level 0 dump of HOSTNAME:SCRATCH_MNT
-xfsdump: dump date: DATE
-xfsdump: session id: ID
-xfsdump: session label: "stress_025"
-xfsdump: ino map phase 1: skipping (no subtrees specified)
-xfsdump: ino map phase 2: constructing initial dump list
-xfsdump: ino map phase 3: skipping (no pruning necessary)
-xfsdump: ino map phase 4: skipping (size estimated in phase 2)
-xfsdump: ino map phase 5: skipping (only one dump stream)
-xfsdump: ino map construction complete
-xfsdump: estimated dump size: NUM bytes
-xfsdump: /var/xfsdump/inventory created
-xfsdump: preparing drive
-xfsdump: creating dump session media file 0 (media 0, file 0)
-xfsdump: dumping ino map
-xfsdump: dumping directories
-xfsdump: dumping non-directory files
-xfsdump: ending media file
-xfsdump: media file size NUM bytes
-xfsdump: dumping session inventory
-xfsdump: beginning inventory media file
-xfsdump: media file 1 (media 0, file 1)
-xfsdump: ending inventory media file
-xfsdump: inventory media file size NUM bytes
-xfsdump: dump size (non-dir files) : NUM bytes
-xfsdump: dump complete: SECS seconds elapsed
-xfsdump: Dump Status: SUCCESS
-Rewinding tape
-Restoring from tape...
-xfsrestore -m -b 1048576 -f TAPE_DEV -L stress_025 RESTORE_DIR
-xfsrestore: using minimum scsi tape (drive_minrmt) strategy
-xfsrestore: using online session inventory
-xfsrestore: searching media for directory dump
-xfsrestore: preparing drive
-xfsrestore: examining media file 0
-xfsrestore: reading directories
-xfsrestore: 3 directories and 38 entries processed
-xfsrestore: directory post-processing
-xfsrestore: restoring non-directory files
-xfsrestore: restore complete: SECS seconds elapsed
-xfsrestore: Restore Status: SUCCESS
-Comparing dump directory with restore directory
-Files DUMP_DIR/big and RESTORE_DIR/DUMP_SUBDIR/big are identical
-Files DUMP_DIR/small and RESTORE_DIR/DUMP_SUBDIR/small are identical
-Files DUMP_DIR/sub/a and RESTORE_DIR/DUMP_SUBDIR/sub/a are identical
-Files DUMP_DIR/sub/a00 and RESTORE_DIR/DUMP_SUBDIR/sub/a00 are identical
-Files DUMP_DIR/sub/a000 and RESTORE_DIR/DUMP_SUBDIR/sub/a000 are identical
-Files DUMP_DIR/sub/b and RESTORE_DIR/DUMP_SUBDIR/sub/b are identical
-Files DUMP_DIR/sub/b00 and RESTORE_DIR/DUMP_SUBDIR/sub/b00 are identical
-Files DUMP_DIR/sub/big and RESTORE_DIR/DUMP_SUBDIR/sub/big are identical
-Files DUMP_DIR/sub/c and RESTORE_DIR/DUMP_SUBDIR/sub/c are identical
-Files DUMP_DIR/sub/c00 and RESTORE_DIR/DUMP_SUBDIR/sub/c00 are identical
-Files DUMP_DIR/sub/d and RESTORE_DIR/DUMP_SUBDIR/sub/d are identical
-Files DUMP_DIR/sub/d00 and RESTORE_DIR/DUMP_SUBDIR/sub/d00 are identical
-Files DUMP_DIR/sub/e and RESTORE_DIR/DUMP_SUBDIR/sub/e are identical
-Files DUMP_DIR/sub/e00 and RESTORE_DIR/DUMP_SUBDIR/sub/e00 are identical
-Files DUMP_DIR/sub/e000 and RESTORE_DIR/DUMP_SUBDIR/sub/e000 are identical
-Files DUMP_DIR/sub/f and RESTORE_DIR/DUMP_SUBDIR/sub/f are identical
-Files DUMP_DIR/sub/f00 and RESTORE_DIR/DUMP_SUBDIR/sub/f00 are identical
-Files DUMP_DIR/sub/g and RESTORE_DIR/DUMP_SUBDIR/sub/g are identical
-Files DUMP_DIR/sub/g00 and RESTORE_DIR/DUMP_SUBDIR/sub/g00 are identical
-Files DUMP_DIR/sub/h and RESTORE_DIR/DUMP_SUBDIR/sub/h are identical
-Files DUMP_DIR/sub/h00 and RESTORE_DIR/DUMP_SUBDIR/sub/h00 are identical
-Files DUMP_DIR/sub/h000 and RESTORE_DIR/DUMP_SUBDIR/sub/h000 are identical
-Files DUMP_DIR/sub/i and RESTORE_DIR/DUMP_SUBDIR/sub/i are identical
-Files DUMP_DIR/sub/i00 and RESTORE_DIR/DUMP_SUBDIR/sub/i00 are identical
-Files DUMP_DIR/sub/j and RESTORE_DIR/DUMP_SUBDIR/sub/j are identical
-Files DUMP_DIR/sub/j00 and RESTORE_DIR/DUMP_SUBDIR/sub/j00 are identical
-Files DUMP_DIR/sub/k and RESTORE_DIR/DUMP_SUBDIR/sub/k are identical
-Files DUMP_DIR/sub/k00 and RESTORE_DIR/DUMP_SUBDIR/sub/k00 are identical
-Files DUMP_DIR/sub/k000 and RESTORE_DIR/DUMP_SUBDIR/sub/k000 are identical
-Files DUMP_DIR/sub/l and RESTORE_DIR/DUMP_SUBDIR/sub/l are identical
-Files DUMP_DIR/sub/l00 and RESTORE_DIR/DUMP_SUBDIR/sub/l00 are identical
-Files DUMP_DIR/sub/m and RESTORE_DIR/DUMP_SUBDIR/sub/m are identical
-Files DUMP_DIR/sub/m00 and RESTORE_DIR/DUMP_SUBDIR/sub/m00 are identical
-Files DUMP_DIR/sub/n and RESTORE_DIR/DUMP_SUBDIR/sub/n are identical
-Files DUMP_DIR/sub/n00 and RESTORE_DIR/DUMP_SUBDIR/sub/n00 are identical
-Files DUMP_DIR/sub/small and RESTORE_DIR/DUMP_SUBDIR/sub/small are identical
-Only in SCRATCH_MNT: RESTORE_SUBDIR
#! /bin/sh
-# XFS QA Test No. 026
+# FS QA Test No. 026
#
# Test xfsdump/xfsrestore to a dump file (as opposed to a tape)
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_create_dumpdir_fill
_do_dump_file
-#! /bin/sh
-# XFS QA Test No. 027
+#! /bin/sh -x
+# FS QA Test No. 027
#
# Test out "xfsdump | xfsrestore"
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_create_dumpdir_fill
_do_dump_restore
xfsrestore: level: 0
xfsrestore: session label: ""
xfsrestore: media label: ""
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: 3 directories and 39 entries processed
#! /bin/sh
-# XFS QA Test No. 028
+# FS QA Test No. 028
#
# To test out xfsinvutil
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
# wipe test dir clean first
# so dump can be real quick
# to see if it did the job
#
_dump_inventory
-_do_invutil -n
+_do_invutil -F
_dump_inventory
xfsdump: dump complete: SECS seconds elapsed
xfsdump: Dump Status: SUCCESS
file system 0:
- fs id: ID
+ fs ID: ID
session 0:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 1:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 2:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 3:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 4:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
xfsdump: Dump Status: SUCCESS
Processing file /var/xfsdump/inventory/UUIDstab
Found entry for HOSTNAME:SCRATCH_MNT
Session 3: HOSTNAME:SCRATCH_MNT
Session 4: HOSTNAME:SCRATCH_MNT
file system 0:
- fs id: ID
+ fs ID: ID
session 0:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 1:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
xfsdump: Dump Status: SUCCESS
#! /bin/sh
-# XFS QA Test No. 029
+# FS QA Test No. 029
#
# exercise mkfs log (internal/external) zeroing
#
}
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+
_require_scratch
echo
#! /bin/sh
-# XFS QA Test No. 030
+# FS QA Test No. 030
#
# exercise xfs_repair repairing broken filesystems
#
_cleanup()
{
+ cd /
umount $SCRATCH_DEV 2>/dev/null
rm -f $tmp.*
}
. ./common.filter
. ./common.repair
+# link correct .out file
+_link_out_file $seq.out
+
# nuke the superblock, AGI, AGF, AGFL; then try repair the damage
#
_check_ag()
}
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
_require_nobigloopfs
_require_scratch
# now kick off the real repair test...
#
_scratch_mkfs_xfs $DSIZE | _filter_mkfs 2>$tmp.mkfs
-source $tmp.mkfs
+. $tmp.mkfs
_check_ag 0
_check_ag -1
+++ /dev/null
-QA output created by 030
-meta-data=DDEV isize=XXX agcount=N, agsize=XXX blks
-data = bsize=XXX blocks=XXX, imaxpct=PCT
- = sunit=XXX swidth=XXX, unwritten=X
-naming =VERN bsize=XXX
-log =LDEV bsize=XXX blocks=XXX
-realtime =RDEV extsz=XXX blocks=XXX, rtextents=XXX
-Corrupting sb 0 - setting bits to 0
-Wrote X.XXKb (value 0x0)
-Phase 1 - find and verify superblock...
-bad primary superblock - bad magic number !!!
-
-attempting to find secondary superblock...
-found candidate secondary superblock...
-verified secondary superblock...
-writing modified primary superblock
-sb root inode value INO inconsistent with calculated value INO
-resetting superblock root inode pointer to INO
-sb realtime bitmap inode INO inconsistent with calculated value INO
-resetting superblock realtime bitmap ino pointer to INO
-sb realtime summary inode INO inconsistent with calculated value INO
-resetting superblock realtime summary ino pointer to INO
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-Note - stripe unit (0) and width (0) fields have been reset.
-Please set with mount -o sunit=<value>,swidth=<value>
-done
-Corrupting agf 0 - setting bits to 0
-Wrote X.XXKb (value 0x0)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
-bad magic # 0x0 for agf 0
-bad version # 0 for agf 0
-bad length 0 for agf 0, should be LENGTH
-reset bad agf for ag 0
-bad agbno AGBNO for btbno root, agno 0
-bad agbno AGBNO for btbcnt root, agno 0
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-done
-Corrupting agi 0 - setting bits to 0
-Wrote X.XXKb (value 0x0)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
-bad magic # 0x0 for agi 0
-bad version # 0 for agi 0
-bad length # 0 for agi 0, should be LENGTH
-reset bad agi for ag 0
-bad agbno AGBNO for inobt root, agno 0
-root inode chunk not found
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
-error following ag 0 unlinked list
- - process known inodes and perform inode discovery...
-imap claims in-use inode INO is free, correcting imap
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-done
-Corrupting agfl 0 - setting bits to 0
-Wrote X.XXKb (value 0x0)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-done
-Corrupting sb 0 - setting bits to -1
-Wrote X.XXKb (value 0xffffffff)
-Phase 1 - find and verify superblock...
-bad primary superblock - bad magic number !!!
-
-attempting to find secondary superblock...
-found candidate secondary superblock...
-verified secondary superblock...
-writing modified primary superblock
-sb root inode value INO inconsistent with calculated value INO
-resetting superblock root inode pointer to INO
-sb realtime bitmap inode INO inconsistent with calculated value INO
-resetting superblock realtime bitmap ino pointer to INO
-sb realtime summary inode INO inconsistent with calculated value INO
-resetting superblock realtime summary ino pointer to INO
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-Note - stripe unit (0) and width (0) fields have been reset.
-Please set with mount -o sunit=<value>,swidth=<value>
-done
-Corrupting agf 0 - setting bits to -1
-Wrote X.XXKb (value 0xffffffff)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
-bad magic # 0xffffffff for agf 0
-bad version # -1 for agf 0
-bad sequence # -1 for agf 0
-bad length -1 for agf 0, should be LENGTH
-flfirst -1 in agf 0 too large (max = MAX)
-fllast -1 in agf 0 too large (max = MAX)
-reset bad agf for ag 0
-freeblk count 1 != flcount -1 in ag 0
-bad agbno AGBNO for btbno root, agno 0
-bad agbno AGBNO for btbcnt root, agno 0
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-done
-Corrupting agi 0 - setting bits to -1
-Wrote X.XXKb (value 0xffffffff)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
-bad magic # 0xffffffff for agi 0
-bad version # -1 for agi 0
-bad sequence # -1 for agi 0
-bad length # -1 for agi 0, should be LENGTH
-reset bad agi for ag 0
-bad agbno AGBNO for inobt root, agno 0
-root inode chunk not found
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
-imap claims in-use inode INO is free, correcting imap
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-done
-Corrupting agfl 0 - setting bits to -1
-Wrote X.XXKb (value 0xffffffff)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-done
#! /bin/sh
-# XFS QA Test No. 031
+# FS QA Test No. 031
#
# exercise xfs_repair - ensure repeated use doesn't corrupt
#
. ./common.rc
. ./common.filter
+# link correct .out file
+_link_out_file $seq.out
+
_check_repair()
{
_scratch_xfs_repair >$tmp.0 2>&1
}
# real QA test starts here
-#
+_supported_fs xfs
+_supported_os IRIX Linux
+
_require_nobigloopfs
_require_scratch
echo "=== version 1, one entry"
_scratch_mkfs_xfs $MKFSV1 >$tmp.mkfs0 2>&1
_filter_mkfs <$tmp.mkfs0 >/dev/null 2>$tmp.mkfs
-source $tmp.mkfs
+. $tmp.mkfs
_check_repair
echo "=== version 2, one entry (shortform)"
_scratch_mkfs_xfs $MKFSV2 | _filter_mkfs >/dev/null 2>&1
+++ /dev/null
-QA output created by 031
-=== version 1, one entry
-Repairing, iteration 1
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 2
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 3
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 4
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-
-=== version 2, one entry (shortform)
-Repairing, iteration 1
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 2
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 3
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 4
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-
-=== version 1, twenty entries
-Repairing, iteration 1
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 2
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 3
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 4
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-
-=== version 2, twenty entries (block form)
-Repairing, iteration 1
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-rebuilding directory inode INO
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 2
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-rebuilding directory inode INO
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 3
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-rebuilding directory inode INO
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 4
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-rebuilding directory inode INO
-Phase 7 - verify and correct link counts...
-done
-
-=== version 1, thousand entries
-Repairing, iteration 1
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 2
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 3
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 4
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-Phase 7 - verify and correct link counts...
-done
-
-=== version 2, thousand entries (leaf form)
-Repairing, iteration 1
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-rebuilding directory inode INO
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 2
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-rebuilding directory inode INO
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 3
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-rebuilding directory inode INO
-Phase 7 - verify and correct link counts...
-done
-Repairing, iteration 4
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
-Phase 3 - for each AG...
-Phase 4 - check for duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
-Phase 6 - check inode connectivity...
-rebuilding directory inode INO
-Phase 7 - verify and correct link counts...
-done
-
#! /bin/sh
-# XFS QA Test No. 032
+# FS QA Test No. 032
#
# cross check mkfs detection of foreign filesystems
#
. ./common.filter
# real QA test starts here
-#
+_supported_fs xfs
+_supported_os Linux
+
_require_nobigloopfs
_require_scratch
#! /bin/sh
-# XFS QA Test No. 033
+# FS QA Test No. 033
#
# exercise xfs_repair repairing broken filesystems (root inodes)
#
_cleanup()
{
- umount $SCRATCH_DEV 2>/dev/null
- rm -f $tmp.*
+ cd /
+ umount $SCRATCH_DEV 2>/dev/null
+ rm -f $tmp.*
}
trap "_cleanup; exit \$status" 0 1 2 3 15
. ./common.filter
. ./common.repair
+# link correct .out file
+_link_out_file $seq.out
+
# nuke the root, rt bitmap, and rt summary inodes
#
_check_root_inos()
}
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
_require_nobigloopfs
_require_scratch
# devzero blows away 512byte blocks, so make 512byte inodes (at least)
_scratch_mkfs_xfs | _filter_mkfs 2>$tmp.mkfs
-source $tmp.mkfs
+. $tmp.mkfs
[ $isize -lt 512 ] && \
_scratch_mkfs_xfs -isize=512 | _filter_mkfs >/dev/null 2>&1
+++ /dev/null
-QA output created by 033
-meta-data=DDEV isize=XXX agcount=N, agsize=XXX blks
-data = bsize=XXX blocks=XXX, imaxpct=PCT
- = sunit=XXX swidth=XXX, unwritten=X
-naming =VERN bsize=XXX
-log =LDEV bsize=XXX blocks=XXX
-realtime =RDEV extsz=XXX blocks=XXX, rtextents=XXX
-Corrupting root inode - setting bits to 0
-Wrote X.XXKb (value 0x0)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
-bad magic number 0x0 on inode INO
-bad version number 0x0 on inode INO
-bad magic number 0x0 on inode INO, resetting magic number
-bad version number 0x0 on inode INO, resetting version number
-imap claims a free inode INO is in use, correcting imap and clearing inode
-cleared root inode INO
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
-root inode lost
- - clear lost+found (if it exists) ...
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
-reinitializing root directory
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-resetting inode INO nlinks from 2 to 3
-done
-Corrupting rt bitmap inode - setting bits to 0
-Wrote X.XXKb (value 0x0)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
-bad magic number 0x0 on inode INO
-bad version number 0x0 on inode INO
-bad magic number 0x0 on inode INO, resetting magic number
-bad version number 0x0 on inode INO, resetting version number
-imap claims a free inode INO is in use, correcting imap and clearing inode
-cleared realtime bitmap inode INO
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
-reinitializing realtime bitmap inode
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-done
-Corrupting rt summary inode - setting bits to 0
-Wrote X.XXKb (value 0x0)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
-bad magic number 0x0 on inode INO
-bad version number 0x0 on inode INO
-bad magic number 0x0 on inode INO, resetting magic number
-bad version number 0x0 on inode INO, resetting version number
-imap claims a free inode INO is in use, correcting imap and clearing inode
-cleared realtime summary inode INO
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
-reinitializing realtime summary inode
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-Phase 7 - verify and correct link counts...
-done
-Corrupting root inode - setting bits to -1
-Wrote X.XXKb (value 0xffffffff)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
-bad magic number 0xffff on inode INO
-bad version number 0xffffffff on inode INO
-bad (negative) size -1 on inode INO
-bad magic number 0xffff on inode INO, resetting magic number
-bad version number 0xffffffff on inode INO, resetting version number
-bad (negative) size -1 on inode INO
-cleared root inode INO
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
-root inode lost
- - clear lost+found (if it exists) ...
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
-reinitializing root directory
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-disconnected dir inode INO, moving to lost+found
-Phase 7 - verify and correct link counts...
-resetting inode INO nlinks from 2 to 3
-done
-Corrupting rt bitmap inode - setting bits to -1
-Wrote X.XXKb (value 0xffffffff)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
-bad magic number 0xffff on inode INO
-bad version number 0xffffffff on inode INO
-bad (negative) size -1 on inode INO
-bad magic number 0xffff on inode INO, resetting magic number
-bad version number 0xffffffff on inode INO, resetting version number
-bad (negative) size -1 on inode INO
-cleared realtime bitmap inode INO
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
-reinitializing realtime bitmap inode
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-disconnected dir inode INO, moving to lost+found
-Phase 7 - verify and correct link counts...
-done
-Corrupting rt summary inode - setting bits to -1
-Wrote X.XXKb (value 0xffffffff)
-Phase 1 - find and verify superblock...
-Phase 2 - using <TYPEOF> log
- - zero log...
- - scan filesystem freespace and inode maps...
- - found root inode chunk
-Phase 3 - for each AG...
- - scan and clear agi unlinked lists...
- - process known inodes and perform inode discovery...
-bad magic number 0xffff on inode INO
-bad version number 0xffffffff on inode INO
-bad (negative) size -1 on inode INO
-bad magic number 0xffff on inode INO, resetting magic number
-bad version number 0xffffffff on inode INO, resetting version number
-bad (negative) size -1 on inode INO
-cleared realtime summary inode INO
- - process newly discovered inodes...
-Phase 4 - check for duplicate blocks...
- - setting up duplicate extent list...
- - clear lost+found (if it exists) ...
- - clearing existing "lost+found" inode
- - deleting existing "lost+found" entry
- - check for inodes claiming duplicate blocks...
-Phase 5 - rebuild AG headers and trees...
- - reset superblock...
-Phase 6 - check inode connectivity...
-reinitializing realtime summary inode
- - resetting contents of realtime bitmap and summary inodes
- - ensuring existence of lost+found directory
- - traversing filesystem starting at / ...
- - traversal finished ...
- - traversing all unattached subtrees ...
- - traversals finished ...
- - moving disconnected inodes to lost+found ...
-disconnected dir inode INO, moving to lost+found
-Phase 7 - verify and correct link counts...
-done
#! /bin/sh
-# XFS QA Test No. 034
+# FS QA Test No. 034
#
# pv 801241 - check for reference leaks from the *handle xfsctls
#
_cleanup()
{
+ cd /
+ rm -f $tmp.*
echo "*** unmount"
umount $SCRATCH_MNT 2>/dev/null
}
-trap "_cleanup; rm -f $tmp.*; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
_require_scratch
#! /bin/sh
-# XFS QA Test No. 035
+# FS QA Test No. 035
#
# Test doing multiple dumps to tape and restoring the 2nd one
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_require_tape $TAPE_DEV
_create_dumpdir_fill
#! /bin/sh
-# XFS QA Test No. 036
+# FS QA Test No. 036
#
# Test xfsdump/restore minrmt to a remote IRIX tape
#
. ./common.rc
. ./common.dump
+# link correct .out file
+_link_out_file $seq.out
+
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_require_tape $RMT_IRIXTAPE_DEV
_create_dumpdir_fill
+++ /dev/null
-QA output created by 036
-Creating directory system to dump using src/fill.
-Setup ....................................
-Erasing tape
-Dumping to tape...
-xfsdump -o -F -m -b 1048576 -l0 -f TAPE_DEV -M stress_tape_media -L stress_036 SCRATCH_MNT
-xfsdump: using minimum scsi tape (drive_minrmt) strategy
-xfsdump: level 0 dump of HOSTNAME:SCRATCH_MNT
-xfsdump: dump date: DATE
-xfsdump: session id: ID
-xfsdump: session label: "stress_036"
-xfsdump: ino map phase 1: skipping (no subtrees specified)
-xfsdump: ino map phase 2: constructing initial dump list
-xfsdump: ino map phase 3: skipping (no pruning necessary)
-xfsdump: ino map phase 4: skipping (size estimated in phase 2)
-xfsdump: ino map phase 5: skipping (only one dump stream)
-xfsdump: ino map construction complete
-xfsdump: estimated dump size: NUM bytes
-xfsdump: /var/xfsdump/inventory created
-xfsdump: preparing drive
-xfsdump: WARNING: media may contain data. Overwrite option specified
-xfsdump: creating dump session media file 0 (media 0, file 0)
-xfsdump: dumping ino map
-xfsdump: dumping directories
-xfsdump: dumping non-directory files
-xfsdump: ending media file
-xfsdump: media file size NUM bytes
-xfsdump: dumping session inventory
-xfsdump: beginning inventory media file
-xfsdump: media file 1 (media 0, file 1)
-xfsdump: ending inventory media file
-xfsdump: inventory media file size NUM bytes
-xfsdump: dump size (non-dir files) : NUM bytes
-xfsdump: dump complete: SECS seconds elapsed
-xfsdump: Dump Status: SUCCESS
-Rewinding tape
-Restoring from tape...
-xfsrestore -m -b 1048576 -f TAPE_DEV -L stress_036 RESTORE_DIR
-xfsrestore: using minimum scsi tape (drive_minrmt) strategy
-xfsrestore: using online session inventory
-xfsrestore: searching media for directory dump
-xfsrestore: preparing drive
-xfsrestore: examining media file 0
-xfsrestore: reading directories
-xfsrestore: 3 directories and 38 entries processed
-xfsrestore: directory post-processing
-xfsrestore: restoring non-directory files
-xfsrestore: restore complete: SECS seconds elapsed
-xfsrestore: Restore Status: SUCCESS
-Comparing dump directory with restore directory
-Files DUMP_DIR/big and RESTORE_DIR/DUMP_SUBDIR/big are identical
-Files DUMP_DIR/small and RESTORE_DIR/DUMP_SUBDIR/small are identical
-Files DUMP_DIR/sub/a and RESTORE_DIR/DUMP_SUBDIR/sub/a are identical
-Files DUMP_DIR/sub/a00 and RESTORE_DIR/DUMP_SUBDIR/sub/a00 are identical
-Files DUMP_DIR/sub/a000 and RESTORE_DIR/DUMP_SUBDIR/sub/a000 are identical
-Files DUMP_DIR/sub/b and RESTORE_DIR/DUMP_SUBDIR/sub/b are identical
-Files DUMP_DIR/sub/b00 and RESTORE_DIR/DUMP_SUBDIR/sub/b00 are identical
-Files DUMP_DIR/sub/big and RESTORE_DIR/DUMP_SUBDIR/sub/big are identical
-Files DUMP_DIR/sub/c and RESTORE_DIR/DUMP_SUBDIR/sub/c are identical
-Files DUMP_DIR/sub/c00 and RESTORE_DIR/DUMP_SUBDIR/sub/c00 are identical
-Files DUMP_DIR/sub/d and RESTORE_DIR/DUMP_SUBDIR/sub/d are identical
-Files DUMP_DIR/sub/d00 and RESTORE_DIR/DUMP_SUBDIR/sub/d00 are identical
-Files DUMP_DIR/sub/e and RESTORE_DIR/DUMP_SUBDIR/sub/e are identical
-Files DUMP_DIR/sub/e00 and RESTORE_DIR/DUMP_SUBDIR/sub/e00 are identical
-Files DUMP_DIR/sub/e000 and RESTORE_DIR/DUMP_SUBDIR/sub/e000 are identical
-Files DUMP_DIR/sub/f and RESTORE_DIR/DUMP_SUBDIR/sub/f are identical
-Files DUMP_DIR/sub/f00 and RESTORE_DIR/DUMP_SUBDIR/sub/f00 are identical
-Files DUMP_DIR/sub/g and RESTORE_DIR/DUMP_SUBDIR/sub/g are identical
-Files DUMP_DIR/sub/g00 and RESTORE_DIR/DUMP_SUBDIR/sub/g00 are identical
-Files DUMP_DIR/sub/h and RESTORE_DIR/DUMP_SUBDIR/sub/h are identical
-Files DUMP_DIR/sub/h00 and RESTORE_DIR/DUMP_SUBDIR/sub/h00 are identical
-Files DUMP_DIR/sub/h000 and RESTORE_DIR/DUMP_SUBDIR/sub/h000 are identical
-Files DUMP_DIR/sub/i and RESTORE_DIR/DUMP_SUBDIR/sub/i are identical
-Files DUMP_DIR/sub/i00 and RESTORE_DIR/DUMP_SUBDIR/sub/i00 are identical
-Files DUMP_DIR/sub/j and RESTORE_DIR/DUMP_SUBDIR/sub/j are identical
-Files DUMP_DIR/sub/j00 and RESTORE_DIR/DUMP_SUBDIR/sub/j00 are identical
-Files DUMP_DIR/sub/k and RESTORE_DIR/DUMP_SUBDIR/sub/k are identical
-Files DUMP_DIR/sub/k00 and RESTORE_DIR/DUMP_SUBDIR/sub/k00 are identical
-Files DUMP_DIR/sub/k000 and RESTORE_DIR/DUMP_SUBDIR/sub/k000 are identical
-Files DUMP_DIR/sub/l and RESTORE_DIR/DUMP_SUBDIR/sub/l are identical
-Files DUMP_DIR/sub/l00 and RESTORE_DIR/DUMP_SUBDIR/sub/l00 are identical
-Files DUMP_DIR/sub/m and RESTORE_DIR/DUMP_SUBDIR/sub/m are identical
-Files DUMP_DIR/sub/m00 and RESTORE_DIR/DUMP_SUBDIR/sub/m00 are identical
-Files DUMP_DIR/sub/n and RESTORE_DIR/DUMP_SUBDIR/sub/n are identical
-Files DUMP_DIR/sub/n00 and RESTORE_DIR/DUMP_SUBDIR/sub/n00 are identical
-Files DUMP_DIR/sub/small and RESTORE_DIR/DUMP_SUBDIR/sub/small are identical
-Only in SCRATCH_MNT: RESTORE_SUBDIR
#! /bin/sh
-# XFS QA Test No. 037
+# FS QA Test No. 037
#
# Test xfsdump/restore minrmt to a remote linux tape
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
_require_tape $RMT_TAPE_DEV
_create_dumpdir_fill
#! /bin/sh
-# XFS QA Test No. 038
+# FS QA Test No. 038
#
# Test xfsdump/restore to a remote linux tape
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
_require_tape $RMT_TAPE_DEV
_create_dumpdir_fill
#! /bin/sh
-# XFS QA Test No. 039
+# FS QA Test No. 039
#
# Test xfsdump/restore to a remote IRIX tape
#
. ./common.rc
. ./common.dump
+# link correct .out file
+_link_out_file $seq.out
+
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_require_tape $RMT_IRIXTAPE_DEV
_create_dumpdir_fill
+++ /dev/null
-QA output created by 039
-Creating directory system to dump using src/fill.
-Setup ....................................
-Erasing tape
-Dumping to tape...
-xfsdump -o -F -f TAPE_DEV -M stress_tape_media -L stress_039 SCRATCH_MNT
-xfsdump: using scsi tape (drive_scsitape) strategy
-xfsdump: level 0 dump of HOSTNAME:SCRATCH_MNT
-xfsdump: dump date: DATE
-xfsdump: session id: ID
-xfsdump: session label: "stress_039"
-xfsdump: ino map phase 1: skipping (no subtrees specified)
-xfsdump: ino map phase 2: constructing initial dump list
-xfsdump: ino map phase 3: skipping (no pruning necessary)
-xfsdump: ino map phase 4: skipping (size estimated in phase 2)
-xfsdump: ino map phase 5: skipping (only one dump stream)
-xfsdump: ino map construction complete
-xfsdump: estimated dump size: NUM bytes
-xfsdump: /var/xfsdump/inventory created
-xfsdump: preparing drive
-xfsdump: WARNING: media may contain data. Overwrite option specified
-xfsdump: creating dump session media file 0 (media 0, file 0)
-xfsdump: dumping ino map
-xfsdump: dumping directories
-xfsdump: dumping non-directory files
-xfsdump: ending media file
-xfsdump: media file size NUM bytes
-xfsdump: dumping session inventory
-xfsdump: beginning inventory media file
-xfsdump: media file 1 (media 0, file 1)
-xfsdump: ending inventory media file
-xfsdump: inventory media file size NUM bytes
-xfsdump: writing stream terminator
-xfsdump: beginning media stream terminator
-xfsdump: media file 2 (media 0, file 2)
-xfsdump: ending media stream terminator
-xfsdump: media stream terminator size 245760 bytes
-xfsdump: dump size (non-dir files) : NUM bytes
-xfsdump: dump complete: SECS seconds elapsed
-xfsdump: Dump Status: SUCCESS
-Rewinding tape
-Restoring from tape...
-xfsrestore -f TAPE_DEV -L stress_039 RESTORE_DIR
-xfsrestore: using scsi tape (drive_scsitape) strategy
-xfsrestore: using online session inventory
-xfsrestore: searching media for directory dump
-xfsrestore: preparing drive
-xfsrestore: examining media file 0
-xfsrestore: reading directories
-xfsrestore: 3 directories and 38 entries processed
-xfsrestore: directory post-processing
-xfsrestore: restoring non-directory files
-xfsrestore: restore complete: SECS seconds elapsed
-xfsrestore: Restore Status: SUCCESS
-Comparing dump directory with restore directory
-Files DUMP_DIR/big and RESTORE_DIR/DUMP_SUBDIR/big are identical
-Files DUMP_DIR/small and RESTORE_DIR/DUMP_SUBDIR/small are identical
-Files DUMP_DIR/sub/a and RESTORE_DIR/DUMP_SUBDIR/sub/a are identical
-Files DUMP_DIR/sub/a00 and RESTORE_DIR/DUMP_SUBDIR/sub/a00 are identical
-Files DUMP_DIR/sub/a000 and RESTORE_DIR/DUMP_SUBDIR/sub/a000 are identical
-Files DUMP_DIR/sub/b and RESTORE_DIR/DUMP_SUBDIR/sub/b are identical
-Files DUMP_DIR/sub/b00 and RESTORE_DIR/DUMP_SUBDIR/sub/b00 are identical
-Files DUMP_DIR/sub/big and RESTORE_DIR/DUMP_SUBDIR/sub/big are identical
-Files DUMP_DIR/sub/c and RESTORE_DIR/DUMP_SUBDIR/sub/c are identical
-Files DUMP_DIR/sub/c00 and RESTORE_DIR/DUMP_SUBDIR/sub/c00 are identical
-Files DUMP_DIR/sub/d and RESTORE_DIR/DUMP_SUBDIR/sub/d are identical
-Files DUMP_DIR/sub/d00 and RESTORE_DIR/DUMP_SUBDIR/sub/d00 are identical
-Files DUMP_DIR/sub/e and RESTORE_DIR/DUMP_SUBDIR/sub/e are identical
-Files DUMP_DIR/sub/e00 and RESTORE_DIR/DUMP_SUBDIR/sub/e00 are identical
-Files DUMP_DIR/sub/e000 and RESTORE_DIR/DUMP_SUBDIR/sub/e000 are identical
-Files DUMP_DIR/sub/f and RESTORE_DIR/DUMP_SUBDIR/sub/f are identical
-Files DUMP_DIR/sub/f00 and RESTORE_DIR/DUMP_SUBDIR/sub/f00 are identical
-Files DUMP_DIR/sub/g and RESTORE_DIR/DUMP_SUBDIR/sub/g are identical
-Files DUMP_DIR/sub/g00 and RESTORE_DIR/DUMP_SUBDIR/sub/g00 are identical
-Files DUMP_DIR/sub/h and RESTORE_DIR/DUMP_SUBDIR/sub/h are identical
-Files DUMP_DIR/sub/h00 and RESTORE_DIR/DUMP_SUBDIR/sub/h00 are identical
-Files DUMP_DIR/sub/h000 and RESTORE_DIR/DUMP_SUBDIR/sub/h000 are identical
-Files DUMP_DIR/sub/i and RESTORE_DIR/DUMP_SUBDIR/sub/i are identical
-Files DUMP_DIR/sub/i00 and RESTORE_DIR/DUMP_SUBDIR/sub/i00 are identical
-Files DUMP_DIR/sub/j and RESTORE_DIR/DUMP_SUBDIR/sub/j are identical
-Files DUMP_DIR/sub/j00 and RESTORE_DIR/DUMP_SUBDIR/sub/j00 are identical
-Files DUMP_DIR/sub/k and RESTORE_DIR/DUMP_SUBDIR/sub/k are identical
-Files DUMP_DIR/sub/k00 and RESTORE_DIR/DUMP_SUBDIR/sub/k00 are identical
-Files DUMP_DIR/sub/k000 and RESTORE_DIR/DUMP_SUBDIR/sub/k000 are identical
-Files DUMP_DIR/sub/l and RESTORE_DIR/DUMP_SUBDIR/sub/l are identical
-Files DUMP_DIR/sub/l00 and RESTORE_DIR/DUMP_SUBDIR/sub/l00 are identical
-Files DUMP_DIR/sub/m and RESTORE_DIR/DUMP_SUBDIR/sub/m are identical
-Files DUMP_DIR/sub/m00 and RESTORE_DIR/DUMP_SUBDIR/sub/m00 are identical
-Files DUMP_DIR/sub/n and RESTORE_DIR/DUMP_SUBDIR/sub/n are identical
-Files DUMP_DIR/sub/n00 and RESTORE_DIR/DUMP_SUBDIR/sub/n00 are identical
-Files DUMP_DIR/sub/small and RESTORE_DIR/DUMP_SUBDIR/sub/small are identical
-Only in SCRATCH_MNT: RESTORE_SUBDIR
#! /bin/sh
-# XFS QA Test No. 042
+# FS QA Test No. 042
#
# xfs_fsr QA tests
# create a large fragmented file and check that xfs_fsr doesn't corrupt
. ./common.filter
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_require_scratch
_do "Remove other files" "rm -rf $SCRATCH_MNT/{pad,hole*}"
# defragment
-_do "Run xfs_fsr on filesystem" "xfs_fsr -v $SCRATCH_DEV"
+_do "Run xfs_fsr on filesystem" "$XFS_FSR_PROG -v $SCRATCH_DEV"
_do "xfs_bmap -v $SCRATCH_MNT/fragmented"
_do "Check 4k files" "src/fill2fs_check $tmp.manifest"
#! /bin/sh
-# XFS QA Test No. 043
+# FS QA Test No. 043
#
# Test out xfsdump/restore but rmv inventory prior to restore.
# This checks that the on-disk inventory can be successfully
. ./common.rc
. ./common.dump
+# link correct .out file
+_link_out_file $seq.out
+
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_require_tape $TAPE_DEV
_create_dumpdir_fill
+++ /dev/null
-QA output created by 043
-Put scsi tape driver into variable block size mode
-Creating directory system to dump using src/fill.
-Setup ....................................
-Erasing tape
-Dumping to tape...
-xfsdump -s DUMP_SUBDIR -f TAPE_DEV -M stress_tape_media -L stress_043 SCRATCH_MNT
-xfsdump: using scsi tape (drive_scsitape) strategy
-xfsdump: level 0 dump of HOSTNAME:SCRATCH_MNT
-xfsdump: dump date: DATE
-xfsdump: session id: ID
-xfsdump: session label: "stress_043"
-xfsdump: ino map phase 1: parsing subtree selections
-xfsdump: ino map phase 2: constructing initial dump list
-xfsdump: ino map phase 3: pruning unneeded subtrees
-xfsdump: ino map phase 4: estimating dump size
-xfsdump: ino map phase 5: skipping (only one dump stream)
-xfsdump: ino map construction complete
-xfsdump: estimated dump size: NUM bytes
-xfsdump: /var/xfsdump/inventory created
-xfsdump: preparing drive
-xfsdump: creating dump session media file 0 (media 0, file 0)
-xfsdump: dumping ino map
-xfsdump: dumping directories
-xfsdump: dumping non-directory files
-xfsdump: ending media file
-xfsdump: media file size NUM bytes
-xfsdump: dumping session inventory
-xfsdump: beginning inventory media file
-xfsdump: media file 1 (media 0, file 1)
-xfsdump: ending inventory media file
-xfsdump: inventory media file size NUM bytes
-xfsdump: writing stream terminator
-xfsdump: beginning media stream terminator
-xfsdump: media file 2 (media 0, file 2)
-xfsdump: ending media stream terminator
-xfsdump: media stream terminator size BLOCKSZ bytes
-xfsdump: dump size (non-dir files) : NUM bytes
-xfsdump: dump complete: SECS seconds elapsed
-xfsdump: Dump Status: SUCCESS
-Rewinding tape
-Restoring from tape...
-xfsrestore -f TAPE_DEV -L stress_043 RESTORE_DIR
-xfsrestore: using scsi tape (drive_scsitape) strategy
-xfsrestore: searching media for dump
-xfsrestore: preparing drive
-xfsrestore: examining media file 0
-xfsrestore: found dump matching specified label:
-xfsrestore: hostname: HOSTNAME
-xfsrestore: mount point: SCRATCH_MNT
-xfsrestore: volume: SCRATCH_DEV
-xfsrestore: session time: TIME
-xfsrestore: level: 0
-xfsrestore: session label: "stress_043"
-xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
-xfsrestore: session id: ID
-xfsrestore: media id: ID
-xfsrestore: searching media for directory dump
-xfsrestore: reading directories
-xfsrestore: 3 directories and 38 entries processed
-xfsrestore: directory post-processing
-xfsrestore: restoring non-directory files
-xfsrestore: examining media file 1
-xfsrestore: incorporating on-media session inventory into online inventory
-xfsrestore: /var/xfsdump/inventory created
-xfsrestore: using on-media session inventory
-xfsrestore: restore complete: SECS seconds elapsed
-xfsrestore: Restore Status: SUCCESS
-Comparing dump directory with restore directory
-Files DUMP_DIR/big and RESTORE_DIR/DUMP_SUBDIR/big are identical
-Files DUMP_DIR/small and RESTORE_DIR/DUMP_SUBDIR/small are identical
-Files DUMP_DIR/sub/a and RESTORE_DIR/DUMP_SUBDIR/sub/a are identical
-Files DUMP_DIR/sub/a00 and RESTORE_DIR/DUMP_SUBDIR/sub/a00 are identical
-Files DUMP_DIR/sub/a000 and RESTORE_DIR/DUMP_SUBDIR/sub/a000 are identical
-Files DUMP_DIR/sub/b and RESTORE_DIR/DUMP_SUBDIR/sub/b are identical
-Files DUMP_DIR/sub/b00 and RESTORE_DIR/DUMP_SUBDIR/sub/b00 are identical
-Files DUMP_DIR/sub/big and RESTORE_DIR/DUMP_SUBDIR/sub/big are identical
-Files DUMP_DIR/sub/c and RESTORE_DIR/DUMP_SUBDIR/sub/c are identical
-Files DUMP_DIR/sub/c00 and RESTORE_DIR/DUMP_SUBDIR/sub/c00 are identical
-Files DUMP_DIR/sub/d and RESTORE_DIR/DUMP_SUBDIR/sub/d are identical
-Files DUMP_DIR/sub/d00 and RESTORE_DIR/DUMP_SUBDIR/sub/d00 are identical
-Files DUMP_DIR/sub/e and RESTORE_DIR/DUMP_SUBDIR/sub/e are identical
-Files DUMP_DIR/sub/e00 and RESTORE_DIR/DUMP_SUBDIR/sub/e00 are identical
-Files DUMP_DIR/sub/e000 and RESTORE_DIR/DUMP_SUBDIR/sub/e000 are identical
-Files DUMP_DIR/sub/f and RESTORE_DIR/DUMP_SUBDIR/sub/f are identical
-Files DUMP_DIR/sub/f00 and RESTORE_DIR/DUMP_SUBDIR/sub/f00 are identical
-Files DUMP_DIR/sub/g and RESTORE_DIR/DUMP_SUBDIR/sub/g are identical
-Files DUMP_DIR/sub/g00 and RESTORE_DIR/DUMP_SUBDIR/sub/g00 are identical
-Files DUMP_DIR/sub/h and RESTORE_DIR/DUMP_SUBDIR/sub/h are identical
-Files DUMP_DIR/sub/h00 and RESTORE_DIR/DUMP_SUBDIR/sub/h00 are identical
-Files DUMP_DIR/sub/h000 and RESTORE_DIR/DUMP_SUBDIR/sub/h000 are identical
-Files DUMP_DIR/sub/i and RESTORE_DIR/DUMP_SUBDIR/sub/i are identical
-Files DUMP_DIR/sub/i00 and RESTORE_DIR/DUMP_SUBDIR/sub/i00 are identical
-Files DUMP_DIR/sub/j and RESTORE_DIR/DUMP_SUBDIR/sub/j are identical
-Files DUMP_DIR/sub/j00 and RESTORE_DIR/DUMP_SUBDIR/sub/j00 are identical
-Files DUMP_DIR/sub/k and RESTORE_DIR/DUMP_SUBDIR/sub/k are identical
-Files DUMP_DIR/sub/k00 and RESTORE_DIR/DUMP_SUBDIR/sub/k00 are identical
-Files DUMP_DIR/sub/k000 and RESTORE_DIR/DUMP_SUBDIR/sub/k000 are identical
-Files DUMP_DIR/sub/l and RESTORE_DIR/DUMP_SUBDIR/sub/l are identical
-Files DUMP_DIR/sub/l00 and RESTORE_DIR/DUMP_SUBDIR/sub/l00 are identical
-Files DUMP_DIR/sub/m and RESTORE_DIR/DUMP_SUBDIR/sub/m are identical
-Files DUMP_DIR/sub/m00 and RESTORE_DIR/DUMP_SUBDIR/sub/m00 are identical
-Files DUMP_DIR/sub/n and RESTORE_DIR/DUMP_SUBDIR/sub/n are identical
-Files DUMP_DIR/sub/n00 and RESTORE_DIR/DUMP_SUBDIR/sub/n00 are identical
-Files DUMP_DIR/sub/small and RESTORE_DIR/DUMP_SUBDIR/sub/small are identical
-Comparing listing of dump directory with restore directory
-Files TMP.dump_dir and TMP.restore_dir are identical
#! /bin/sh
-# XFS QA Test No. 044
+# FS QA Test No. 044
#
# external log uuid/format tests (TODO - version 2 log format)
#
. ./common.filter
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
_require_logdev
#! /bin/sh
-# XFS QA Test No. 045
+# FS QA Test No. 045
#
# test mount of two FSes with identical UUID and mount with unknown option
#
}
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
_require_scratch
#! /bin/sh
-# XFS QA Test No. 046
+# FS QA Test No. 046
#
# check on symlinks permissions
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_create_dumpdir_symlinks
_do_dump_file
#! /bin/sh
-# XFS QA Test No. 047
+# FS QA Test No. 047
#
# invutil with interactive responses
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
# wipe test dir clean first
# so dump can be real quick
xfsdump: dump complete: SECS seconds elapsed
xfsdump: Dump Status: SUCCESS
file system 0:
- fs id: ID
+ fs ID: ID
session 0:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 1:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 2:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 3:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 4:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
xfsdump: Dump Status: SUCCESS
Processing file /var/xfsdump/inventory/UUIDstab
Found entry for HOSTNAME:SCRATCH_MNT
DEV PATH : HOSTNAME:SCRATCH_DEV
TIME OF DUMP : TIME
-Do you want to prune this entry: [y/n] -------------------------------------------------
+Do you want to prune this entry: [y/n]
Session 1: HOSTNAME:SCRATCH_MNT
-------------------------------------------------
DEV PATH : HOSTNAME:SCRATCH_DEV
TIME OF DUMP : TIME
-Do you want to prune this entry: [y/n] -------------------------------------------------
+Do you want to prune this entry: [y/n]
Session 2: HOSTNAME:SCRATCH_MNT
-------------------------------------------------
DEV PATH : HOSTNAME:SCRATCH_DEV
TIME OF DUMP : TIME
-Do you want to prune this entry: [y/n] -------------------------------------------------
+Do you want to prune this entry: [y/n]
Session 3: HOSTNAME:SCRATCH_MNT
Session 4: HOSTNAME:SCRATCH_MNT
file system 0:
- fs id: ID
+ fs ID: ID
session 0:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 1:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
session 2:
mount point: HOSTNAME:SCRATCH_MNT
device: HOSTNAME:SCRATCH_DEV
mfile start: ino INO offset 0
mfile end: ino INO offset 0
media label: "stress_tape_media"
- media id: ID
+ media ID: ID
xfsdump: Dump Status: SUCCESS
#! /bin/sh
-# XFS QA Test No. 048
+# FS QA Test No. 048
#
# test return codes from xfsctl on bad userspace address
#
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
-trap "rm -f $tmp.*; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
+_cleanup()
+{
+ cd /
+ rm -f $tmp.*
+ _cleanup_testdir
+}
+
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
+_setup_testdir
-src/fault $TEST_DIR || exit
+src/fault $testdir || exit
# success, all done
status=0
#! /bin/sh
-# XFS QA Test No. 049
+# FS QA Test No. 049
#
# XFS on loop test
#
_cleanup()
{
+ cd /
umount $SCRATCH_MNT/test2 > /dev/null 2>&1
umount $SCRATCH_MNT/test > /dev/null 2>&1
rm -f $tmp.*
. ./common.rc
. ./common.filter
+# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+
_log()
{
echo "--- $*"
_require_scratch
_require_loop
-
-# real QA test starts here
-
rm -f $seq.full
echo "(dev=$SCRATCH_DEV, mount=$SCRATCH_MNT)" >> $seq.full
#! /bin/sh
-# XFS QA Test No. 050
+# FS QA Test No. 050
#
# Exercises basic XFS quota functionality
# MOUNT_OPTIONS env var switches the test type (uid/gid/acct/enfd)
tmp=/tmp/$$
status=1 # failure is the default!
+export MOUNT_OPTIONS=-ousrquota
+
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
_cleanup()
{
+ cd /
echo; echo "*** unmount"
umount $SCRATCH_MNT 2>/dev/null
rm -f $tmp.*
}
trap "_cleanup; exit \$status" 0 1 2 3 15
+
+
+# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+
rm -f $seq.out
cp /dev/null $seq.full
chmod a+rwx $seq.full # arbitrary users will write here
' | _filter_repquota $1
}
-# real QA test starts here
_scratch_mkfs_xfs | _filter_mkfs 2>$tmp.mkfs
cat $tmp.mkfs >>$seq.full
_qmount
repquota -$type $SCRATCH_DEV | _filter_and_check_blocks 7
+
+export -n MOUNT_OPTIONS
+
# success, all done
status=0
exit
#! /bin/sh
-# XFS QA Test No. 051
+# FS QA Test No. 051
#
# Test out ACLs.
#
_cleanup()
{
+ cd /
rm -f $tmp.*
- rm -rf $TEST_DIR/$seq.dir1
+ [ -n "$testdir" ] && rm -rf $testdir/$seq.dir1
+ _cleanup_testdir
}
# -----
# -> this would be done by simultaneously matching on ACEs
# -> interesting if it allows user to specify ACEs in any order
#
+
+# real QA test starts here
+_supported_fs xfs udf
+_supported_os Linux
+
+[ -x /usr/bin/chacl ] || _notrun "chacl executable not found"
+[ -x $runas ] || _notrun "$runas executable not found"
+
rm -f $seq.full
+_setup_testdir
+
_need_to_be_root
_acl_setup_ids
_acl_requirements
-[ -x $runas ] || _notrun "$runas executable not found"
# get dir
-cd $TEST_DIR
+cd $testdir
rm -rf $seq.dir1
mkdir $seq.dir1
cd $seq.dir1
-#-------------------------------------------------------
-# real QA test starts here
echo "QA output created by $seq"
-
echo ""
echo "=== Test minimal ACE ==="
#! /bin/sh
-# XFS QA Test No. 052
+# FS QA Test No. 052
#
# Ensure that quota(1) displays blocksizes matching ondisk dquots.
#
tmp=/tmp/$$
status=1 # failure is the default!
+export MOUNT_OPTIONS=-ousrquota
+
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
_cleanup()
{
+ cd /
umount $SCRATCH_MNT 2>/dev/null
rm -f $tmp.*
}
trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
rm -f $seq.full
_require_scratch
MOUNT_OPTIONS="-o usrquota"; export MOUNT_OPTIONS
fi
-# real QA test starts here
_scratch_mkfs_xfs | _filter_mkfs 2>$tmp.mkfs
cat $tmp.mkfs >>$seq.full
chmod a+w $seq.full # arbitrary users will write here
diff $tmp.quota $tmp.xfs_db
[ $? -eq 0 ] && echo OK.
+export -n MOUNT_OPTIONS
+
# success, all done
status=0
exit
#! /bin/sh
-# XFS QA Test No. 053
+# FS QA Test No. 053
#
# xfs_repair breaks acls
#
. ./common.filter
. ./common.attr
+# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+
[ ! -x /bin/chacl -a ! -x /usr/bin/chacl ] && _notrun "chacl command not found"
-# real QA test starts here
_require_scratch
_acl_setup_ids
_do_die_on_error=y
#! /bin/sh
-# XFS QA Test No. 054
+# FS QA Test No. 054
#
# Check behavior of chown with both user and group quota enabled,
# and changing both user and group together via chown(2).
rm -f $tmp.*
}
trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+
cp /dev/null $seq.full
chmod ugo+rwx $seq.full
umount $SCRATCH_MNT 2>/dev/null
}
-# real QA test starts here
_scratch_mkfs_xfs $SCRATCH_DEV >/dev/null 2>&1
MOUNT_OPTIONS="$MOUNT_OPTIONS -ousrquota,grpquota"; export MOUNT_OPTIONS
_qmount
#! /bin/sh
-# XFS QA Test No. 055
+# FS QA Test No. 055
#
# Test xfsdump/restore to a remote IRIX tape using RMT user
#
. ./common.rc
. ./common.dump
+# link correct .out file
+_link_out_file $seq.out
+
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_require_tape $RMT_TAPE_USER@$RMT_IRIXTAPE_DEV
_create_dumpdir_fill
+++ /dev/null
-QA output created by 055
-Creating directory system to dump using src/fill.
-Setup ....................................
-Erasing tape
-Dumping to tape...
-xfsdump -o -F -f TAPE_DEV -M stress_tape_media -L stress_055 SCRATCH_MNT
-xfsdump: using scsi tape (drive_scsitape) strategy
-xfsdump: level 0 dump of HOSTNAME:SCRATCH_MNT
-xfsdump: dump date: DATE
-xfsdump: session id: ID
-xfsdump: session label: "stress_055"
-xfsdump: ino map phase 1: skipping (no subtrees specified)
-xfsdump: ino map phase 2: constructing initial dump list
-xfsdump: ino map phase 3: skipping (no pruning necessary)
-xfsdump: ino map phase 4: skipping (size estimated in phase 2)
-xfsdump: ino map phase 5: skipping (only one dump stream)
-xfsdump: ino map construction complete
-xfsdump: estimated dump size: NUM bytes
-xfsdump: /var/xfsdump/inventory created
-xfsdump: preparing drive
-xfsdump: WARNING: media may contain data. Overwrite option specified
-xfsdump: creating dump session media file 0 (media 0, file 0)
-xfsdump: dumping ino map
-xfsdump: dumping directories
-xfsdump: dumping non-directory files
-xfsdump: ending media file
-xfsdump: media file size NUM bytes
-xfsdump: dumping session inventory
-xfsdump: beginning inventory media file
-xfsdump: media file 1 (media 0, file 1)
-xfsdump: ending inventory media file
-xfsdump: inventory media file size NUM bytes
-xfsdump: writing stream terminator
-xfsdump: beginning media stream terminator
-xfsdump: media file 2 (media 0, file 2)
-xfsdump: ending media stream terminator
-xfsdump: media stream terminator size 245760 bytes
-xfsdump: dump size (non-dir files) : NUM bytes
-xfsdump: dump complete: SECS seconds elapsed
-xfsdump: Dump Status: SUCCESS
-Rewinding tape
-Restoring from tape...
-xfsrestore -f TAPE_DEV -L stress_055 RESTORE_DIR
-xfsrestore: using scsi tape (drive_scsitape) strategy
-xfsrestore: using online session inventory
-xfsrestore: searching media for directory dump
-xfsrestore: preparing drive
-xfsrestore: examining media file 0
-xfsrestore: reading directories
-xfsrestore: 3 directories and 38 entries processed
-xfsrestore: directory post-processing
-xfsrestore: restoring non-directory files
-xfsrestore: restore complete: SECS seconds elapsed
-xfsrestore: Restore Status: SUCCESS
-Comparing dump directory with restore directory
-Files DUMP_DIR/big and RESTORE_DIR/DUMP_SUBDIR/big are identical
-Files DUMP_DIR/small and RESTORE_DIR/DUMP_SUBDIR/small are identical
-Files DUMP_DIR/sub/a and RESTORE_DIR/DUMP_SUBDIR/sub/a are identical
-Files DUMP_DIR/sub/a00 and RESTORE_DIR/DUMP_SUBDIR/sub/a00 are identical
-Files DUMP_DIR/sub/a000 and RESTORE_DIR/DUMP_SUBDIR/sub/a000 are identical
-Files DUMP_DIR/sub/b and RESTORE_DIR/DUMP_SUBDIR/sub/b are identical
-Files DUMP_DIR/sub/b00 and RESTORE_DIR/DUMP_SUBDIR/sub/b00 are identical
-Files DUMP_DIR/sub/big and RESTORE_DIR/DUMP_SUBDIR/sub/big are identical
-Files DUMP_DIR/sub/c and RESTORE_DIR/DUMP_SUBDIR/sub/c are identical
-Files DUMP_DIR/sub/c00 and RESTORE_DIR/DUMP_SUBDIR/sub/c00 are identical
-Files DUMP_DIR/sub/d and RESTORE_DIR/DUMP_SUBDIR/sub/d are identical
-Files DUMP_DIR/sub/d00 and RESTORE_DIR/DUMP_SUBDIR/sub/d00 are identical
-Files DUMP_DIR/sub/e and RESTORE_DIR/DUMP_SUBDIR/sub/e are identical
-Files DUMP_DIR/sub/e00 and RESTORE_DIR/DUMP_SUBDIR/sub/e00 are identical
-Files DUMP_DIR/sub/e000 and RESTORE_DIR/DUMP_SUBDIR/sub/e000 are identical
-Files DUMP_DIR/sub/f and RESTORE_DIR/DUMP_SUBDIR/sub/f are identical
-Files DUMP_DIR/sub/f00 and RESTORE_DIR/DUMP_SUBDIR/sub/f00 are identical
-Files DUMP_DIR/sub/g and RESTORE_DIR/DUMP_SUBDIR/sub/g are identical
-Files DUMP_DIR/sub/g00 and RESTORE_DIR/DUMP_SUBDIR/sub/g00 are identical
-Files DUMP_DIR/sub/h and RESTORE_DIR/DUMP_SUBDIR/sub/h are identical
-Files DUMP_DIR/sub/h00 and RESTORE_DIR/DUMP_SUBDIR/sub/h00 are identical
-Files DUMP_DIR/sub/h000 and RESTORE_DIR/DUMP_SUBDIR/sub/h000 are identical
-Files DUMP_DIR/sub/i and RESTORE_DIR/DUMP_SUBDIR/sub/i are identical
-Files DUMP_DIR/sub/i00 and RESTORE_DIR/DUMP_SUBDIR/sub/i00 are identical
-Files DUMP_DIR/sub/j and RESTORE_DIR/DUMP_SUBDIR/sub/j are identical
-Files DUMP_DIR/sub/j00 and RESTORE_DIR/DUMP_SUBDIR/sub/j00 are identical
-Files DUMP_DIR/sub/k and RESTORE_DIR/DUMP_SUBDIR/sub/k are identical
-Files DUMP_DIR/sub/k00 and RESTORE_DIR/DUMP_SUBDIR/sub/k00 are identical
-Files DUMP_DIR/sub/k000 and RESTORE_DIR/DUMP_SUBDIR/sub/k000 are identical
-Files DUMP_DIR/sub/l and RESTORE_DIR/DUMP_SUBDIR/sub/l are identical
-Files DUMP_DIR/sub/l00 and RESTORE_DIR/DUMP_SUBDIR/sub/l00 are identical
-Files DUMP_DIR/sub/m and RESTORE_DIR/DUMP_SUBDIR/sub/m are identical
-Files DUMP_DIR/sub/m00 and RESTORE_DIR/DUMP_SUBDIR/sub/m00 are identical
-Files DUMP_DIR/sub/n and RESTORE_DIR/DUMP_SUBDIR/sub/n are identical
-Files DUMP_DIR/sub/n00 and RESTORE_DIR/DUMP_SUBDIR/sub/n00 are identical
-Files DUMP_DIR/sub/small and RESTORE_DIR/DUMP_SUBDIR/sub/small are identical
-Only in SCRATCH_MNT: RESTORE_SUBDIR
#! /bin/sh
-# XFS QA Test No. 056
+# FS QA Test No. 056
#
# Test xfsdump/xfsrestore to a dump file (as opposed to a tape)
# and test restoring various permissions/modes
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_create_dumpdir_fill_perm
_do_dump_file
#! /bin/sh
-# XFS QA Test No. 057
+# FS QA Test No. 057
#
-# Test out the different acl_get semantics
+# Place holder for test 075. Test out the different acl_get semantics
#
#-----------------------------------------------------------------------
# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
# creator
owner=tes@sgi.com
-# get standard environment, filters and checks
-. ./common.rc
-. ./common.filter
-. ./common.attr
-
seq=`basename $0`
echo "QA output created by $seq"
-_cleanup()
-{
- rm -f $tmp.*
- rm -rf $TEST_DIR/$seq.dir1
-}
-
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
-trap "_cleanup; exit \$status" 0 1 2 3 15
+trap "rm -f $tmp.*; exit \$status" 0 1 2 3 15
-acl_get=$here/src/acl_get
-
-_get_file()
-{
- _file=$1
-
- ls -ln $_file | awk '{ print $1, $3, $4, $NF }'
- echo ""
-
- echo "access, default, irix-semantics"
- $acl_get -adi $_file
- echo ""
-
- echo "access, default, linux-semantics"
- $acl_get -ad $_file
- echo ""
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
- echo "access, fd, irix-semantics"
- $acl_get -afi $_file
- echo ""
+_notrun "Place holder for IRIX test 057"
- echo "access, fd, linux-semantics"
- $acl_get -af $_file
- echo ""
-}
+# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX
-_acl_on()
-{
- # test if acl_get syscall is operational
- # and hence the ACL config has been turned on
- touch syscalltest
- if $acl_get -l syscalltest 2>&1 | tee -a $here/$seq.full \
- | grep 'Function not implemented' >/dev/null
- then
- cd $here
- _notrun "requires kernel ACL support"
- fi
-}
+# success, all done
+status=0
+exit
-# real QA test starts here
-[ `uname` = Linux ] && _notrun "IRIX acl_get semantics no longer required"
-rm -f $seq.full
-_need_to_be_root
-[ -x $acl_get ] || _notrun "$acl_get command not found"
-[ ! -x /bin/chacl -a ! -x /usr/bin/chacl ] && _notrun "chacl command not found"
-# get dir
-cd $TEST_DIR
-rm -rf $seq.dir1
-mkdir $seq.dir1
-cd $seq.dir1
-_acl_on
-touch file1
-chmod 752 file1
-_get_file file1
-# ensure that full blown acls' get/set work, not just minimal ones
-_acl_setup_ids
-chacl u::rwx,g::rw-,o::---,u:$acl1:r-x,g:$acl1:r--,m::rwx file1 2>&1
-chacl -l file1 | _acl_filter_id
-_get_file file1 | _acl_filter_id
-# success, all done
-status=0
-exit
#! /bin/sh
-# XFS QA Test No. 058
+# FS QA Test No. 058
#
-# Test some ACL API functions.
+# Place holder for test 068. Test some ACL API functions.
#
#-----------------------------------------------------------------------
# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
. ./common.rc
. ./common.filter
-[ `uname` = IRIX -o `uname` = IRIX64 ] || \
- _notrun "acl_test.c requires the IRIX ACL API"
+_notrun "Place holder for IRIX test 058"
# real QA test starts here
-src/acl_test
+_supported_fs xfs
+_supported_os IRIX
# success, all done
status=0
#! /bin/sh
-# XFS QA Test No. 059
+# FS QA Test No. 059
#
# place holder for IRIX 059 test for xfsdump/xfsrestore multi streams
#
. ./common.rc
. ./common.filter
-# real QA test starts here
-
_notrun "Place holder for IRIX test 059"
+# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX
+
# success, all done
status=0
exit
#! /bin/sh
-# XFS QA Test No. 060
+# FS QA Test No. 060
#
# place holder for IRIX 060 test for xfsdump/xfsrestore multi streams
#
. ./common.rc
. ./common.filter
-# real QA test starts here
-
_notrun "Place holder for IRIX test 060"
+# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX
+
# success, all done
status=0
exit
#! /bin/sh
-# XFS QA Test No. 061
+# FS QA Test No. 061
#
# Test restoring a dump created in IRIX/XFS
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
# src/dumpfile based on dumping from
# _create_dumpdir_fill_perm (small dump)
xfsrestore: level: 0
xfsrestore: session label: "stress_056"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: 7 directories and 11 entries processed
#! /bin/sh
-# XFS QA Test No. 062
+# FS QA Test No. 062
#
# Exercises the getfattr/setfattr tools
# Derived from tests originally written by Andreas Gruenbacher for ext2
. ./common.rc
. ./common.filter
+_get_os
+
_cleanup()
{
+ cd /
echo; echo "*** unmount"
umount $SCRATCH_MNT 2>/dev/null
rm -f $tmp.*
_filter_scratch()
{
- sed -e "s,$SCRATCH_MNT,SCRATCH_MNT,g"
+ sed -e "s,$SCRATCH_MNT,SCRATCH_MNT,g"
+}
+
+_linux_attr_calls()
+{
+ echo "*** set/get one initially empty attribute"
+ setfattr -h -n $nsp.name $SCRATCH_MNT/$inode
+ getfattr -m $nsp $SCRATCH_MNT/$inode
+
+ echo "*** overwrite empty, set several new attributes"
+ setfattr -h -n $nsp.name -v 0xbabe $SCRATCH_MNT/$inode
+ setfattr -h -n $nsp.name2 -v 0xdeadbeef $SCRATCH_MNT/$inode
+ setfattr -h -n $nsp.name3 -v 0xdeface $SCRATCH_MNT/$inode
+
+ echo "*** fetch several attribute names and values (hex)"
+ getfattr -m $nsp -e hex $SCRATCH_MNT/$inode
+
+ echo "*** fetch several attribute names and values (base64)"
+ getfattr -m $nsp -e base64 $SCRATCH_MNT/$inode
+
+ echo "*** shrink value of an existing attribute"
+ setfattr -h -n $nsp.name2 -v 0xdeaf $SCRATCH_MNT/$inode
+ getfattr -m $nsp -e hex $SCRATCH_MNT/$inode
+
+ echo "*** grow value of existing attribute"
+ setfattr -h -n $nsp.name2 -v 0xdecade $SCRATCH_MNT/$inode
+ getfattr -m $nsp -e hex $SCRATCH_MNT/$inode
+
+ echo "*** set an empty value for second attribute"
+ setfattr -h -n $nsp.name2 $SCRATCH_MNT/$inode
+ getfattr -m $nsp -n $nsp.name2 $SCRATCH_MNT/$inode
+
+ echo "*** overwrite empty value"
+ setfattr -h -n $nsp.name2 -v 0xcafe $SCRATCH_MNT/$inode
+ getfattr -m $nsp -e hex -n $nsp.name2 $SCRATCH_MNT/$inode
+
+ echo "*** remove attribute"
+ setfattr -h -x $nsp.name2 $SCRATCH_MNT/$inode
+ getfattr -m $nsp -e hex -n $nsp.name2 $SCRATCH_MNT/$inode
+
+ echo "*** final list (strings, type=$inode, nsp=$nsp)"
+ getfattr -m '.' -e hex $SCRATCH_MNT/$inode
}
getfattr()
{
- /usr/bin/getfattr --absolute-names -dh $@ 2>&1 | _filter_scratch
+ /usr/bin/getfattr --absolute-names -dh $@ 2>&1 | _filter_scratch
}
setfattr()
{
- /usr/bin/setfattr $@
+ /usr/bin/setfattr $@
}
_create_test_bed()
find $SCRATCH_MNT | LC_COLLATE=POSIX sort | _filter_scratch
}
+# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os Linux
+
+
_require_scratch
rm -f $tmp.backup1 $tmp.backup2 $seq.full
for inode in reg dir lnk dev/b dev/c dev/p; do
echo; echo "=== TYPE $inode; NAMESPACE $nsp"; echo
-
echo "*** set/get one initially empty attribute"
+
setfattr -h -n $nsp.name $SCRATCH_MNT/$inode
getfattr -m $nsp $SCRATCH_MNT/$inode
echo "*** fetch several attribute names and values (base64)"
getfattr -m $nsp -e base64 $SCRATCH_MNT/$inode
-
+
echo "*** shrink value of an existing attribute"
setfattr -h -n $nsp.name2 -v 0xdeaf $SCRATCH_MNT/$inode
getfattr -m $nsp -e hex $SCRATCH_MNT/$inode
echo "*** grow value of existing attribute"
setfattr -h -n $nsp.name2 -v 0xdecade $SCRATCH_MNT/$inode
getfattr -m $nsp -e hex $SCRATCH_MNT/$inode
-
+
echo "*** set an empty value for second attribute"
setfattr -h -n $nsp.name2 $SCRATCH_MNT/$inode
getfattr -m $nsp -n $nsp.name2 $SCRATCH_MNT/$inode
echo "*** final list (strings, type=$inode, nsp=$nsp)"
getfattr -m '.' -e hex $SCRATCH_MNT/$inode
+
done
done
#! /bin/sh
-# XFS QA Test No. 063
+# FS QA Test No. 063
#
# xfsdump/xfsrestore with EAs
#
. ./common.dump
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
# create files with EAs
_create_dumpdir_fill_ea
#! /bin/sh
-# XFS QA Test No. 064
+# FS QA Test No. 064
#
# test multilevel dump and restores with hardlinks
#
}
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
_create_dumpdir_hardlinks 9
xfsrestore: level: 0
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 1
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 2
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 3
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 4
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 5
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 6
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 7
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 8
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 9
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 0
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 1
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 2
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 3
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 4
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 5
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 6
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 7
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 8
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: level: 9
xfsrestore: session label: "stress_064"
xfsrestore: media label: "stress_tape_media"
-xfsrestore: file system id: ID
+xfsrestore: file system ID: ID
xfsrestore: session id: ID
-xfsrestore: media id: ID
+xfsrestore: media ID: ID
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
#! /bin/sh
-# XFS QA Test No. 065
+# FS QA Test No. 065
#
# Testing incremental dumps and cumulative restores with
# "adding, deleting, renaming, linking, and unlinking files and
. ./common.filter
. ./common.dump
-
_my_ls_filter()
{
#
}
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
#
# too much hassle to get output matching with quotas turned on
#! /bin/sh
-# XFS QA Test No. 066
+# FS QA Test No. 066
#
# Test dumping of large files
#
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
-trap "rm -f $tmp.*; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
. ./common.dump
+_cleanup()
+{
+ cd /
+ rm -f $tmp.*
+ _cleanup_testdir
+}
+
+# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
+_setup_testdir
+
_my_ls_filter()
{
$AWK_PROG 'NF > 5 {print $5, $9}'
}
-rm -f $TEST_DIR/testfile
-if src/feature -t $TEST_DIR/testfile; then
+rm -f $testdir/testfile
+if src/feature -t $testdir/testfile; then
:
else
_notrun "Installed libc doesn't correctly handle setrlimit/ftruncate64"
fi
-# real QA test starts here
-
_create_dumpdir_largefile
echo "ls dumpdir/largefile"
ls -l $dump_dir | _my_ls_filter
-#! /bin/sh
-# XFS QA Test No. 067
+#! /bin/sh
+# FS QA Test No. 067
#
# Test out acl/dacls which fit in shortform in the inode
#
. ./common.attr
# real QA test starts here
-#
+_supported_fs xfs
+_supported_os Linux
+
+[ -x /usr/bin/chacl ] || _notrun "chacl executable not found"
+
_need_to_be_root
_acl_requirements
_require_scratch
#! /bin/sh
-# XFS QA Test No. 068
+# FS QA Test No. 068
#
# Test Linux LVM snapshot creation
#
. ./common.rc
. ./common.filter
+# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+
if [ -e "$SCRATCH_SNAP_MNT" ]; then
rmdir "$SCRATCH_SNAP_MNT" || _notrun "Cannot rmdir $SCRATCH_SNAP_MNT"
fi
[ "$LVM" = false ] && _notrun "LVM is not present in the running kernel."
-# real QA test starts here
-
# Create a PV set from the scratch partition
#TODO # (I don't know if this is needed.and it is dangerous because it intentionally deletes the partition table!!!
#TODO # dd if=/dev/zero of="$SCRATCH_LVM_DEV" bs=512 count=1
#! /bin/sh
-# XFS QA Test No. 069
+# FS QA Test No. 069
#
# Test out writes with O_APPEND flag sets.
#
. ./common.filter
# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
_require_scratch
rm -f $seq.full
umount $SCRATCH_DEV >/dev/null 2>&1
echo "*** mkfs"
-_scratch_mkfs_xfs >/dev/null || _fail "mkfs failed"
+_scratch_mkfs >/dev/null || _fail "mkfs failed"
echo "*** mount FS"
_scratch_mount >/dev/null || _fail "mount failed"
#! /bin/sh
-# XFS QA Test No. 070
+# FS QA Test No. 070
#
# fsstress incarnation testing extended attributes writes
#
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
-trap "rm -f $tmp.*; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_cleanup()
+{
+ cd /
+ rm -f $tmp.*
+ _cleanup_testdir
+}
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
+
+_setup_testdir
$here/ltp/fsstress \
- -d $TEST_DIR/fsstress \
+ -d $testdir/fsstress \
-f allocsp=0 \
-f freesp=0 \
-f bulkstat=0 \
#! /bin/sh
-# XFS QA Test No. 071
+# FS QA Test No. 071
#
# Exercise IO at large file offsets.
#
_cleanup()
{
+ cd /
rm -f $tmp.*
umount $SCRATCH_DEV 2>/dev/null
}
sed -e "s/$1/<OFFSET>/g" | _filter_io
}
+_filter_xfs_io()
+{
+ sed -e "s/[0-9/.]* bytes, [0-9] ops\; [0-9/.]* sec ([0-9/.]* [MKiBbytes]*\/sec and [0-9/.]* ops\/sec)/XXX bytes, X ops\; XXX sec (X YYY\/sec and XXX ops\/sec/"
+}
+
write_block()
{
location=$1
echo "Writing $bytes bytes, offset is $words (direct=$direct)" | _filter_io
echo "Writing $bytes bytes at $location $words (direct=$direct)" >>$seq.full
xfs_io -c "pwrite $offset 512" $flags $SCRATCH_MNT/$seq \
- 2>&1 | _filter_off $offset | tee -a $seq.full
+ 2>&1 | _filter_off $offset | _filter_xfs_io | tee -a $seq.full
xfs_bmap -v $SCRATCH_MNT/$seq >>$seq.full
echo "Reading $bytes bytes (direct=$direct)" | _filter_io
echo "Reading $bytes bytes at $location (direct=$direct)" >>$seq.full
xfs_io -c "pread $offset $bytes" $flags $SCRATCH_MNT/$seq \
- 2>&1 | _filter_off $offset | tee -a $seq.full
+ 2>&1 | _filter_off $offset | _filter_xfs_io | tee -a $seq.full
xfs_io -c "pread -v $offset $bytes" $flags $SCRATCH_MNT/$seq >>$seq.full
}
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
+[ -x /usr/sbin/xfs_io ] || _notrun "xfs_io executable not found"
+
_require_scratch
_scratch_mkfs_xfs | _filter_mkfs 2>$tmp.mkfs
-source $tmp.mkfs
+. $tmp.mkfs
echo
_scratch_mount
Writing 512 bytes, offset is +0 (direct=false)
wrote 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Reading 512 bytes (direct=false)
read 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 512 bytes, offset is minus 1 byte (direct=false)
wrote 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Reading 512 bytes (direct=false)
read 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 512 bytes, offset is minus 1FSB (direct=false)
wrote 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Reading 512 bytes (direct=false)
read 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 1 bytes, offset is minus 1FSB (direct=false)
wrote 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Reading 1 bytes (direct=false)
read 1/1 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 1FSB bytes, offset is +0 (direct=true)
wrote 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Reading 1FSB bytes (direct=true)
read 512/1FSB bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 1FSB bytes, offset is minus 1FSB (direct=true)
wrote 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Reading 1FSB bytes (direct=true)
read 513/1FSB bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
=== Iterating, 2 remains
pwrite64: File too large
Reading 512 bytes (direct=false)
read 0/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 512 bytes, offset is minus 1 byte (direct=false)
pwrite64: File too large
Reading 512 bytes (direct=false)
read 0/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 512 bytes, offset is minus 1FSB (direct=false)
wrote 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Reading 512 bytes (direct=false)
read 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 1 bytes, offset is minus 1FSB (direct=false)
wrote 512/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Reading 1 bytes (direct=false)
read 1/1 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 1FSB bytes, offset is +0 (direct=true)
pwrite64: File too large
Reading 1FSB bytes (direct=true)
read 0/1FSB bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 1FSB bytes, offset is minus 1FSB (direct=true)
pwrite64: File too large
Reading 1FSB bytes (direct=true)
read 0/1FSB bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
=== Iterating, 1 remains
pwrite64: File too large
Reading 512 bytes (direct=false)
read 0/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 512 bytes, offset is minus 1 byte (direct=false)
pwrite64: File too large
Reading 512 bytes (direct=false)
read 0/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 512 bytes, offset is minus 1FSB (direct=false)
pwrite64: File too large
Reading 512 bytes (direct=false)
read 0/512 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 1 bytes, offset is minus 1FSB (direct=false)
pwrite64: File too large
Reading 1 bytes (direct=false)
read 0/1 bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 1FSB bytes, offset is +0 (direct=true)
pwrite64: File too large
Reading 1FSB bytes (direct=true)
read 0/1FSB bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
Writing 1FSB bytes, offset is minus 1FSB (direct=true)
pwrite64: File too large
Reading 1FSB bytes (direct=true)
read 0/1FSB bytes at offset <OFFSET>
+XXX bytes, X ops; XXX sec (X YYY/sec and XXX ops/sec
=== Iterating, 0 remains
#! /bin/sh
-# XFS QA Test No. 072
+# FS QA Test No. 072
#
# Check some unwritten extent boundary conditions
#
_cleanup()
{
- umount $SCRATCH_MNT 2>/dev/null
+ cd /
rm -f $tmp.*
+ umount $SCRATCH_MNT 2>/dev/null
}
here=`pwd`
. ./common.filter
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
+[ -x /usr/sbin/xfs_io ] || _notrun "xfs_io executable not found"
+
_require_scratch
rm -f $seq.full
#! /bin/sh
-# XFS QA Test No. 073
+# FS QA Test No. 073
#
# Test xfs_copy
#
status=1 # failure is the default!
_cleanup()
{
+ cd /
umount $SCRATCH_MNT 2>/dev/null
umount $tmp.loop 2>/dev/null
[ -d $tmp.loop ] && rmdir $tmp.loop
[ $source = $SCRATCH_DEV ] && _scratch_mount
echo checking new image
- _check_filesystem $target
+ _check_xfs_filesystem $target
echo mounting new image on loopback
rmdir $target_dir 2>/dev/null
. ./common.rc
. ./common.filter
+
+# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+
[ "$USE_EXTERNAL" = yes ] && _notrun "Cannot xfs_copy with external devices"
[ -x /usr/sbin/xfs_copy ] || _notrun "xfs_copy binary not yet installed"
-# real QA test starts here
_require_scratch
_require_loop
#! /bin/sh
-# XFS QA Test No. 074
+# FS QA Test No. 074
#
# fstest
#
_cleanup()
{
- rm -rf $TEST_DIR/fstest.$$.* $tmp.*
+ cd /
+ rm -rf $testdir/fstest.$$.* $tmp.*
+ _cleanup_testdir
}
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
+# link correct .out file
+_link_out_file $seq.out
+
_do_test()
{
_n="$1"
_param="$2"
- out=$TEST_DIR/fstest.$$.$_n
+ out=$testdir/fstest.$$.$_n
rm -rf $out
if ! mkdir $out
then
# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
+
+_setup_testdir
rm -f $here/$seq.full
echo "brevity is wit..."
_do_test 1 "-s $size10 -b 8192 -m"
_do_test 2 "-n 3 -Fp -f 10 -s $size30 -b 512"
-
_do_test 3 "-n 3 -Fp -f 10 -s $size30 -b 512 -m"
-QA output created by 074
-brevity is wit...
-
------------------------------------------------
-fstest.0 :
------------------------------------------------
-
------------------------------------------------
-fstest.1 : -s 10485760 -b 8192 -m
------------------------------------------------
-
------------------------------------------------
-fstest.2 : -n 3 -Fp -f 10 -s 31457280 -b 512
------------------------------------------------
-
------------------------------------------------
-fstest.3 : -n 3 -Fp -f 10 -s 31457280 -b 512 -m
------------------------------------------------
#! /bin/sh
-# XFS QA Test No. 075
+# FS QA Test No. 075
#
# fsx
#
_cleanup()
{
- rm -rf $TEST_DIR/fsx.* $tmp.*
+ cd /
+ rm -rf $testdir/fsx.* $tmp.*
+ _cleanup_testdir
}
# get standard environment, filters and checks
_n="$1"
_param="$2"
- out=$TEST_DIR/fsx
+ out=$testdir/fsx
rm -rf $out
if ! mkdir $out
then
echo "fsx.$_n : $_param"
echo "-----------------------------------------------"
+ if [ "$FSTYP" = "nfs" ]
+ then
+ if [ "$_n" = "1" -o "$_n" = "3" ]
+ then
+			# HACK: NFS doesn't handle preallocation (-x) so just skip this test
+ return
+ fi
+ fi
+
# This cd and use of -P gets full debug on $here (not TEST_DEV)
cd $out
if ! $here/ltp/fsx $_param -P $here $seq.$_n >/dev/null
# real QA test starts here
+_supported_fs xfs udf nfs
+_supported_os IRIX Linux
+
+_setup_testdir
rm -f $here/$seq.full
echo "brevity is wit..."
#! /bin/sh
-# XFS QA Test No. 076
+# FS QA Test No. 076
#
# Test blockdev reads in parallel with filesystem reads/writes
#
. ./common.filter
# real QA test starts here
+_supported_fs xfs udf
+_supported_os IRIX Linux
_require_scratch
#! /bin/sh
-# XFS QA Test No. 077
+# FS QA Test No. 077
#
# Check use of ACLs (extended attributes) on a full filesystem
#
here=`pwd`
tmp=/tmp/$$
status=1
-filler=$here/../../linux
+#filler=$here/../../linux
+filler=/home/fsgqa/isms/2.4.x-xfs
_cleanup()
{
+ cd /
echo "*** unmount"
umount $SCRATCH_MNT 2>/dev/null
}
. ./common.filter
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
[ ! -d $filler ] && _notrun "No linux directory to source files from"
#! /bin/sh
-# XFS QA Test No. 078
+# FS QA Test No. 078
#
# Check several growfs corner cases
#
tmp=/tmp/$$
status=1
+trap "_cleanup; rm -f $tmp.*; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+
_cleanup()
{
- umount $LOOP_MNT 2>/dev/null
- rmdir $LOOP_MNT
+ cd /
+ rm -f $tmp.*
+ umount $LOOP_MNT 2>/dev/null
+ rmdir $LOOP_MNT
+ _cleanup_testdir
}
-trap "_cleanup; rm -f $tmp.*; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
echo "*** unmount and check"
umount $LOOP_MNT
- _check_filesystem $LOOP_DEV
+ _check_xfs_filesystem $LOOP_DEV
rm -f $LOOP_DEV
}
#! /bin/sh
-# XFS QA Test No. 079
+# FS QA Test No. 079
#
# Run the t_immutable test program for immutable/append-only files.
#
_cleanup()
{
+ cd /
echo "*** cleaning up"
$timmutable -r $SCRATCH_MNT/$seq
umount $SCRATCH_MNT
. ./common.rc
. ./common.filter
+_supported_fs xfs
+_supported_os Linux
+
_require_scratch
[ -x $timmutable ] || _notrun "t_immutable was not built for this platform"
#! /bin/sh
-# XFS QA Test No. 080
+# FS QA Test No. 080
#
# rwtest (iogen|doio)
#
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
-trap "rm -f $tmp.*; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
+_cleanup()
+{
+ cd /
+ rm -f $tmp.*
+ _cleanup_testdir
+}
+
+_supported_fs xfs
+_supported_os IRIX Linux
+
+_setup_testdir
+
quiet=-q
clean=-c
export here
-cd $TEST_DIR
+cd $testdir
echo
# real QA test starts here
#! /bin/sh
-# XFS QA Test No. 081
+# FS QA Test No. 081
#
# To test out logprint with quotas
#
. ./common.filter
. ./common.log
+# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
_cleanup()
{
+ cd /
_cleanup_logfiles
rm -f $tmp.*
echo "*** unmount"
}
trap "_cleanup; exit \$status" 0 1 2 3 15
-# real QA test starts here
-
# prelim
rm -f $seq.full $tmp.*
_require_scratch
#! /bin/sh
-# XFS QA Test No. 082
+# FS QA Test No. 082
#
# Test out the v2 stripe logs with logprint
#
#-----------------------------------------------------------------------
#
# creator
-owner=root@icy.melbourne.sgi.com
+owner=tes@melbourne.sgi.com
seq=`basename $0`
echo "QA output created by $seq"
_cleanup()
{
+ cd /
_cleanup_logfiles
rm -f $tmp.*
echo "*** unmount"
#! /bin/sh
-# XFS QA Test No. 083
+# FS QA Test No. 083
#
# Exercise filesystem full behaviour - run numerous fsstress
# processes in write mode on a small filesystem. NB: delayed
{
echo "*** unmount"
umount $SCRATCH_MNT 2>/dev/null
+ rm -f $tmp.*
}
-trap "_cleanup; rm -f $tmp.*; exit \$status" 0 1 2 3 15
+trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
_require_scratch
_require_nobigloopfs
#! /bin/sh
-# XFS QA Test No. 084
+# FS QA Test No. 084
#
# Exercises unwritten extent reads and writes, looking
# for data corruption (zeroes read) near the end of file.
# -s == preallocation size
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
echo
echo "*** First case - I/O blocksize same as pagesize"
$here/src/resvtest -i 20 -b $pgsize "$TEST_DIR/resv" | _filter_resv
#! /bin/sh
-# XFS QA Test No. 086
+# FS QA Test No. 086
#
# To test log replay with version 2 logs
# Initially keep this simple with just creates.
. ./common.log
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
rm -f $seq.full $tmp.*
_require_scratch
#! /bin/sh
-# XFS QA Test No. 087
+# FS QA Test No. 087
#
# * like 086 but want to create more/different kinds of metadata
# and so will use fsstress
}
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
+
rm -f $seq.full $tmp.*
_require_scratch
#! /bin/sh
-# XFS QA Test No. 088
+# FS QA Test No. 088
#
# test out CAP_DAC_OVERRIDE and CAP_DAC_SEARCH code in
# xfs_iaccess(ip,mode,cr)
}
# real QA test starts here
+_supported_fs xfs
+_supported_os IRIX Linux
path=$TEST_DIR/t_access
src/t_access_root $path | _filter
#! /bin/sh
-# XFS QA Test No. 089
+# FS QA Test No. 089
#
# Emulate the way Linux mount manipulates /etc/mtab to attempt to
# reproduce a possible bug in rename (see src/t_mtab.c).
}
# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+
rm -f $seq.full
[ "X$TEST_DIR" = "X" ] && exit 1
cd $TEST_DIR
endif
TESTS = $(shell sed -n -e '/^[0-9][0-9][0-9]*/s/ .*//p' group)
-CONFIGURE = configure include/builddefs
+CONFIGURE = configure include/builddefs include/config.h
LSRCFILES = configure configure.in aclocal.m4 README VERSION
LDIRT = config.log .dep config.status config.cache confdefs.h conftest* \
check.log check.time
endif
$(CONFIGURE):
+ autoheader
autoconf
./configure
-##/bin/sh
+##/bin/sh
#
# Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved.
#
if $check
then
- if make >/tmp/$$.make 2>&1
+ if $MAKE_PROG >/tmp/$$.gmake 2>&1
then
:
else
- cat /tmp/$$.make
- echo "Warning: make failed -- some tests may be missing"
+ cat /tmp/$$.gmake
+ echo "Warning: $MAKE_PROG failed -- some tests may be missing"
warn=1
fi
- rm -f /tmp/$$.make
+ rm -f /tmp/$$.gmake
fi
diff=diff
which xdiff >/dev/null 2>&1 && diff=xdiff
which gdiff >/dev/null 2>&1 && diff=gdiff
which tkdiff >/dev/null 2>&1 && diff=tkdiff
+ which xxdiff >/dev/null 2>&1 && diff=xxdiff
fi
verbose=false
quick=${quick-false}
have_test_arg=false
rm -f $tmp.list $tmp.tmp $tmp.sed
+export FSTYP=xfs
+
for r
do
-v verbose
check options
+ -xfs test XFS
+ -udf test UDF
+ -nfs test NFS
-g group[,group...] include tests from these groups
-l line mode diff [xdiff]
-n show me, do not run tests
-q quick, no checks (you are on your own)
-T output timestamps
-x group[,group...] exclude tests from these groups
+ -r randomize order
'
exit 0
;;
+ -udf) # -udf ... set FSTYP to udf
+ FSTYP=udf
+ xpand=false
+ ;;
+
+ -xfs) # -xfs ... set FSTYP to xfs
+ FSTYP=xfs
+ xpand=false
+ ;;
+
+ -nfs) # -nfs ... set FSTYP to nfs
+ FSTYP=nfs
+ xpand=false
+ ;;
+
-g) # -g group ... pick from group file
group=true
xpand=false
verbose=true
xpand=false
;;
-
-x) # -x group ... exclude from group file
xgroup=true
xpand=false
;;
-
'[0-9][0-9][0-9] [0-9][0-9][0-9][0-9]')
echo "No tests?"
status=1
then
:
else
-
- if ( cd src; make -i )
+
+ if ( cd src; $MAKE_PROG -is)
then
:
else
echo
echo ":----------------------------------------------"
- echo ": Warning: make failed in src -- some tests may fail as a result"
+ echo ": Warning: $MAKE_PROG failed in src -- some tests may fail as a result"
echo ":----------------------------------------------"
echo
warn=1
fi
fi
+
+case "$FSTYP" in
+ xfs)
+ [ "$XFS_LOGPRINT_PROG" = "" ] && _fatal "xfs_logprint not found"
+ [ "$XFS_REPAIR_PROG" = "" ] && _fatal "xfs_repair not found"
+ [ "$XFS_CHECK_PROG" = "" ] && _fatal "xfs_check not found"
+ [ "$XFS_DB_PROG" = "" ] && _fatal "xfs_db not found"
+ [ "$MKFS_XFS_PROG" = "" ] && _fatal "mkfs_xfs not found"
+ ;;
+ udf)
+ [ "$MKFS_UDF_PROG" = "" ] && _fatal "mkfs_udf not found"
+ ;;
+ nfs)
+ ;;
+esac
\ No newline at end of file
_acl_requirements()
{
xfsdir=$TEST_DIR
+
- if [ ! -x /bin/chacl -a ! -x /usr/bin/chacl ]; then
- _notrun "chacl command not found"
+ if [ ! -x /bin/chacl -a ! -x /usr/bin/chacl -a ! -x /sbin/chacl ]; then
+ _notrun "chacl command not found"
fi
# test if acl_get syscall is operational
##/bin/sh
-
#
# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
#
# validity or mountedness.
#
+# Warning: don't put freeware before /usr/bsd on IRIX coz you'll
+# get the wrong hostname and set your system name to -s :)
+[ -d /usr/bsd ] && PATH=$PATH:/usr/bsd
+[ -d /usr/freeware/bin ] && PATH=$PATH:/usr/freeware/bin
+PATH=".:$PATH"
+
HOST=`hostname -s`
+HOSTOS=`uname -s`
+[ "$HOSTOS" = "IRIX64" ] && HOSTOS="IRIX"
+
MODULAR=0 # using XFS as a module or not
BOOT="/boot" # install target for kernels
export EXTRA=${EXTRA:=xfs-qa}
SOAK_PASSES=-1 # count of repetitions of fsstress (while soaking)
EMAIL=root@localhost # where auto-qa will send its status messages
export HOST_OPTIONS=${HOST_OPTIONS:=local.config}
-export MKFS_OPTIONS=${MKFS_OPTIONS:=-bsize=4096}
-export MOUNT_OPTIONS=${MOUNT_OPTIONS:=-ologbufs=2}
export CHECK_OPTIONS=${CHECK_OPTIONS:="-g auto"}
export BENCH_PASSES=${BENCH_PASSES:=5}
+export XFS_MKFS_OPTIONS=${XFS_MKFS_OPTIONS:=-bsize=4096}
+[ "$HOSTOS" == "Linux" ] && export XFS_MOUNT_OPTIONS=${XFS_MOUNT_OPTIONS:=-ologbufs=2}
+export UDF_MKFS_OPTIONS=${UDF_MKFS_OPTIONS}
+export UDF_MOUNT_OPTIONS=${UDF_MOUNT_OPTIONS}
+export NFS_MKFS_OPTIONS=${NFS_MKFS_OPTIONS}
+export NFS_MOUNT_OPTIONS=${NFS_MOUNT_OPTIONS}
#export DEBUG=${DEBUG:=...} # arbitrary CFLAGS really.
export MALLOCLIB=${MALLOCLIB:=/usr/lib/libefence.a}
export LOCAL_CONFIGURE_OPTIONS=${LOCAL_CONFIGURE_OPTIONS:=--enable-readline=yes}
+# $1 = prog to look for, $2* = default pathnames if not found in $PATH
+set_prog_path()
+{
+ p=`which $1 2> /dev/null`
+ if [ -n "$p" -a -x "$p" ]
+ then
+ echo $p
+ return 0
+ fi
+ p=$1
+
+ shift
+ for f
+ do
+ if [ -x $f ]
+ then
+ echo $f
+ return 0
+ fi
+ done
+
+ echo ""
+ return 1
+}
+
+_fatal()
+{
+ echo "$*"
+ status=1
+ exit 1
+}
+
+export MKFS_PROG="`set_prog_path mkfs`"
+[ "$MKFS_PROG" = "" ] && _fatal "mkfs not found"
+
+export AWK_PROG="`set_prog_path awk`"
+[ "$AWK_PROG" = "" ] && _fatal "awk not found"
+
+export MOUNT_PROG="`set_prog_path mount`"
+[ "$MOUNT_PROG" = "" ] && _fatal "mount not found"
+
+export UMOUNT_PROG="`set_prog_path umount`"
+[ "$UMOUNT_PROG" = "" ] && _fatal "umount not found"
+
+export NSLOOKUP_PROG="`set_prog_path nslookup`"
+[ "$NSLOOKUP_PROG" = "" ] && _fatal "nslookup not found"
+
+export PERL_PROG="`set_prog_path perl`"
+[ "$PERL_PROG" = "" ] && _fatal "perl not found"
+
+export SED_PROG="`set_prog_path sed`"
+[ "$SED_PROG" = "" ] && _fatal "sed not found"
+
+export MAKE_PROG="`set_prog_path gmake`"
+[ "$MAKE_PROG" = "" ] && _fatal "gmake not found"
+
+export PS_ALL_FLAGS="-ef"
+
+export DF_PROG="`set_prog_path df`"
+[ "$DF_PROG" = "" ] && _fatal "df not found"
+[ "$HOSTOS" = "Linux" ] && export DF_PROG="$DF_PROG -T"
+
+export XFS_LOGPRINT_PROG="`set_prog_path xfs_logprint`"
+export XFS_REPAIR_PROG="`set_prog_path xfs_repair`"
+export XFS_CHECK_PROG="`set_prog_path xfs_check`"
+export XFS_DB_PROG="`set_prog_path xfs_db`"
+
+case "$HOSTOS" in
+ IRIX*)
+ export MKFS_XFS_PROG="`set_prog_path mkfs_xfs`"
+ export MKFS_UDF_PROG="`set_prog_path mkfs_udf`"
+ export XFS_FSR_PROG="`set_prog_path /usr/etc/fsr_xfs`"
+ export MKFS_NFS_PROG="false"
+ ;;
+ Linux)
+ export MKFS_XFS_PROG="`set_prog_path mkfs.xfs`"
+ export MKFS_UDF_PROG="`set_prog_path mkfs.udf`"
+ export XFS_FSR_PROG="`set_prog_path xfs_fsr`"
+ export MKFS_NFS_PROG="false"
+ ;;
+esac
+
known_hosts()
{
case "$HOST"
}
if [ -f "$HOST_OPTIONS" ]; then
- source "$HOST_OPTIONS"
+ . ./"$HOST_OPTIONS"
else
known_hosts
fi
-if [ ! -b "$TEST_DEV" ]
+echo $TEST_DEV | grep -q ":" > /dev/null 2>&1
+if [ ! -b "$TEST_DEV" -a "$?" != "0" ]
then
- echo "common.config: Error: \$TEST_DEV ($TEST_DEV) is not a block device"
+ echo "common.config: Error: \$TEST_DEV ($TEST_DEV) is not a block device or a NFS filesystem"
exit 1
fi
exit 1
fi
-if [ ! -z "$SCRATCH_DEV" -a ! -b "$SCRATCH_DEV" ]
+echo $SCRATCH_DEV | grep -q ":" > /dev/null 2>&1
+if [ ! -z "$SCRATCH_DEV" -a ! -b "$SCRATCH_DEV" -a "$?" != "0" ]
then
- echo "common.config: Error: \$SCRATCH_DEV ($SCRATCH_DEV) is not a block device"
+ echo "common.config: Error: \$SCRATCH_DEV ($SCRATCH_DEV) is not a block device or a NFS filesystem"
exit 1
fi
-if [ ! -z "$SCRATCH_DEV" -a ! -d "$SCRATCH_MNT" ]
+if [ ! -z "$SCRATCH_MNT" -a ! -d "$SCRATCH_MNT" ]
then
echo "common.config: Error: \$SCRATCH_MNT ($SCRATCH_MNT) is not a directory"
exit 1
print STDERR "dirversion=$1\ndirbsize=$2\n";
print STDOUT "naming =VERN bsize=XXX\n";
}
- if (/^log\s+=(internal log|[\w|\/.-]+)\s+bsize=(\d+)\s+blocks=(\d+),\s+version=(\d+)/) {
+ if (/^log\s+=(internal log|[\w|\/.-]+)\s+bsize=(\d+)\s+blocks=(\d+),\s+version=(\d+)/ ||
+ /^log\s+=(internal log|[\w|\/.-]+)\s+bsize=(\d+)\s+blocks=(\d+)/) {
print STDERR "ldev=\"$1\"\nlbsize=$2\nlblocks=$3\nlversion=$4\n";
print STDOUT "log =LDEV bsize=XXX blocks=XXX\n";
}
/^[ ]*$/d;
s/ */ /g;
s/ $//;
- ' |\
+ '|\
awk '
# collapse BUF DATA group into 1 line
# for Oper data this can be over separate operations...ughh
_print_operation()
{
mkdir $fulldir >/dev/null 2>&1
- raw=$fulldir/op.mnt$MOUNT_OPTIONS.mkfs$MKFS_OPTIONS$sync_suffix.raw
- filtered=$fulldir/op.mnt$MOUNT_OPTIONS.mkfs$MKFS_OPTIONS$sync_suffix.filtered
+ mntopt=`echo $MOUNT_OPTIONS | sed 's/ /_/g'`
+ mkfsopt=`echo $MKFS_OPTIONS | sed 's/ /_/g'`
+ raw=$fulldir/op.mnt$mntopt.mkfs$mkfsopt$sync_suffix.raw
+ filtered=$fulldir/op.mnt$mntopt.mkfs$mkfsopt$sync_suffix.filtered
echo "### xfs_logprint output ###" | tee $raw >$filtered
_scratch_xfs_logprint -c 2>&1 \
{
_start=$1
mkdir $fulldir >/dev/null 2>&1
- raw=$fulldir/trans_inode.mnt$MOUNT_OPTIONS.mkfs$MKFS_OPTIONS$sync_suffix.raw
- filtered=$fulldir/trans_inode.mnt$MOUNT_OPTIONS.mkfs$MKFS_OPTIONS$sync_suffix.filtered
+ mntopt=`echo $MOUNT_OPTIONS | sed 's/ /_/g'`
+ mkfsopt=`echo $MKFS_OPTIONS | sed 's/ /_/g'`
+ raw=$fulldir/trans_inode.mnt$mntopt.mkfs$mkfsopt$sync_suffix.raw
+ filtered=$fulldir/trans_inode.mnt$mntopt.mkfs$mkfsopt$sync_suffix.filtered
echo "### xfs_logprint -t -i -s START output ###" | tee $raw >$filtered
_scratch_xfs_logprint -t -i -s $_start 2>&1 \
{
_start=$1
mkdir $fulldir >/dev/null 2>&1
- raw=$fulldir/trans_buf.mnt$MOUNT_OPTIONS.mkfs$MKFS_OPTIONS$sync_suffix.raw
- filtered=$fulldir/trans_buf.mnt$MOUNT_OPTIONS.mkfs$MKFS_OPTIONS$sync_suffix.filtered
+ mntopt=`echo $MOUNT_OPTIONS | sed 's/ /_/g'`
+ mkfsopt=`echo $MKFS_OPTIONS | sed 's/ /_/g'`
+ raw=$fulldir/trans_buf.mnt$mntopt.mkfs$mkfsopt$sync_suffix.raw
+ filtered=$fulldir/trans_buf.mnt$mntopt.mkfs$mkfsopt$sync_suffix.filtered
echo "### xfs_logprint -t -b -s START output ###" | tee $raw >$filtered
_scratch_xfs_logprint -t -b -s $_start 2>&1 \
{
# create the FS
_full "mkfs"
- extra_ops="-lsize=2000b"
+ extra_ops="-l size=2000b"
_scratch_mkfs_xfs $extra_ops >>$seq.full 2>&1
if [ $? -ne 0 ] ; then
_echofull "Cannot mkfs for this test using option specified: $MKFS_OPTIONS $extra_ops"
# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
#
+
+_mount_opts()
+{
+ case $FSTYP in
+ xfs)
+ export MOUNT_OPTIONS=$XFS_MOUNT_OPTIONS
+ ;;
+ udf)
+ export MOUNT_OPTIONS=$UDF_MOUNT_OPTIONS
+ ;;
+ nfs)
+ export MOUNT_OPTIONS=$NFS_MOUNT_OPTIONS
+ ;;
+ *)
+ ;;
+ esac
+}
+
+_mkfs_opts()
+{
+ case $FSTYP in
+ xfs)
+ export MKFS_OPTIONS=$XFS_MKFS_OPTIONS
+ ;;
+ udf)
+ [ ! -z "$udf_fsize" ] && \
+ UDF_MKFS_OPTIONS="$UDF_MKFS_OPTIONS -s $udf_fsize"
+ export MKFS_OPTIONS=$UDF_MKFS_OPTIONS
+ ;;
+ nfs)
+ export MKFS_OPTIONS=$NFS_MKFS_OPTIONS
+ ;;
+ *)
+ ;;
+ esac
+}
+
+[ -z "$FSTYP" ] && FSTYP=xfs
+[ -z "$MOUNT_OPTIONS" ] && _mount_opts
+[ -z "$MKFS_OPTIONS" ] && _mkfs_opts
+
+
# we need common.config
if ! . ./common.config
then
# make sure we have a standard umask
umask 022
-# awk
-AWK_PROG=awk
-export AWK_PROG
-
-# we override mount and mkfs.xfs so we can specify extra options
+_mount()
+{
+ $MOUNT_PROG $*
+}
-mount()
+_scratch_options()
{
- case "$*"
- in
- *remount*)
- /bin/mount $*
- ;;
- *ext2*)
- /bin/mount $*
- ;;
- *xfs*)
- /bin/mount $* $MOUNT_OPTIONS
- ;;
- *)
- /bin/mount $*
- ;;
+ type=$1
+ SCRATCH_OPTIONS=""
+
+ if [ $FSTYP != "xfs" ]; then
+ return
+ fi
+
+ case $type in
+ mkfs)
+ rt_opt="-r"
+ log_opt="-l"
+ ;;
+ mount)
+ rt_opt="-o"
+ log_opt="-o"
+ ;;
esac
+ [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_RTDEV" ] && \
+ SCRATCH_OPTIONS="$SCRATCH_OPTIONS ${rt_opt}rtdev=$SCRATCH_RTDEV"
+ [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_LOGDEV" ] && \
+ SCRATCH_OPTIONS="$SCRATCH_OPTIONS ${log_opt}logdev=$SCRATCH_LOGDEV"
}
-_test_mount()
+_test_options()
{
+ type=$1
TEST_OPTIONS=""
+
+ if [ $FSTYP != "xfs" ]; then
+ return
+ fi
+
+ case $type
+ in
+ mkfs)
+ rt_opt="-r"
+ log_opt="-l"
+ ;;
+ mount)
+ rt_opt="-o"
+ log_opt="-o"
+ ;;
+ esac
[ "$USE_EXTERNAL" = yes -a ! -z "$TEST_RTDEV" ] && \
- TEST_OPTIONS="$TEST_OPTIONS -ortdev=$TEST_RTDEV"
+ TEST_OPTIONS="$TEST_OPTIONS ${rt_opt}rtdev=$TEST_RTDEV"
[ "$USE_EXTERNAL" = yes -a ! -z "$TEST_LOGDEV" ] && \
- TEST_OPTIONS="$TEST_OPTIONS -ologdev=$TEST_LOGDEV"
- [ -z "$FSTYP" ] && FSTYP=xfs
- mount -t $FSTYP $TEST_OPTIONS $MOUNT_OPTIONS $* $TEST_DEV $TEST_DIR
+ TEST_OPTIONS="$TEST_OPTIONS ${log_opt}logdev=$TEST_LOGDEV"
}
_scratch_mount_options()
{
- SCRATCH_OPTIONS=""
- [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_RTDEV" ] && \
- SCRATCH_OPTIONS="$SCRATCH_OPTIONS -ortdev=$SCRATCH_RTDEV"
- [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_LOGDEV" ] && \
- SCRATCH_OPTIONS="$SCRATCH_OPTIONS -ologdev=$SCRATCH_LOGDEV"
+ _scratch_options mount
echo $SCRATCH_OPTIONS $MOUNT_OPTIONS $* $SCRATCH_DEV $SCRATCH_MNT
}
_scratch_mount()
{
- SCRATCH_OPTIONS=""
- [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_RTDEV" ] && \
- SCRATCH_OPTIONS="$SCRATCH_OPTIONS -ortdev=$SCRATCH_RTDEV"
- [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_LOGDEV" ] && \
- SCRATCH_OPTIONS="$SCRATCH_OPTIONS -ologdev=$SCRATCH_LOGDEV"
- [ -z "$FSTYP" ] && FSTYP=xfs
- mount -t $FSTYP $SCRATCH_OPTIONS $MOUNT_OPTIONS $* $SCRATCH_DEV $SCRATCH_MNT
+ _mount -t $FSTYP `_scratch_mount_options $*`
}
-_scratch_mkfs_options()
+_test_mount()
{
- SCRATCH_OPTIONS=""
- [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_RTDEV" ] && \
- SCRATCH_OPTIONS="$SCRATCH_OPTIONS -rrtdev=$SCRATCH_RTDEV"
- [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_LOGDEV" ] && \
- SCRATCH_OPTIONS="$SCRATCH_OPTIONS -llogdev=$SCRATCH_LOGDEV"
- echo $SCRATCH_OPTIONS $MKFS_OPTIONS $* $SCRATCH_DEV
+ _test_options mount
+ _mount -t $FSTYP $TEST_OPTIONS $MOUNT_OPTIONS $* $TEST_DEV $TEST_DIR
}
-_scratch_mkfs()
+_scratch_mkfs_options()
{
- if [ -z "$FSTYP" -o "$FSTYP" = xfs ]; then
- _scratch_mkfs_xfs $*
- else
- /sbin/mkfs.$FSTYP $MKFS_OPTIONS $* $SCRATCH_DEV
- fi
+ _scratch_options mkfs
+ echo $SCRATCH_OPTIONS $MKFS_OPTIONS $* $SCRATCH_DEV
}
_scratch_mkfs_xfs()
-{
- SCRATCH_OPTIONS=""
- [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_RTDEV" ] && \
- SCRATCH_OPTIONS="$SCRATCH_OPTIONS -rrtdev=$SCRATCH_RTDEV"
- [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_LOGDEV" ] && \
- SCRATCH_OPTIONS="$SCRATCH_OPTIONS -llogdev=$SCRATCH_LOGDEV"
- /sbin/mkfs.xfs -f $SCRATCH_OPTIONS $MKFS_OPTIONS $* $SCRATCH_DEV
+{
+ _scratch_options mkfs
+ [ "$HOSTOS" != "IRIX" ] && \
+ SCRATCH_OPTIONS="$SCRATCH_OPTIONS -f"
+
+ $MKFS_XFS_PROG $SCRATCH_OPTIONS $MKFS_OPTIONS $* $SCRATCH_DEV
mkfs_status=$?
+
if [ "$USE_BIG_LOOPFS" = yes ]; then
[ -z "$RETAIN_AG_BYTES" ] && RETAIN_AG_BYTES=0
./tools/ag-wipe -q -r $RETAIN_AG_BYTES $SCRATCH_DEV
fi
+
return $mkfs_status
}
+_scratch_mkfs()
+{
+ case $FSTYP in
+ xfs)
+ _scratch_mkfs_xfs $*
+ ;;
+ nfs*)
+ # do nothing for nfs
+ ;;
+ udf|*)
+ $MKFS_PROG -t $FSTYP $MKFS_OPTIONS $* > /dev/null
+ ;;
+ esac
+}
+
_scratch_xfs_db_options()
{
SCRATCH_OPTIONS=""
SCRATCH_OPTIONS=""
[ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_LOGDEV" ] && \
SCRATCH_OPTIONS="-l$SCRATCH_LOGDEV"
- /usr/sbin/xfs_logprint $SCRATCH_OPTIONS $* $SCRATCH_DEV
+ $XFS_LOGPRINT_PROG $SCRATCH_OPTIONS $* $SCRATCH_DEV
}
_scratch_xfs_repair()
[ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_LOGDEV" ] && \
SCRATCH_OPTIONS="-l$SCRATCH_LOGDEV"
[ "$USE_BIG_LOOPFS" = yes ] && SCRATCH_OPTIONS=$SCRATCH_OPTIONS" -t"
- /sbin/xfs_repair $SCRATCH_OPTIONS $* $SCRATCH_DEV
+ $XFS_REPAIR_PROG $SCRATCH_OPTIONS $* $SCRATCH_DEV
}
_get_pids_by_name()
_get_fqdn()
{
host=`hostname`
- nslookup $host | $AWK_PROG '{ if ($1 == "Name:") print $2 }'
+ $NSLOOKUP_PROG $host | $AWK_PROG '{ if ($1 == "Name:") print $2 }'
}
# fix malloc libs output
_fix_malloc()
{
# filter out the Electric Fence notice
- perl -e '
+ $PERL_PROG -e '
while (<>) {
if (defined $o && /^\s+Electric Fence/) {
chomp($o);
#
_need_to_be_root()
{
- id=`id | sed -e 's/(.*//' -e 's/.*=//'`
+ id=`id | $SED_PROG -e 's/(.*//' -e 's/.*=//'`
if [ "$id" -ne 0 ]
then
echo "Arrgh ... you need to be root (not uid=$id) to run this test"
exit 1
fi
- df -T 2>/dev/null | $AWK_PROG -v what=$1 '
+ $DF_PROG 2>/dev/null | $AWK_PROG -v what=$1 '
match($1,what) && NF==1 {
v=$1
getline
exit 1
fi
- df -T $1 2>/dev/null | $AWK_PROG -v what=$1 '
+ $DF_PROG $1 2>/dev/null | $AWK_PROG -v what=$1 '
NR == 2 && NF==1 {
v=$1
getline
# return the FS mount options of a mounted device
#
+# should write a version which just parses the output of mount for IRIX
+# compatibility, but since this isn't used at all, at the moment I'll leave
+# this for now
+#
_fs_options()
{
if [ $# -ne 1 ]
_do()
{
- if [ $# -eq 1 ]; then
- _cmd=$1
- elif [ $# -eq 2 ]; then
- _note=$1
- _cmd=$2
- echo -n "$_note... "
- else
- echo "Usage: _do [note] cmd" 1>&2
- status=1; exit
- fi
-
- (eval "echo '---' \"$_cmd\"") >>$seq.full
- (eval "$_cmd") >$tmp._out 2>&1; ret=$?
- cat $tmp._out | _fix_malloc >>$seq.full
- if [ $# -eq 2 ]; then
- if [ $ret -eq 0 ]; then
- echo "done"
+ if [ $# -eq 1 ]; then
+ _cmd=$1
+ elif [ $# -eq 2 ]; then
+ _note=$1
+ _cmd=$2
+ echo -n "$_note... "
else
- echo "fail"
+ echo "Usage: _do [note] cmd" 1>&2
+ status=1; exit
fi
- fi
- if [ $ret -ne 0 ] \
- && [ "$_do_die_on_error" = "always" \
- -o \( $# -eq 2 -a "$_do_die_on_error" = "message_only" \) ]
- then
- [ $# -ne 2 ] && echo
- eval "echo \"$_cmd\" failed \(returned $ret\): see $seq.full"
- status=1; exit
- fi
- return $ret
+ (eval "echo '---' \"$_cmd\"") >>$here/$seq.full
+ (eval "$_cmd") >$tmp._out 2>&1; ret=$?
+ cat $tmp._out | _fix_malloc >>$here/$seq.full
+ if [ $# -eq 2 ]; then
+ if [ $ret -eq 0 ]; then
+ echo "done"
+ else
+ echo "fail"
+ fi
+ fi
+ if [ $ret -ne 0 ] \
+ && [ "$_do_die_on_error" = "always" \
+ -o \( $# -eq 2 -a "$_do_die_on_error" = "message_only" \) ]
+ then
+ [ $# -ne 2 ] && echo
+ eval "echo \"$_cmd\" failed \(returned $ret\): see $seq.full"
+ status=1; exit
+ fi
+
+ return $ret
}
# bail out, setting up .notrun file
#
_fail()
{
- echo "$*" | tee -a $seq.full
+ echo "$*" | tee -a $here/$seq.full
echo "(see $seq.full for details)"
status=1
exit 1
}
+# tests whether $FSTYP is one of the supported filesystems for a test
+#
+_supported_fs()
+{
+ for f
+ do
+ if [ "$f" = "$FSTYP" ]
+ then
+ return
+ fi
+ done
+
+ _notrun "not suitable for this filesystem type: $FSTYP"
+}
+
+# tests whether $FSTYP is one of the supported OSes for a test
+#
+_supported_os()
+{
+ for h
+ do
+ if [ "$h" = "$HOSTOS" ]
+ then
+ return
+ fi
+ done
+
+ _notrun "not suitable for this OS: $HOSTOS"
+}
+
# this test needs a scratch partition - check we're ok & unmount it
#
_require_scratch()
{
- if [ -z "$SCRATCH_DEV" -o "`_is_block_dev $SCRATCH_DEV`" = "" ]
- then
- _notrun "this test requires a valid \$SCRATCH_DEV"
- fi
-
- if [ "`_is_block_dev $SCRATCH_DEV`" = "`_is_block_dev $TEST_DEV`" ]
- then
- _notrun "this test requires a valid \$SCRATCH_DEV"
- fi
+ case "$FSTYP" in
+ xfs|udf)
+ if [ -z "$SCRATCH_DEV" -o "`_is_block_dev $SCRATCH_DEV`" = "" ]
+ then
+ _notrun "this test requires a valid \$SCRATCH_DEV"
+ fi
+
+ if [ "`_is_block_dev $SCRATCH_DEV`" = "`_is_block_dev $TEST_DEV`" ]
+ then
+ _notrun "this test requires a valid \$SCRATCH_DEV"
+ fi
+ ;;
+ nfs*)
+ echo $SCRATCH_DEV | grep -q ":" > /dev/null 2>&1
+ if [ ! -z "$SCRATCH_DEV" -a ! -b "$SCRATCH_DEV" -a "$?" != "0" ]
+ then
+ _notrun "this test requires a valid \$SCRATCH_DEV"
+ fi
+ ;;
+ *)
+ _notrun "\$FSTYP ($FSTYP) unknown or not specified"
+ ;;
+ esac
+
# mounted?
- if mount | grep -q $SCRATCH_DEV
+ if _mount | grep -q $SCRATCH_DEV
then
# if it's mounted, make sure its on $SCRATCH_MNT
- if ! mount | grep $SCRATCH_DEV | grep -q $SCRATCH_MNT
+ if ! _mount | grep $SCRATCH_DEV | grep -q $SCRATCH_MNT
then
echo "\$SCRATCH_DEV is mounted but not on \$SCRATCH_MNT - aborting"
exit 1
fi
# and then unmount it
- if ! umount $SCRATCH_DEV
+ if ! $UMOUNT_PROG $SCRATCH_DEV
then
echo "failed to unmount $SCRATCH_DEV"
exit 1
_notrun "This test requires USE_EXTERNAL to be enabled"
# ensure its not mounted
- umount $SCRATCH_LOGDEV 2>/dev/null
+ $UMOUNT_PROG $SCRATCH_LOGDEV 2>/dev/null
}
# this test requires loopback device support
#
_require_loop()
{
+ if [ "$HOSTOS" != "Linux" ]
+ then
+ _notrun "This test requires linux for loopback device support"
+ fi
+
modprobe loop >/dev/null 2>&1
if grep loop /proc/devices >/dev/null 2>&1
then
device=$1
- if mount | grep $device | $AWK_PROG '
+ if _mount | grep $device | $AWK_PROG '
/type xfs/ { print $3 ; exit 0 }
END { exit 1 }
'
echo "_remount: failed to remount filesystem on $device as $mode"
exit 1
fi
-
- # we might like to check here later
- #options=`_fs_options $device`
-
}
# run xfs_check and friends on a FS.
USE_REMOUNT=0
-_check_filesystem()
+_check_xfs_filesystem()
{
if [ $# -ne 1 -a $# -ne 2 ]
then
if [ $USE_REMOUNT -eq 0 ]
then
mountpoint=`_xfs_mounted $device`
- umount $device
+ $UMOUNT_PROG $device
else
_remount $device ro
fi
fi
- /usr/sbin/xfs_logprint -t $extra_log_options $device 2>&1 \
+ $XFS_LOGPRINT_PROG -t $extra_log_options $device 2>&1 \
| tee $tmp.fs_check | grep -q "<CLEAN>"
- if [ $? -ne 0 ]
+ if [ $? -ne 0 -a "$HOSTOS" = "Linux" ]
then
echo "_check_fs: filesystem on $device has dirty log (see $seq.full)"
- echo "_check_fs: filesystem on $device has dirty log" >>$seq.full
- echo "*** xfs_logprint -t output ***" >>$seq.full
- cat $tmp.fs_check >>$seq.full
- echo "*** end xfs_logprint output" >>$seq.full
+ echo "_check_fs: filesystem on $device has dirty log" >>$here/$seq.full
+ echo "*** xfs_logprint -t output ***" >>$here/$seq.full
+ cat $tmp.fs_check >>$here/$seq.full
+ echo "*** end xfs_logprint output" >>$here/$seq.full
ok=0
fi
- /usr/sbin/xfs_check $testoption $extra_log_options $device 2>&1 | \
- _fix_malloc >$tmp.fs_check
+ $XFS_CHECK_PROG $testoption $extra_log_options $device 2>&1 |\
+ _fix_malloc >$tmp.fs_check
if [ -s $tmp.fs_check ]
then
echo "_check_fs: filesystem on $device is inconsistent (c) (see $seq.full)"
- echo "_check_fs: filesystem on $device is inconsistent" >>$seq.full
- echo "*** xfs_check output ***" >>$seq.full
- cat $tmp.fs_check >>$seq.full
- echo "*** end xfs_check output" >>$seq.full
+ echo "_check_fs: filesystem on $device is inconsistent" >>$here/$seq.full
+ echo "*** xfs_check output ***" >>$here/$seq.full
+ cat $tmp.fs_check >>$here/$seq.full
+ echo "*** end xfs_check output" >>$here/$seq.full
ok=0
fi
-
# repair doesn't scale massively at this stage, optionally skip it for now
[ "$USE_BIG_LOOPFS" = yes ] || \
- /sbin/xfs_repair -n $extra_log_options $device >$tmp.fs_check 2>&1
+ $XFS_REPAIR_PROG -n $extra_log_options $device >$tmp.fs_check 2>&1
if [ $? -ne 0 ]
then
echo "_check_fs: filesystem on $device is inconsistent (r) (see $seq.full)"
- echo "_check_fs: filesystem on $device is inconsistent" >>$seq.full
- echo "*** xfs_repair -n output ***" >>$seq.full
- cat $tmp.fs_check | _fix_malloc >>$seq.full
- echo "*** end xfs_repair output" >>$seq.full
+ echo "_check_fs: filesystem on $device is inconsistent" >>$here/$seq.full
+ echo "*** xfs_repair -n output ***" >>$here/$seq.full
+ cat $tmp.fs_check | _fix_malloc >>$here/$seq.full
+ echo "*** end xfs_repair output" >>$here/$seq.full
ok=0
fi
if [ $ok -eq 0 ]
then
- echo "*** mount output ***" >>$seq.full
- mount >>$seq.full
- echo "*** end mount output" >>$seq.full
+ echo "*** mount output ***" >>$here/$seq.full
+ _mount >>$here/$seq.full
+ echo "*** end mount output" >>$here/$seq.full
elif [ "$type" = "xfs" ]
then
# mounted...
if [ $USE_REMOUNT -eq 0 ]
then
- if ! mount -t xfs $extra_mount_options $device $mountpoint
+ if ! _mount -t xfs $extra_mount_options $device $mountpoint
then
echo "!!! failed to remount $device on $mountpoint"
ok=0
return 0
}
+_check_udf_filesystem()
+{
+ if [ $# -ne 1 -a $# -ne 2 ]
+ then
+		echo "Usage: _check_udf_filesystem device [last_block]" 1>&2
+ exit 1
+ fi
+
+ device=$1
+ if [ $# -eq 2 ];
+ then
+ LAST_BLOCK=`expr \( $2 - 1 \)`
+ OPT_ARG="-lastvalidblock $LAST_BLOCK"
+ fi
+
+ # Output messages format:
+ # Error messages contain : "Error:" or "error:"
+ # Warning messages contain : "Warning:" or "warning:"
+ # Attention messages contain : "Note:" or "note:"
+ # Message continuation lines start with a "-" character.
+
+ rm -f $seq.checkfs
+ sleep 1 # Due to a problem with time stamps in udf_test
+ $here/src/udf_test $OPT_ARG $device | tee $here/$seq.checkfs | \
+ egrep -i "error:|warning:|Error count:|Warning count:" | \
+ egrep -v "Error count: 0|Warning count: 0"
+}
+
_check_test_fs()
{
TEST_LOG=""
[ "$USE_EXTERNAL" = yes -a ! -z "$TEST_LOGDEV" ] && \
TEST_LOG="$TEST_LOGDEV"
- _check_filesystem $TEST_DEV $TEST_LOG
+
+ _check_xfs_filesystem $TEST_DEV $TEST_LOG
}
_check_scratch_fs()
{
- SCRATCH_LOG=""
- [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_LOGDEV" ] && \
- SCRATCH_LOG="$SCRATCH_LOGDEV"
- _check_filesystem $SCRATCH_DEV $SCRATCH_LOG
+
+
+ case $FSTYP in
+ xfs)
+ SCRATCH_LOG=""
+ [ "$USE_EXTERNAL" = yes -a ! -z "$SCRATCH_LOGDEV" ] && \
+ SCRATCH_LOG="$SCRATCH_LOGDEV"
+
+ _check_xfs_filesystem $SCRATCH_DEV $SCRATCH_LOG
+ ;;
+ udf)
+ _check_udf_filesystem $SCRATCH_DEV $udf_fsize
+ ;;
+ nfs*)
+ # Don't know how to check an NFS filesystem, yet.
+ ;;
+ *)
+ ;;
+ esac
}
_full_fstyp_details()
{
- [ -z "$FSTYP" ] && FSTYP=xfs
- if [ $FSTYP = xfs ]; then
- if grep 'debug 0' /proc/fs/xfs/stat >/dev/null; then
- FSTYP="$FSTYP (non-debug)"
- elif grep 'debug 1' /proc/fs/xfs/stat >/dev/null; then
- FSTYP="$FSTYP (debug)"
- fi
- fi
- echo $FSTYP
+ [ -z "$FSTYP" ] && FSTYP=xfs
+ if [ $FSTYP = xfs ]; then
+ if grep 'debug 0' /proc/fs/xfs/stat >/dev/null; then
+ FSTYP="$FSTYP (non-debug)"
+ elif grep 'debug 1' /proc/fs/xfs/stat >/dev/null; then
+ FSTYP="$FSTYP (debug)"
+ fi
+ fi
+ echo $FSTYP
}
_full_platform_details()
{
- os=`uname -s`
- host=`hostname -s`
- kernel=`uname -r`
- platform=`uname -m`
- echo "$os/$platform $host $kernel"
+ os=`uname -s`
+ host=`hostname -s`
+ kernel=`uname -r`
+ platform=`uname -m`
+ echo "$os/$platform $host $kernel"
}
-################################################################################
+_check_testdir()
+{
+
+
+ case $FSTYP in
+ xfs)
+ _check_test_fs
+ ;;
+ udf)
+ _cleanup_testdir
+ _check_scratch_fs
+ _scratch_mount
+ ;;
+ nfs*)
+ # Don't know how to check an NFS filesystem, yet.
+ ;;
+ *)
+ ;;
+ esac
+}
-[ -d /usr/bsd ] && PATH=$PATH:/usr/bsd
-[ -d /usr/freeware/bin ] && PATH=$PATH:/usr/freeware/bin
+
+_setup_xfs_testdir()
+{
+ [ "$FSTYP" != "xfs" ] \
+ && _fail "setup_xfs_testdir: \$FSTYP ($FSTYP) is not xfs"
+
+ testdir=$TEST_DIR
+}
+
+_setup_udf_testdir()
+{
+ [ "$FSTYP" != "udf" ] \
+ && _fail "setup_udf_testdir: \$FSTYP is not udf"
+ [ -z "$SCRATCH_DEV" -o ! -b "$SCRATCH_DEV" ] \
+ && _notrun "this test requires a valid \$SCRATCH_DEV"
+ [ -z "$SCRATCH_MNT" ] \
+ && _notrun "this test requires a valid \$SCRATCH_MNT"
+
+ # mounted?
+ if _mount | grep -q $SCRATCH_DEV
+ then
+		# if it's mounted, make sure it's on $SCRATCH_MNT
+ if ! _mount | grep $SCRATCH_DEV | grep -q $SCRATCH_MNT
+ then
+ _fail "\$SCRATCH_DEV is mounted but not on \$SCRATCH_MNT - aborting"
+ fi
+ $UMOUNT_PROG $SCRATCH_DEV
+ fi
+
+ _scratch_mkfs
+ _scratch_mount
+
+ testdir=$SCRATCH_MNT
+}
+
+_setup_nfs_testdir()
+{
+ [ "$FSTYP" != "nfs" ] \
+	&& _fail "setup_nfs_testdir: \$FSTYP is not nfs"
+ [ -z "$SCRATCH_DEV" ] \
+ && _notrun "this test requires a valid host fs for \$SCRATCH_DEV"
+ [ -z "$SCRATCH_MNT" ] \
+ && _notrun "this test requires a valid \$SCRATCH_MNT"
+
+ # mounted?
+ if _mount | grep -q $SCRATCH_DEV
+ then
+		# if it's mounted, make sure it's on $SCRATCH_MNT
+ if ! _mount | grep $SCRATCH_DEV | grep -q $SCRATCH_MNT
+ then
+ _fail "\$SCRATCH_DEV is mounted but not on \$SCRATCH_MNT - aborting"
+ fi
+ $UMOUNT_PROG $SCRATCH_DEV
+ fi
+
+ _scratch_mkfs
+ _scratch_mount
+
+ testdir=$SCRATCH_MNT
+}
+
+_setup_testdir()
+{
+
+
+ case $FSTYP in
+ xfs)
+ _setup_xfs_testdir
+ ;;
+ udf)
+ _setup_udf_testdir
+ ;;
+ nfs*)
+ _setup_nfs_testdir
+ ;;
+ *)
+ _fail "\$FSTYP is not xfs, udf or nfs"
+ ;;
+ esac
+}
+
+_cleanup_testdir()
+{
+
+
+ case $FSTYP in
+ xfs)
+ # do nothing, testdir is $TEST_DIR
+ ;;
+ udf)
+ # umount testdir as it is $SCRATCH_MNT which could be used by xfs next
+ [ -n "$testdir" ] && $UMOUNT_PROG $testdir
+ ;;
+ nfs*)
+ # umount testdir as it is $SCRATCH_MNT which could be used by xfs next
+ [ -n "$testdir" ] && $UMOUNT_PROG $testdir
+ ;;
+ *)
+ _fail "\$FSTYP is not xfs, udf or nfs"
+ ;;
+ esac
+}
+
+################################################################################
if [ "$iam" != new -a "$iam" != bench ]
then
-
# make some further configuration checks here
-
+
if [ "$TEST_DEV" = "" ]
then
echo "common.rc: Error: \$TEST_DEV is not set"
if [ "`_fs_type $TEST_DEV`" != "xfs" ]
then
echo "common.rc: Error: \$TEST_DEV ($TEST_DEV) is not a MOUNTED XFS filesystem"
- df -T $TEST_DEV
+ $DF_PROG $TEST_DEV
exit 1
fi
fi
-# check for some required binaries on our $PATH
-#
-for exec in mkfs.xfs xfs_logprint xfs_check xfs_repair xfs_db
-do
- if which $exec >/dev/null 2>&1
- then
- :
- else
- echo "common.rc: cannot find $exec on \$PATH=$PATH"
- exit 1
- fi
-done
+_link_out_file()
+{
+ if [ -z "$1" ]; then
+ echo Error must pass \$seq.
+ exit
+ fi
+ rm -f $1
+ if [ "`uname`" == "IRIX64" ] || [ "`uname`" == "IRIX" ]; then
+ ln -s $1.irix $1
+ elif [ "`uname`" == "Linux" ]; then
+ ln -s $1.linux $1
+ else
+ echo Error test $seq does not run on the operating system: `uname`
+ exit
+ fi
+}
+
+_get_os()
+{
+
+ if [ "`uname`" == "IRIX64" ] || [ "`uname`" == "IRIX" ]; then
+ os=irix
+ elif [ "`uname`" == "Linux" ]; then
+ os=linux
+ else
+ echo Error test does not run on the operating system: `uname`
+ exit
+ fi
+}
# make sure this script returns success
/bin/true
AC_PACKAGE_GLOBALS(xfstests)
AC_PACKAGE_UTILITIES(xfstests)
-AC_PACKAGE_NEED_UUID_H
+AC_HEADER_STDC
+ AC_CHECK_HEADERS([ assert.h \
+ bstring.h \
+ libgen.h \
+ dirent.h \
+ errno.h \
+ malloc.h \
+ uuid.h \
+ uuid/uuid.h \
+ sys/uuid.h \
+ sys/file.h \
+ sys/fcntl.h \
+ sys/syssgi.h \
+ sys/param.h \
+ sys/stat.h \
+ sys/statvfs.h \
+ sys/time.h \
+ sys/ioctl.h \
+ sys/wait.h \
+ sys/types.h \
+ strings.h \
+ err.h
+ ])
+ AC_CHECK_HEADERS([ sys/fs/xfs_fsops.h \
+ sys/fs/xfs_itable.h \
+ xfs/platform_defs.h \
+ ])
+
AC_PACKAGE_NEED_UUIDCOMPARE
-AC_PACKAGE_NEED_XFS_LIBXFS_H
-AC_PACKAGE_NEED_XFSCTL_MACRO
-AC_PACKAGE_NEED_XFS_HANDLE_H
-AC_PACKAGE_NEED_ATTRLIST_LIBHANDLE
+case $pkg_platform
+in
+ irix)
+ AC_PACKAGE_NEED_SYS_ACL_H
+ AC_PACKAGE_NEED_ATTRIBUTES_H
+ AC_PACKAGE_WANT_NDBM
+ ;;
+ *)
+ AC_PACKAGE_NEED_XFS_LIBXFS_H
+ AC_PACKAGE_NEED_XFS_XQM_H
+ AC_PACKAGE_NEED_XFSCTL_MACRO
+ AC_PACKAGE_NEED_XFS_HANDLE_H
+
+ AC_PACKAGE_NEED_ATTRLIST_LIBHANDLE
+ AC_PACKAGE_NEED_ATTR_XATTR_H
+ AC_PACKAGE_NEED_ATTRIBUTES_H
+ AC_PACKAGE_NEED_GETXATTR_LIBATTR
-AC_PACKAGE_NEED_ATTR_XATTR_H
-AC_PACKAGE_NEED_GETXATTR_LIBATTR
-AC_PACKAGE_NEED_SYS_ACL_H
-AC_PACKAGE_NEED_ACL_LIBACL_H
-AC_PACKAGE_NEED_ACLINIT_LIBACL
+ AC_PACKAGE_NEED_SYS_ACL_H
+ AC_PACKAGE_NEED_ACL_LIBACL_H
+ AC_PACKAGE_NEED_ACLINIT_LIBACL
-AC_PACKAGE_WANT_LIBGDBM
+ AC_PACKAGE_WANT_GDBM
+ ;;
+esac
+AC_CONFIG_HEADER(include/config.h)
AC_OUTPUT(include/builddefs)
HFILES = dataascii.h databin.h pattern.h \
random_range.h string_to_tokens.h tlibio.h write_log.h
-LSRCFILES = builddefs.in buildrules
+LSRCFILES = builddefs.in buildrules buildmacros config.h.in
default install install-dev:
RPM_VERSION = @rpm_version@
ENABLE_SHARED = @enable_shared@
-ENABLE_DBM = @enable_dbm@
+HAVE_DB = @have_db@
ifeq ($(PKG_PLATFORM),linux)
PCFLAGS = -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64
PCFLAGS = -traditional-cpp
endif
-CFLAGS += -O1 $(OPTIMIZER) $(DEBUG) -funsigned-char -Wall -I$(TOPDIR)/include \
- -DVERSION=\"$(PKG_VERSION)\"
+CFLAGS += -O1 $(OPTIMIZER) $(DEBUG) -funsigned-char -fno-strict-aliasing -Wall \
+ -I$(TOPDIR)/include -DVERSION=\"$(PKG_VERSION)\" -D_REENTRANT
# Global, Platform, Local CFLAGS
CFLAGS += $(GCFLAGS) $(PCFLAGS) $(LCFLAGS)
LTLINK = $(LIBTOOL) --mode=link $(CC)
LTEXEC = $(LIBTOOL) --mode=execute
LTINSTALL = $(LIBTOOL) --mode=install $(INSTALL)
-LTCOMPILE = $(LIBTOOL) --mode=compile $(CCF) -D_REENTRANT -fno-strict-aliasing
+LTCOMPILE = $(LIBTOOL) --mode=compile $(CCF)
ifeq ($(ENABLE_SHARED),yes)
LTLDFLAGS += -rpath $(PKG_LIB_DIR)
int offset; /* offset into the file where buffer starts */
char **errmsg;
{
- int cnt;
- unsigned char *chr;
- int total;
- long expbits;
- long actbits;
+ int cnt;
+ unsigned char *chr;
+ long expbits;
+ long actbits;
chr=buffer;
- total=bsize;
if ( errmsg != NULL ) {
*errmsg = Errmsg;
#else
/* for linux or sgi */
#include <sys/uio.h> /* readv(2)/writev(2) */
-#include <string.h> /* bzero */
+#include <string.h>
+#include <strings.h>
#endif
#ifdef sgi
#include <aio.h>
lio_async_callback_handler(sigval_t sigval)
{
if ( Debug_level )
- printf("DEBUG %s/%d: received callback, nbytes=%ld, a callback called %d times\n",
+ printf("DEBUG %s/%d: received callback, nbytes=%d, a callback called %d times\n",
__FILE__, __LINE__, sigval.sival_int, Received_callback+1);
Received_callback++;
#include <fcntl.h>
#include <errno.h>
#include <string.h>
+#include <strings.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/types.h>
int (*func)();
long data;
{
- int fd, leftover, nbytes, offset, recnum, reclen, rval;
+ int fd, leftover, nbytes, offset, recnum, reclen;
char buf[BSIZE*32], *bufend, *cp, *bufstart;
char albuf[WLOG_REC_MAX_SIZE];
struct wlog_rec wrec;
* stop if instructed to.
*/
- if ((rval = (*func)(&wrec, data)) == WLOG_STOP_SCAN) {
+ if ((*func)(&wrec, data) == WLOG_STOP_SCAN) {
break;
}
HFILES = doio.h
LDIRT = $(TARGETS)
LCFLAGS = -DXFS
-
+IFLAG = -I$(TOPDIR)/src #Used for including $(TOPDIR)/src/global.h
#LCFLAGS += -DAIO
#LIBAIO =-laio
default: $(TARGETS)
-include $(BUILDRULES)
+include $(BUILDRULES)
LINKTEST = $(LTLINK) $@.c -o $@ $(CFLAGS) $(LDFLAGS)
doio: doio.c $(LIBTEST)
- $(LINKTEST) $(LIBTEST)
+ $(LINKTEST) $(LIBTEST) $(IFLAG)
fsstress: fsstress.c $(LIBATTR) $(LIBTEST)
- $(LINKTEST) $(LIBATTR) $(LIBTEST) $(LDLIBS)
+ $(LINKTEST) $(LIBATTR) $(LIBTEST) $(LDLIBS) $(IFLAG)
fsx: fsx.c
- $(LINKTEST) $(LIBAIO) $(LDLIBS)
+ $(LINKTEST) $(LIBAIO) $(LDLIBS) $(IFLAG)
growfiles: growfiles.c $(LIBTEST)
- $(LINKTEST) $(LIBTEST) $(LDLIBS)
+ $(LINKTEST) $(LIBTEST) $(LDLIBS) $(IFLAG)
iogen: iogen.c $(LIBTEST)
- $(LINKTEST) $(LIBTEST)
+ $(LINKTEST) $(LIBTEST) $(IFLAG)
*
*/
-#include <stdio.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <signal.h>
-#include <string.h>
-#include <ctype.h>
-#include <unistd.h>
-#include <time.h>
-#include <stdarg.h>
-#include <sys/stat.h>
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/sysmacros.h>
-#ifdef CRAY
-#include <sys/iosw.h>
-#endif
+#include "global.h"
+
#ifdef sgi
#include <aio.h> /* for aio_read,write */
#include <inttypes.h> /* for uint64_t type */
#include <siginfo.h> /* signal handlers & SA_SIGINFO */
#endif
-#ifndef CRAY
+
#include <sys/uio.h> /* for struct iovec (readv)*/
#include <sys/mman.h> /* for mmap(2) */
#include <sys/ipc.h> /* for i/o buffer in shared memory */
#include <sys/shm.h> /* for i/o buffer in shared memory */
-#endif
#include <sys/wait.h>
-#ifdef CRAY
-#include <sys/listio.h>
-#include <sys/panic.h>
-#endif
#include <sys/time.h> /* for delays */
+#include <ctype.h>
#ifndef NO_XFS
-#include <xfs/libxfs.h>
struct io_req;
int do_xfsctl(struct io_req *);
#endif
int parse_cmdline( int, char **, char * );
int lock_file_region( char *, int, int, int, int );
struct fd_cache *alloc_fdcache(char *, int);
+int aio_register( int, int, int );
+#ifndef linux
+int aio_wait(int);
+#endif
/*
* Upanic conditions, and a map from symbolics to values
char **argv;
{
int i, pid, stat, ex_stat;
-#ifdef CRAY
- sigset_t omask;
-#else
- int omask;
-#endif
struct sigaction sa;
-
+ int omask;
umask(0); /* force new file modes to known values */
#if _CRAYMPP
Npes = sysconf(_SC_CRAY_NPES); /* must do this before parse_cmdline */
if ((cp = strchr(Host, '.')) != NULL)
*cp = '\0';
- Pattern_Length = sprintf(Pattern, "-:%d:%s:%s*", getpid(), Host, Prog);
+ Pattern_Length = sprintf(Pattern, "-:%d:%s:%s*", (int)getpid(), Host, Prog);
if (!(Pattern_Length % 16)) {
Pattern_Length = sprintf(Pattern, "-:%d:%s:%s**",
- getpid(), Host, Prog);
+ (int)getpid(), Host, Prog);
}
/*
struct io_req *req;
{
static int pid = -1;
- int fd, nbytes, oflags, signo;
+ int fd, nbytes, oflags;
+ /* REFERENCED */
+ int signo;
int logged_write, rval, got_lock;
long offset, woffset = 0;
char *addr, pattern, *file, *msg;
struct status *s;
struct wlog_rec wrec;
struct syscall_info *sy;
-#if defined(CRAY) || defined(sgi)
+#ifdef sgi
struct aio_info *aiop;
+#endif
+#ifdef CRAY
+ /* REFERENCED */
struct iosw *iosw;
#endif
#ifndef NO_XFS
for (i = 0; i < nb; i++) {
expected[i] = pattern[(pattern_index + i) % pattern_length];
- if (! isprint(expected[i])) {
+ if (! isprint((int)expected[i])) {
expected[i] = '.';
}
actual[i] = cp[i];
- if (! isprint(actual[i])) {
+ if (! isprint((int)actual[i])) {
actual[i] = '.';
}
}
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
-#include <xfs/libxfs.h>
+#include "global.h"
+
+#ifdef HAVE_ATTR_XATTR_H
#include <attr/xattr.h>
-#include <attr/attributes.h>
-#include <sys/statvfs.h>
-#include <sys/time.h>
-#include <sys/wait.h>
-#include <dirent.h>
+#endif
+#include <math.h>
#define XFS_ERRTAG_MAX 17
#define XFS_IDMODULO_MAX 32
void add_to_flist(int, int, int);
void append_pathname(pathname_t *, char *);
+#ifdef HAVE_LIBATTR
int attr_list_path(pathname_t *, char *, const int, int);
+#else
+int attr_list_path(pathname_t *, char *, const int, int, attrlist_cursor_t *);
+#endif
int attr_remove_path(pathname_t *, const char *, int);
int attr_set_path(pathname_t *, const char *, const char *, const int, int);
void check_cwd(void);
struct timeval t;
ptrdiff_t srval;
int nousage = 0;
- xfs_error_injection_t err_inj;
+ xfs_error_injection_t err_inj;
errrange = errtag = 0;
umask(0);
perror(dirname);
exit(1);
}
- sprintf(buf, "fss%x", getpid());
+ sprintf(buf, "fss%x", (unsigned int)getpid());
fd = creat(buf, 0666);
if (lseek64(fd, (off64_t)(MAXFSIZE32 + 1ULL), SEEK_SET) < 0)
maxfsize = (off64_t)MAXFSIZE32;
}
int
-attr_list_path(pathname_t *name, char *buffer, const int buffersize, int flags)
+attr_list_path(pathname_t *name,
+ char *buffer,
+ const int buffersize,
+ int flags
+#ifndef HAVE_LIBATTR
+ , attrlist_cursor_t *cursor
+#endif
+ )
{
char buf[MAXNAMELEN];
pathname_t newname;
int rval;
+#ifdef ATTR_DONTFOLLOW
if (flags != ATTR_DONTFOLLOW) {
errno = EINVAL;
return -1;
}
+#endif
+
+#ifdef HAVE_LIBATTR
rval = llistxattr(name->path, buffer, buffersize);
+#else
+ rval = attr_list(name->path, buffer, buffersize, flags, cursor);
+#endif
if (rval >= 0 || errno != ENAMETOOLONG)
return rval;
separate_pathname(name, buf, &newname);
if (chdir(buf) == 0) {
+#ifdef HAVE_LIBATTR
rval = attr_list_path(&newname, buffer, buffersize, flags);
+#else
+ rval = attr_list_path(&newname, buffer, buffersize, flags, cursor);
+#endif
chdir("..");
}
free_pathname(&newname);
int e;
pathname_t f;
int fd;
- struct flock64 fl;
+ struct xfs_flock64 fl;
__int64_t lr;
off64_t off;
struct stat64 stb;
void
attr_remove_f(int opno, long r)
{
- char *aname, *l;
+ attrlist_ent_t *aep;
+ attrlist_t *alist;
+ char *aname;
char buf[4096];
+#ifndef HAVE_LIBATTR
+ attrlist_cursor_t cursor;
+#endif
int e;
int ent;
pathname_t f;
if (!get_fname(FT_ANYm, r, &f, NULL, NULL, &v))
append_pathname(&f, ".");
total = 0;
- e = attr_list_path(&f, buf, sizeof(buf), ATTR_DONTFOLLOW);
- check_cwd();
- if (e > 0) {
- for (l = buf; l - buf <= e; l += strlen(l)+1)
- if (strncmp(l, "user.",5) == 0)
- total++;
- }
+#ifndef HAVE_LIBATTR
+ bzero(&cursor, sizeof(cursor));
+#endif
+ do {
+ bzero(buf, sizeof(buf));
+#ifdef HAVE_LIBATTR
+ e = attr_list_path(&f, buf, sizeof(buf), ATTR_DONTFOLLOW);
+#else
+ e = attr_list_path(&f, buf, sizeof(buf), ATTR_DONTFOLLOW, &cursor);
+#endif
+ check_cwd();
+ if (e)
+ break;
+ alist = (attrlist_t *)buf;
+ total += alist->al_count;
+ } while (alist->al_more);
if (total == 0) {
if (v)
printf("%d/%d: attr_remove - no attrs for %s\n",
return;
}
which = (int)(random() % total);
+#ifndef HAVE_LIBATTR
+ bzero(&cursor, sizeof(cursor));
+#endif
ent = 0;
aname = NULL;
- e = attr_list_path(&f, buf, sizeof(buf), ATTR_DONTFOLLOW);
- check_cwd();
- if (e <= 0)
- return;
- for (l = buf; l - buf <= e; l += strlen(l)+1) {
- if (strncmp(l, "user.",5) == 0) {
- if (++ent == which) {
- aname = l;
- break;
- }
+ do {
+ bzero(buf, sizeof(buf));
+#ifdef HAVE_LIBATTR
+ e = attr_list_path(&f, buf, sizeof(buf), ATTR_DONTFOLLOW);
+#else
+ e = attr_list_path(&f, buf, sizeof(buf), ATTR_DONTFOLLOW, &cursor);
+#endif
+ check_cwd();
+ if (e)
+ break;
+ alist = (attrlist_t *)buf;
+ if (which < ent + alist->al_count) {
+ aep = (attrlist_ent_t *)
+ &buf[alist->al_offset[which - ent]];
+ aname = aep->a_name;
+ break;
}
- }
+ ent += alist->al_count;
+ } while (alist->al_more);
if (aname == NULL) {
if (v)
printf(
total += count;
free(t);
if (verbose)
- printf("%d/%d: bulkstat nent %d total %llu\n",
+ printf("%d/%d: bulkstat nent %d total %lld\n",
procid, opno, nent, (long long)total);
close(fd);
}
e = lchown_path(&f, u, g) < 0 ? errno : 0;
check_cwd();
if (v)
- printf("%d/%d: chown %s %d/%d %d\n", procid, opno, f.path, u, g, e);
+ printf("%d/%d: chown %s %d/%d %d\n", procid, opno, f.path, (int)u, (int)g, e);
free_pathname(&f);
}
int e;
pathname_t f;
int fd;
- struct flock64 fl;
+ struct xfs_flock64 fl;
__int64_t lr;
off64_t off;
struct stat64 stb;
int e;
pathname_t f;
int fd;
- struct flock64 fl;
+ struct xfs_flock64 fl;
__int64_t lr;
off64_t off;
struct stat64 stb;
int e;
pathname_t f;
int fd;
- struct flock64 fl;
+ struct xfs_flock64 fl;
__int64_t lr;
off64_t off;
struct stat64 stb;
* Small changes to work under Linux -- davej.
*/
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/param.h>
+#include "global.h"
+
#include <limits.h>
#include <time.h>
#include <strings.h>
#include <sys/file.h>
#include <sys/mman.h>
-#include <limits.h>
+#ifdef HAVE_ERR_H
#include <err.h>
+#endif
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#ifdef AIO
#include <libaio.h>
#endif
-#ifdef XFS
-#include <xfs/libxfs.h>
-#endif
#ifndef MAP_FILE
# define MAP_FILE 0
return (void *)ret;
}
+void
+vwarnc(int code, const char *fmt, va_list ap) {
+ fprintf(stderr, "fsx: ");
+ if (fmt != NULL) {
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, ": ");
+ }
+ fprintf(stderr, "%s\n", strerror(code));
+}
+
+void
+warn(const char * fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ vwarnc(errno, fmt, ap);
+ va_end(ap);
+}
+
void
prt(char *fmt, ...)
{
#ifdef XFS
if (prealloc) {
xfs_flock64_t resv = { 0 };
-
+#ifdef HAVE_XFS_PLATFORM_DEFS_H
if (!platform_test_xfs_fd(fd)) {
prterr(fname);
fprintf(stderr, "main: cannot prealloc, non XFS\n");
exit(96);
}
-
+#endif
resv.l_len = maxfilelen;
if ((xfsctl(fname, fd, XFS_IOC_RESVSP, &resv)) < 0) {
prterr(fname);
* Author: Richard Logan
*
*/
-#include <stdio.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <ctype.h>
-#include <fcntl.h>
-#include <time.h>
+
+#include "global.h"
+
+#ifdef HAVE_SYS_FILE_H
#include <sys/file.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/param.h>
-#include <sys/signal.h>
-#include <errno.h>
-#include <string.h>
+#endif
+
#include "dataascii.h"
#include "random_range.h"
#include "databin.h"
-#ifndef NO_XFS
-#include <xfs/libxfs.h>
-#endif
-
-#ifdef CRAY
-#include <sys/panic.h>
-#include <sys/category.h>
-#endif
extern char *openflags2symbols();
no_file_check=1;
if ( write_check_inter || file_check_inter )
printf("%s%s: %d Using random pattern - no data checking will be performed!\n",
- Progname, TagName, getpid());
+ Progname, TagName, (int)getpid());
}
else if ( max_lseek == LSK_EOFPLUSGROW || Mode & MODE_GROW_BY_LSEEK ) {
no_file_check=1;
if ( file_check_inter )
printf("%s%s: %d Using random lseek beyond EOF or lseek grow,\n\
-no whole file checking will be performed!\n", Progname, TagName, getpid());
+no whole file checking will be performed!\n", Progname, TagName, (int)getpid());
}
int fd;
char *file;
int grow_incr;
-char *buf;
+unsigned char *buf;
{
int noffset;
int ret;
+ /* REFERENCED */
int cur_offset;
char *errmsg;
int fsize; /* current size of file */
else if ( Pattern == PATTERN_PID )
datapidgen(Pid, buf, grow_incr, Woffset);
else if ( Pattern == PATTERN_ASCII )
- dataasciigen(NULL, buf, grow_incr, Woffset);
+ dataasciigen(NULL, (char *)buf, grow_incr, Woffset);
else if ( Pattern == PATTERN_RANDOM )
databingen('r', buf, grow_incr, Woffset);
else if ( Pattern == PATTERN_ALT )
else if ( Pattern == PATTERN_ONES )
databingen('o', buf, grow_incr, Woffset);
else
- dataasciigen(NULL, buf, grow_incr, Woffset);
+ dataasciigen(NULL, (char *)buf, grow_incr, Woffset);
if ( Debug > 2 )
printf("%s: %d DEBUG3 %s/%d: attempting to write %d bytes\n",
*****/
#if NEWIO
- ret=lio_write_buffer(fd, io_type, buf, grow_incr,
+ ret=lio_write_buffer(fd, io_type, (char *)buf, grow_incr,
SIGUSR1, &errmsg,0);
#else
ret=write_buffer(fd, io_type, buf, grow_incr, 0, &errmsg);
#endif
#ifndef NO_XFS
+#ifdef XFS_IOC_RESVSP
struct xfs_flock64 f;
f.l_whence = 0;
__FILE__, __LINE__, errno, strerror(errno));
return -1;
}
+#else
+ struct flock64 f;
+ f.l_whence = 0;
+ f.l_start = 0;
+ f.l_len = size;
+
+ /* non-zeroing reservation */
+ if( fcntl( fd, F_RESVSP64, &f ) == -1 ){
+		fprintf(stderr, "%s%s %s/%d: Unable to pre-alloc space: fcntl(F_RESVSP64) failed: %d %s\n",
+ Progname, TagName,
+ __FILE__, __LINE__, errno, strerror(errno));
+ return -1;
+ }
+#endif
#endif
return 0;
* iogen - a tool for generating file/sds io for a doio process
*/
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <signal.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <string.h>
-#include <signal.h>
-#include <time.h>
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/sysmacros.h>
-#ifdef CRAY
-#include <sys/file.h>
-#include <sys/iosw.h>
-#include <sys/listio.h>
+#include "global.h"
+
+#ifdef HAVE_SYS_SYSSGI_H
+#include <sys/syssgi.h>
#endif
-#ifdef sgi
-#include <sys/statvfs.h>
-#include <sys/fs/xfs_itable.h>
+
+#ifdef HAVE_SYS_UUID_H
+#include <sys/uuid.h>
#endif
-#ifndef NO_XFS
-#include <xfs/libxfs.h>
+#ifdef HAVE_SYS_FS_XFS_FSOPS_H
+#include <sys/fs/xfs_fsops.h>
#endif
-#ifdef CRAY
-#include "libkern.h"
+#ifdef HAVE_SYS_FS_XFS_ITABLE_H
+#include <sys/fs/xfs_itable.h>
#endif
+
#include "doio.h"
#include "str_to_bytes.h"
#include "string_to_tokens.h"
{ "llaread", LLAREAD, SY_ASYNC },
{ "llwrite", LLWRITE, 0 },
{ "llawrite", LLAWRITE, SY_ASYNC },
-#endif
{ "ffsync", DFFSYNC, SY_WRITE },
+#endif
#endif /* SGI */
#ifndef NO_XFS
{ "resvsp", RESVSP, SY_WRITE },
#endif
#ifndef NO_XFS
if( (fd = open(rec->f_path, O_RDWR|O_DIRECT, 0)) != -1 ) {
+#ifdef XFS_IOC_DIOINFO
if(xfsctl(rec->f_path, fd, XFS_IOC_DIOINFO, &finfo) != -1) {
+#else
+#ifdef F_DIOINFO
+ if(fcntl(fd, F_DIOINFO, &finfo) != -1) {
+#else
+bozo!
+#endif
+#endif
rec->f_riou = finfo.d_miniosz;
} else {
fprintf(stderr,
struct stat sbuf;
#ifndef NO_XFS
int nb;
- struct xfs_flock64 f;
+ struct flock64 f;
struct fsxattr xattr;
struct dioattr finfo;
char *b, *buf;
bzero(&xattr, sizeof(xattr));
xattr.fsx_xflags = XFS_XFLAG_REALTIME;
/*fprintf(stderr, "set: fsx_xflags = 0x%x\n", xattr.fsx_xflags);*/
+#ifdef XFS_IOC_FSSETXATTR
if( xfsctl(path, fd, XFS_IOC_FSSETXATTR, &xattr) == -1 ) {
+#else
+#ifdef F_FSSETXATTR
+ if (fcntl(fd, F_FSSETXATTR, &xattr) < 0) {
+#else
+bozo!
+#endif
+#endif
fprintf(stderr, "iogen%s: Error %s (%d) setting XFS XATTR->Realtime on file %s\n",
TagName, SYSERR, errno, path);
close(fd);
}
#ifdef DEBUG
+#ifdef XFS_IOC_FSGETXATTR
if( xfsctl(path, fd, XFS_IOC_FSGETXATTR, &xattr) == -1 ) {
+#else
+#ifdef F_FSGETXATTR
+ if (fcntl(fd, F_FSGETXATTR, &xattr) < 0) {
+#else
+bozo!
+#endif
+#endif
fprintf(stderr, "iogen%s: Error getting realtime flag %s (%d)\n",
TagName, SYSERR, errno);
close(fd);
fd, f.l_whence, (long long)f.l_start, (long long)f.l_len);*/
/* non-zeroing reservation */
+#ifdef XFS_IOC_RESVSP
if( xfsctl( path, fd, XFS_IOC_RESVSP, &f ) == -1) {
fprintf(stderr,
"iogen%s: Could not xfsctl(XFS_IOC_RESVSP) %d bytes in file %s: %s (%d)\n",
close(fd);
return -1;
}
+#else
+#ifdef F_RESVSP
+ if( fcntl( fd, F_RESVSP, &f ) == -1) {
+ fprintf(stderr,
+ "iogen%s: Could not fcntl(F_RESVSP) %d bytes in file %s: %s (%d)\n",
+ TagName, nbytes, path, SYSERR, errno);
+ close(fd);
+ return -1;
+ }
+#else
+bozo!
+#endif
+#endif
}
if( Oallocate ) {
(long long)f.l_len);*/
/* zeroing reservation */
+#ifdef XFS_IOC_ALLOCSP
if( xfsctl( path, fd, XFS_IOC_ALLOCSP, &f ) == -1) {
fprintf(stderr,
"iogen%s: Could not xfsctl(XFS_IOC_ALLOCSP) %d bytes in file %s: %s (%d)\n",
close(fd);
return -1;
}
+#else
+#ifdef F_ALLOCSP
+ if ( fcntl(fd, F_ALLOCSP, &f) < 0) {
+ fprintf(stderr,
+ "iogen%s: Could not fcntl(F_ALLOCSP) %d bytes in file %s: %s (%d)\n",
+ TagName, nbytes, path, SYSERR, errno);
+ close(fd);
+ return -1;
+ }
+#else
+bozo!
+#endif
+#endif
}
#endif
if(Owrite == 2) {
close(fd);
if( (fd = open(path, O_CREAT|O_RDWR|O_DIRECT, 0)) != -1 ) {
+#ifdef XFS_IOC_DIOINFO
if(xfsctl(path, fd, XFS_IOC_DIOINFO, &finfo) == -1) {
+#else
+#ifdef F_DIOINFO
+ if (fcntl(fd, F_DIOINFO, &finfo) < 0) {
+#else
+bozo!
+#endif
+#endif
fprintf(stderr,
"iogen%s: Error %s (%d) getting direct I/O info for file %s\n",
TagName, SYSERR, errno, path);
case $(uname) in
IRIX*)
- sz=$( perl -le 'print int( '$blks' * '$size' / 100 )' )
+ echo $blke
+ #sz=$( perl -le 'print int( "$blke" * "$size" / 100 )' )
;;
*)
- sz=$(expr \( $blks '*' $size \) / 100)
+ #sz=$(expr \( $blks '*' $size \) / 100)
;;
esac
-AC_DEFUN([AC_PACKAGE_WANT_LIBGDBM],
- [ AC_CHECK_HEADER([gdbm/ndbm.h], [have_db=true ], [ have_db=false ])
+AC_DEFUN([AC_PACKAGE_WANT_NDBM],
+ [ AC_CHECK_HEADERS(ndbm.h, [ have_db=true ], [ have_db=false ])
+ libgdbm=""
+ AC_SUBST(libgdbm)
+ AC_SUBST(have_db)
+ ])
+
+AC_DEFUN([AC_PACKAGE_WANT_GDBM],
+ [ AC_CHECK_HEADERS([gdbm/ndbm.h], [ have_db=true ], [ have_db=false ])
if test $have_db = true -a -f /usr/lib/libgdbm.a; then
libgdbm="/usr/lib/libgdbm.a"
fi
# MSGFMT MSGMERGE RPM
#
AC_DEFUN([AC_PACKAGE_UTILITIES],
- [ if test -z "$CC"; then
- AC_PROG_CC
- fi
+ [ AC_PROG_CC
cc="$CC"
AC_SUBST(cc)
AC_PACKAGE_NEED_UTILITY($1, "$cc", cc, [C compiler])
if test -z "$MAKE"; then
- AC_PATH_PROG(MAKE, make, /usr/bin/make)
+ AC_PATH_PROG(MAKE, gmake,, /usr/bin:/usr/freeware/bin)
+ fi
+ if test -z "$MAKE"; then
+ AC_PATH_PROG(MAKE, make,, /usr/bin)
fi
make=$MAKE
AC_SUBST(make)
AC_PACKAGE_NEED_UTILITY($1, "$make", make, [GNU make])
if test -z "$LIBTOOL"; then
- AC_PATH_PROG(LIBTOOL, libtool,,/usr/bin:/usr/local/bin)
+ AC_PATH_PROG(LIBTOOL, glibtool,, /usr/bin)
+ fi
+ if test -z "$LIBTOOL"; then
+ AC_PATH_PROG(LIBTOOL, libtool,, /usr/bin:/usr/local/bin:/usr/freeware/bin)
fi
libtool=$LIBTOOL
AC_SUBST(libtool)
AC_PACKAGE_NEED_UTILITY($1, "$libtool", libtool, [GNU libtool])
if test -z "$TAR"; then
- AC_PATH_PROG(TAR, tar)
+ AC_PATH_PROG(TAR, tar,, /usr/freeware/bin:/bin:/usr/local/bin:/usr/bin)
fi
tar=$TAR
AC_SUBST(tar)
if test -z "$ZIP"; then
- AC_PATH_PROG(ZIP, gzip, /bin/gzip)
+ AC_PATH_PROG(ZIP, gzip,, /bin:/usr/local/bin:/usr/freeware/bin)
fi
+
zip=$ZIP
AC_SUBST(zip)
+
if test -z "$MAKEDEPEND"; then
AC_PATH_PROG(MAKEDEPEND, makedepend, /bin/true)
fi
makedepend=$MAKEDEPEND
AC_SUBST(makedepend)
+
if test -z "$AWK"; then
- AC_PATH_PROG(AWK, awk, /bin/awk)
+ AC_PATH_PROG(AWK, awk,, /bin:/usr/bin)
fi
awk=$AWK
AC_SUBST(awk)
+
if test -z "$SED"; then
- AC_PATH_PROG(SED, sed, /bin/sed)
+ AC_PATH_PROG(SED, sed,, /bin:/usr/bin)
fi
sed=$SED
AC_SUBST(sed)
+
if test -z "$ECHO"; then
- AC_PATH_PROG(ECHO, echo, /bin/echo)
+ AC_PATH_PROG(ECHO, echo,, /bin:/usr/bin)
fi
echo=$ECHO
AC_SUBST(echo)
+
if test -z "$SORT"; then
- AC_PATH_PROG(SORT, sort, /bin/sort)
+ AC_PATH_PROG(SORT, sort,, /bin:/usr/bin)
fi
sort=$SORT
AC_SUBST(sort)
if test "$enable_gettext" = yes; then
if test -z "$MSGFMT"; then
- AC_CHECK_PROG(MSGFMT, msgfmt, /usr/bin/msgfmt)
+ AC_PATH_PROG(MSGFMT, msgfmt,, /usr/bin:/usr/freeware/bin)
fi
msgfmt=$MSGFMT
AC_SUBST(msgfmt)
AC_PACKAGE_NEED_UTILITY($1, "$msgfmt", msgfmt, gettext)
+
if test -z "$MSGMERGE"; then
- AC_CHECK_PROG(MSGMERGE, msgmerge, /usr/bin/msgmerge)
+ AC_PATH_PROG(MSGMERGE, msgmerge,, /usr/bin:/usr/freeware/bin)
fi
msgmerge=$MSGMERGE
AC_SUBST(msgmerge)
fi
if test -z "$RPM"; then
- AC_PATH_PROG(RPM, rpm, /bin/rpm)
+ AC_PATH_PROG(RPM, rpm,, /bin:/usr/freeware/bin)
fi
rpm=$RPM
AC_SUBST(rpm)
+
dnl .. and what version is rpm
rpm_version=0
test -x $RPM && rpm_version=`$RPM --version \
int c, i;
acl_t acl1, acl2, acl3;
acl_entry_t ace1;
+ char *p;
- prog = basename(argv[0]);
+ prog = argv[0];
+ for (p = prog; *p; p++) {
+ if (*p == '/') {
+ prog = p + 1;
+ }
+ }
while ((c = getopt(argc, argv, "i")) != -1) {
switch (c) {
* filesystem allocation, and must equal 512. Length units given to bio
* routines are in BB's.
*/
+
+/* Assume that if we have BTOBB, then we have the rest */
+#ifndef BTOBB
#define BBSHIFT 9
#define BBSIZE (1<<BBSHIFT)
#define BBMASK (BBSIZE-1)
#define BTOBB(bytes) (((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT)
#define BTOBBT(bytes) ((__u64)(bytes) >> BBSHIFT)
#define BBTOB(bbs) ((bbs) << BBSHIFT)
-#define OFFTOBB(bytes) (((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT)
#define OFFTOBBT(bytes) ((__u64)(bytes) >> BBSHIFT)
-#define BBTOOFF(bbs) ((__u64)(bbs) << BBSHIFT)
#define SEEKLIMIT32 0x7fffffff
#define BBSEEKLIMIT32 BTOBBT(SEEKLIMIT32)
#define SEEKLIMIT 0x7fffffffffffffffLL
#define BBSEEKLIMIT OFFTOBBT(SEEKLIMIT)
+#endif
+
+#ifndef OFFTOBB
+#define OFFTOBB(bytes) (((__u64)(bytes) + BBSIZE - 1) >> BBSHIFT)
+#define BBTOOFF(bbs) ((__u64)(bbs) << BBSHIFT)
+#endif
#define FSBTOBB(f) (OFFTOBBT(FSBTOOFF(f)))
#define BBTOFSB(b) (OFFTOFSB(BBTOOFF(b)))
/* params are in bytes */
void map(off64_t off, off64_t len)
{
- struct getbmap bm[2]={{0}};
+ struct getbmap bm[2];
+ bzero(bm, sizeof(bm));
+
bm[0].bmv_count = 2;
bm[0].bmv_offset = OFFTOBB(off);
if (len==(off64_t)-1) { /* unsigned... */
bm[0].bmv_length = -1;
printf(" MAP off=%lld, len=%lld [%lld-]\n",
- (__s64)off, (__s64)len,
- (__s64)BBTOFSB(bm[0].bmv_offset));
+ (long long)off, (long long)len,
+ (long long)BBTOFSB(bm[0].bmv_offset));
} else {
bm[0].bmv_length = OFFTOBB(len);
printf(" MAP off=%lld, len=%lld [%lld,%lld]\n",
- (__s64)off, (__s64)len,
- (__s64)BBTOFSB(bm[0].bmv_offset),
- (__s64)BBTOFSB(bm[0].bmv_length));
+ (long long)off, (long long)len,
+ (long long)BBTOFSB(bm[0].bmv_offset),
+ (long long)BBTOFSB(bm[0].bmv_length));
}
printf(" [ofs,count]: start..end\n");
for (;;) {
+#ifdef XFS_IOC_GETBMAP
if (xfsctl(filename, fd, XFS_IOC_GETBMAP, bm) < 0) {
+#else
+#ifdef F_GETBMAP
+ if (fcntl(fd, F_GETBMAP, bm) < 0) {
+#else
+bozo!
+#endif
+#endif
perror("getbmap");
break;
}
+
if (bm[0].bmv_entries == 0)
break;
+
printf(" [%lld,%lld]: ",
- (__s64)BBTOFSB(bm[1].bmv_offset),
- (__s64)BBTOFSB(bm[1].bmv_length));
+ (long long)BBTOFSB(bm[1].bmv_offset),
+ (long long)BBTOFSB(bm[1].bmv_length));
+
if (bm[1].bmv_block == -1)
printf("hole");
else
printf("%lld..%lld",
- (__s64)BBTOFSB(bm[1].bmv_block),
- (__s64)BBTOFSB(bm[1].bmv_block +
+ (long long)BBTOFSB(bm[1].bmv_block),
+ (long long)BBTOFSB(bm[1].bmv_block +
bm[1].bmv_length - 1));
printf("\n");
}
char line[1024];
off64_t off;
int oflags;
- static char *opnames[] =
- { "freesp", "allocsp", "unresvsp", "resvsp" };
+ static char *opnames[] = { "freesp",
+ "allocsp",
+ "unresvsp",
+ "resvsp" };
int opno;
- static int optab[] =
- { XFS_IOC_FREESP64, XFS_IOC_ALLOCSP64, XFS_IOC_UNRESVSP64, XFS_IOC_RESVSP64 };
+
+ /* Assume that if we have FREESP64 then we have the rest */
+#ifdef XFS_IOC_FREESP64
+#define USE_XFSCTL
+ static int optab[] = { XFS_IOC_FREESP64,
+ XFS_IOC_ALLOCSP64,
+ XFS_IOC_UNRESVSP64,
+ XFS_IOC_RESVSP64 };
+#else
+#ifdef F_FREESP64
+#define USE_FCNTL
+ static int optab[] = { F_FREESP64,
+ F_ALLOCSP64,
+ F_UNRESVSP64,
+ F_RESVSP64 };
+#else
+bozo!
+#endif
+#endif
int rflag = 0;
struct statvfs64 svfs;
int tflag = 0;
if (rflag) {
struct fsxattr a;
+#ifdef XFS_IOC_FSGETXATTR
if (xfsctl(filename, fd, XFS_IOC_FSGETXATTR, &a) < 0) {
perror("XFS_IOC_FSGETXATTR");
status = 1;
goto done;
}
+#else
+#ifdef F_FSGETXATTR
+ if (fcntl(fd, F_FSGETXATTR, &a) < 0) {
+ perror("F_FSGETXATTR");
+ status = 1;
+ goto done;
+ }
+#else
+bozo!
+#endif
+#endif
+
a.fsx_xflags |= XFS_XFLAG_REALTIME;
+
+#ifdef XFS_IOC_FSSETXATTR
if (xfsctl(filename, fd, XFS_IOC_FSSETXATTR, &a) < 0) {
perror("XFS_IOC_FSSETXATTR");
status = 1;
goto done;
}
+#else
+#ifdef F_FSSETXATTR
+ if (fcntl(fd, F_FSSETXATTR, &a) < 0) {
+ perror("F_FSSETXATTR");
+ status = 1;
+ goto done;
+ }
+#else
+bozo!
+#endif
+#endif
}
while (!done) {
char *p;
len = v;
printf(" CMD %s, off=%lld, len=%lld\n",
- opnames[opno], (__s64)off, (__s64)len);
+ opnames[opno], (long long)off, (long long)len);
f.l_len = len;
+#ifdef USE_XFSCTL
c = xfsctl(filename, fd, optab[opno], &f);
+#else
+#ifdef USE_FCNTL
+ c = fcntl(fd, optab[opno], &f);
+#else
+bozo!
+#endif
+#endif
if (c < 0) {
perror(opnames[opno]);
break;
off = FSBTOOFF(v);
else
off = v;
- printf(" TRUNCATE off=%lld\n", (__s64)off);
+ printf(" TRUNCATE off=%lld\n", (long long)off);
if (ftruncate64(fd, off) < 0) {
perror("ftruncate");
break;
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
-#include <xfs/libxfs.h>
+#include "global.h"
#include <xfs/jdm.h>
void
printbstat(xfs_bstat_t *sp)
{
printf("ino %lld mode %#o nlink %d uid %d gid %d rdev %#x\n",
- sp->bs_ino, sp->bs_mode, sp->bs_nlink,
+ (long long)sp->bs_ino, sp->bs_mode, sp->bs_nlink,
sp->bs_uid, sp->bs_gid, sp->bs_rdev);
printf("\tblksize %d size %lld blocks %lld xflags %#x extsize %d\n",
- sp->bs_blksize, sp->bs_size, sp->bs_blocks,
+ sp->bs_blksize, (long long)sp->bs_size, (long long)sp->bs_blocks,
sp->bs_xflags, sp->bs_extsize);
dotime(&sp->bs_atime, "atime");
dotime(&sp->bs_mtime, "mtime");
printstat(struct stat64 *sp)
{
printf("ino %lld mode %#o nlink %d uid %d gid %d rdev %#x\n",
- (xfs_ino_t)sp->st_ino, sp->st_mode, sp->st_nlink,
- sp->st_uid, sp->st_gid, (unsigned int)sp->st_rdev);
+ (long long)sp->st_ino, (unsigned int)sp->st_mode, (int)sp->st_nlink,
+ (int)sp->st_uid, (int)sp->st_gid, (int)sp->st_rdev);
printf("\tblksize %llu size %lld blocks %lld\n",
- (__uint64_t)sp->st_blksize, sp->st_size, sp->st_blocks);
+ (unsigned long long)sp->st_blksize, (long long)sp->st_size, (long long)sp->st_blocks);
dotime(&sp->st_atime, "atime");
dotime(&sp->st_mtime, "mtime");
dotime(&sp->st_ctime, "ctime");
if (verbose)
printf(
- "XFS_IOC_FSBULKSTAT test: last=%lld nent=%d\n", last, nent);
+ "XFS_IOC_FSBULKSTAT test: last=%lld nent=%d\n", (long long)last, nent);
bulkreq.lastip = &last;
bulkreq.icount = nent;
if (verbose)
printf(
"XFS_IOC_FSBULKSTAT test: last=%lld ret=%d count=%d total=%d\n",
- last, ret, count, total);
+ (long long)last, ret, count, total);
if (count == 0)
exit(0);
if (verbose && nread > 0)
printf(
"readlink: ino %lld: <%*s>\n",
- t[i].bs_ino,
+ (long long)t[i].bs_ino,
nread,
cc_readlinkbufp);
free(cc_readlinkbufp);
if ( nread < 0 ) {
printf(
"could not read symlink ino %llu\n",
- t[i].bs_ino );
+ (unsigned long long)t[i].bs_ino );
printbstat(&t[i]);
}
break;
if (fd < 0) {
printf(
"unable to open handle ino %lld: %s\n",
- t[i].bs_ino, strerror(errno));
+ (long long)t[i].bs_ino, strerror(errno));
continue;
}
if (fstat64(fd, &sb) < 0) {
printf(
"unable to stat ino %lld: %s\n",
- t[i].bs_ino, strerror(errno));
+ (long long)t[i].bs_ino, strerror(errno));
}
close(fd);
if (verbose)
printf(
"XFS_IOC_FSBULKSTAT test: last=%lld nent=%d ret=%d count=%d\n",
- last, nent, ret, count);
+ (long long)last, nent, ret, count);
return 1;
}
*/
#include "global.h"
+
+#ifdef HAVE_GDBM_NDBM_H
#include <gdbm/ndbm.h>
+#else
+#ifdef HAVE_GDBM_H
+#include <gdbm.h>
+#else
+#ifdef HAVE_NDBM_H
+#include <ndbm.h>
+#else
+bozo!
+#endif
+#endif
+#endif
+
/* #define WorkDir "/xfs" */
#define DBNAME "DBtest"
printf("\tperforming lookups for %d iterations...\n", loops);
if (randseed)
printf("\tusing %d as seed for srandom()...\n\n", randseed);
+ fflush(stdout);
InitDbmLookup(numrecs);
printf("\t\nThere were %d duplicate checksums generated\n", Dups);
for (l=0; l<loops; l++) {
}
printf("\nCleaning up database...\n");
printf("\t\nThere were %d duplicate checksums generated\n", Dups);
+ fflush(stdout);
CleanupDbmLookup();
if (debugflg)
for (l=0; l<Dups; l++) {
int i, rc;
myDB dbRec;
- sprintf(filename, "%s-%d", DBNAME, getpid());
+ sprintf(filename, "%s-%d", DBNAME, (int)getpid());
+ if (debugflg) {
+ printf("dbm_open(%s, O_WRONLY|O_CREAT, 0644)\n", filename);
+ fflush(stdout);
+ }
dbm = dbm_open(filename, O_WRONLY|O_CREAT, 0644);
if(dbm == NULL) DoSysError("\ndbm_open", (int)dbm);
if ( creatDbRec(&dbRec) )
DoSysError("\ncreatDbRec", -1);
- if (debugflg)
+ if (debugflg) {
printf("created rec #%-7d\t%x\r", i+1, dbRec.checksum);
+ fflush(stdout);
+ }
if (InsertKey(KeyArray, keyIdx, dbRec.checksum))
keyIdx++;
key.dsize = sizeof(dbRec.checksum);
content.dptr = (char *)&dbRec;
content.dsize = sizeof(dbRec);
-write(2, NULL, 0);
+
+ write(2, NULL, 0);
+
+ if (debugflg) {
+ printf("dbm_store(dbm, key, content, DBM_INSERT)\n");
+ }
rc = dbm_store(dbm, key, content, DBM_INSERT);
if (rc < 0)
DoSysError("\ndbm_store", rc);
}
}
numDbmEntries = i;
+ if (debugflg) {
+ printf("dbm_close(dbm)\n");
+ fflush(stdout);
+ }
dbm_close(dbm); /* close to eliminate chance of in-memory caching */
+ if (debugflg) {
+	printf("dbm_open(%s, O_RDONLY, 0)\n", filename);
+ fflush(stdout);
+ }
dbm = dbm_open(filename, O_RDONLY, 0);
if(dbm == NULL) DoSysError("\ndbm_open", (int)dbm);
return 0;
unsigned tmpck;
printf("\n\tSequential lookups...\n");
+ fflush(stdout);
for(i=0, j=0; i<numDbmEntries; i++) {
key.dptr = (char *)(KeyArray+j);
key.dsize = sizeof(KeyArray[0]);
write(2, NULL, 0);
+ if (debugflg) {
+ printf("dbm_fetch(dbm, key = %d)\n", j);
+ fflush(stdout);
+ }
content = dbm_fetch(dbm, key);
dbp = (myDB *)content.dptr;
if ( !content.dptr ) {
assert( dbp );
}
- if (debugflg && dbp)
+ if (debugflg && dbp) {
printf("Seq rec #%-6d: checksum %4x (%4x)\r", i,
dbp->checksum, KeyArray[j]);
+ fflush(stdout);
+ }
if (content.dsize == 0) {
printf("\nrec #%-8d: key = %4x (%d)\n", i, KeyArray[j], j);
+ fflush(stdout);
DoSysError("\ndbm_fetch", content.dsize);
}
if (dbp->checksum != KeyArray[j])
j = 0;
}
printf("\n\n\tRandom lookups...\n");
+ fflush(stdout);
for(i=0; i<numDbmEntries; i++) {
n = random() % keyIdx;
key.dptr = (char *)(KeyArray+n);
key.dsize = sizeof(KeyArray[0]);
+ if (debugflg) {
+ printf("dbm_fetch(dbm, key = %d)\n", n);
+ fflush(stdout);
+ }
content = dbm_fetch(dbm, key);
dbp = (myDB *)content.dptr;
if ( !content.dptr ) {
assert( dbp );
}
- if (debugflg && dbp)
+ if (debugflg && dbp) {
printf("Rnd rec #%-6d: checksum %4x (%4x)\r", i,
dbp->checksum, KeyArray[n]);
+ fflush(stdout);
+ }
if (content.dsize == 0)
DoSysError("\ndbm_fetch", content.dsize);
char filename[100];
int rc;
+ if (debugflg) {
+ printf("dbm_close(dbm)\n");
+ fflush(stdout);
+ }
dbm_close(dbm);
- sprintf(filename, "%s-%d.dir", DBNAME, getpid());
+ sprintf(filename, "%s-%d.dir", DBNAME, (int)getpid());
rc = unlink(filename);
if (rc) DoSysError("\nunlink", rc);
- sprintf(filename, "%s-%d.pag", DBNAME, getpid());
+ sprintf(filename, "%s-%d.pag", DBNAME, (int)getpid());
rc = unlink(filename);
if (rc) DoSysError("\nunlink", rc);
}
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
-#include <xfs/libxfs.h>
+#include "global.h"
int
main(int argc, char **argv)
if ((lseek64(fd, offset, SEEK_SET)) < 0) {
fprintf(stderr, "%s: error seeking to offset %llu "
"on \"%s\": %s\n",
- progname, offset, path, strerror(errno));
+ progname, (unsigned long long)offset, path, strerror(errno));
exit(1);
}
#define MKNOD_DEV 0
-static int dirstress(char *dirname, int dirnum, int nfiles, int keep);
+static int dirstress(char *dirname, int dirnum, int nfiles, int keep, int nprocs_per_dir);
static int create_entries(int nfiles);
static int scramble_entries(int nfiles);
static int remove_entries(int nfiles);
pid=getpid();
if (verbose) fprintf(stderr,"** [%d] forked\n", pid);
- r=dirstress(dirname, i / nprocs_per_dir, nfiles, keep);
- if (verbose) fprintf(stderr,"** [%d] exit\n", pid);
+ r=dirstress(dirname, i / nprocs_per_dir, nfiles, keep, nprocs_per_dir);
+ if (verbose) fprintf(stderr,"** [%d] exit %d\n", pid, r);
exit(r);
}
}
istatus+=status/256;
printf("INFO: Dirstress complete\n");
- if (verbose) fprintf(stderr,"** [%d] exit %d\n", pid, istatus);
+ if (verbose) fprintf(stderr,"** [%d] parent exit %d\n", pid, istatus);
return istatus;
}
char *dirname,
int dirnum,
int nfiles,
- int keep)
+ int keep,
+ int nprocs_per_dir)
{
int error;
char buf[1024];
if (verbose) fprintf(stderr,"** [%d] chdir ..\n", pid);
error = chdir("..");
if (error) {
+ /* If this is multithreaded, then expecting an ENOENT here is fine */
+ if (nprocs_per_dir > 1 && errno == ENOENT) {
+ return 0;
+ }
+
perror("Cannot chdir out of pid directory");
return 1;
}
if (verbose) fprintf(stderr,"** [%d] chdir ..\n", pid);
error = chdir("..");
if (error) {
+ /* If this is multithreaded, then expecting an ENOENT here is fine */
+ if (nprocs_per_dir > 1 && errno == ENOENT) {
+ return 0;
+ }
+
perror("Cannot chdir out of working directory");
return 1;
}
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
-#include <xfs/libxfs.h>
+#include "global.h"
void expect_error(int r, int err)
{
}
fsfd = open(argv[1], O_RDONLY);
+
if (fsfd < 0) {
perror("open");
exit(1);
}
printf("--- xfsctl with bad output address\n");
+#ifdef XFS_IOC_FSCOUNTS
expect_error(xfsctl(argv[1], fsfd, XFS_IOC_FSCOUNTS, NULL), EFAULT);
+#else
+#ifdef XFS_FS_COUNTS
+ expect_error(syssgi(SGI_XFS_FSOPERATIONS, fsfd, XFS_FS_COUNTS, NULL, NULL), EFAULT);
+#else
+bozo!
+#endif
+#endif
+
printf("--- xfsctl with bad input address\n");
+#ifdef XFS_IOC_SET_RESBLKS
expect_error(xfsctl(argv[1], fsfd, XFS_IOC_SET_RESBLKS, NULL), EFAULT);
+#else
+#ifdef XFS_SET_RESBLKS
+ expect_error(syssgi(SGI_XFS_FSOPERATIONS, fsfd, XFS_SET_RESBLKS, NULL, NULL), EFAULT);
+#else
+bozo!
+#endif
+#endif
close(fsfd);
* -w report bits per long
*/
-#include <xfs/libxfs.h>
+#include "global.h"
+
#include <sys/quota.h>
#include <sys/resource.h>
#include <signal.h>
+
+#ifdef HAVE_XFS_XQM_H
#include <xfs/xqm.h>
+#endif
+
+#ifndef USRQUOTA
+#define USRQUOTA 0
+#endif
+
+#ifndef GRPQUOTA
+#define GRPQUOTA 1
+#endif
int verbose = 0;
}
/* 98789 is greater than 2^16 (65536) */
- if ((__u32)sbuf.st_uid == 98789 || (__u32)sbuf.st_gid == 98789)
+ if ((__uint32_t)sbuf.st_uid == 98789 || (__uint32_t)sbuf.st_gid == 98789)
return(0);
if (verbose)
fprintf(stderr, "lstat64 on %s gave uid=%d, gid=%d\n",
- filename, sbuf.st_uid, sbuf.st_gid);
+ filename, (int)sbuf.st_uid, (int)sbuf.st_gid);
return(1);
}
hastruncate64(char *filename)
{
struct rlimit64 rlimit64;
- off64_t bigoff = 4294967307; /* > 2^32 */
+ off64_t bigoff = 4294967307; /* > 2^32 */
struct stat64 bigst;
int fd;
if (verbose)
fprintf(stderr, "fstat64 on %s gave sz=%lld (truncsz=%lld)\n",
- filename, bigst.st_size, bigoff);
+ filename, (long long)bigst.st_size, (long long)bigoff);
if (bigst.st_size != bigoff)
return(1);
return (access("/proc/fs/xfs/xqm", F_OK) < 0);
memset(&qstat, 0, sizeof(fs_quota_stat_t));
+
+#ifdef QCMD
qcmd = QCMD(Q_XGETQSTAT, type);
+#else
+ qcmd = Q_GETQSTAT;
+#endif
+
if (quotactl(qcmd, device, 0, (caddr_t)&qstat) < 0) {
if (verbose)
perror("quotactl");
exit(0);
}
if (wflag) {
+#ifdef BITS_PER_LONG
printf("%d\n", BITS_PER_LONG);
+#else
+#ifdef NBBY
+ /* This can change under IRIX depending on whether we compile
+ * with -n32/-32 or -64
+ */
+ printf("%d\n", (int)(NBBY * sizeof(long)));
+#else
+bozo!
+#endif
+#endif
exit(0);
}
* determined.
*/
-#include <xfs/libxfs.h>
+#include "global.h"
+
+#define constpp char * const *
#define N(x) (sizeof(x)/sizeof(x[0]))
/* defaults */
+
progname = basename(argv[0]);
+ for (p = progname; *p; p++) {
+ if (*p == '/') {
+ progname = p + 1;
+ }
+ }
nbytes = 1024 * 1024;
dlen = 73; /* includes the trailing newline */
dfile, strerror(errno));
status = 1;
}
- exit(status);
+ return status;
}
XFS space preallocation changes -- lord@sgi.com, April 2003
*/
-#include <dirent.h>
+
+#include "global.h"
+
#include <sys/mman.h>
-#include <sys/wait.h>
-#include <xfs/libxfs.h>
/* variables settable on the command line */
static int loop_count = 100;
/* generate a buffer for a particular child, fnum etc. Just use a simple buffer
to make debugging easy
*/
-static void gen_buffer(uchar *buf, int loop, int child, int fnum, int ofs)
+static void gen_buffer(char *buf, int loop, int child, int fnum, int ofs)
{
uchar v = (loop+child+fnum+(ofs/block_size)) % 256;
memset(buf, v, block_size);
*/
static void check_buffer(uchar *buf, int loop, int child, int fnum, int ofs)
{
- uchar *buf2;
+ char *buf2;
buf2 = x_malloc(block_size);
*/
static void create_file(const char *dir, int loop, int child, int fnum)
{
- uchar *buf;
+ char *buf;
int size, fd;
char fname[1024];
}
if (do_prealloc) {
- xfs_flock64_t resv;
+ struct flock64 resv;
resv.l_whence = 0;
resv.l_start = 0;
resv.l_len = file_size;
- if ((xfsctl(fname, fd, XFS_IOC_RESVSP, &resv)) < 0) {
+#ifdef XFS_IOC_RESVSP64
+ if ((xfsctl(fname, fd, XFS_IOC_RESVSP64, &resv)) < 0) {
+ perror(fname);
+ exit(1);
+ }
+#else
+#ifdef F_RESVSP64
+ if ((fcntl(fd, F_RESVSP64, &resv)) < 0) {
perror(fname);
exit(1);
}
+#else
+bozo!
+#endif
+#endif
}
if (!use_mmap) {
#ifndef GLOBAL_H
#define GLOBAL_H
-/* xfs-specific includes */
+#include <config.h>
+#ifdef HAVE_XFS_LIBXFS_H
#include <xfs/libxfs.h>
+#endif
+
+#ifdef HAVE_XFS_JDM_H
+#include <xfs/jdm.h>
+#endif
+
+#ifdef HAVE_ATTR_ATTRIBUTES_H
#include <attr/attributes.h>
+#endif
-/* libc includes */
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+#ifdef HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
+#endif
+
+#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
+#endif
+
+#ifdef HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
+#endif
+
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
+#ifdef HAVE_DIRENT_H
#include <dirent.h>
+#endif
+
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_ERRNO_H
+#include <errno.h>
+#endif
+
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+#ifdef HAVE_SYS_FCNTL_H
+#include <fcntl.h>
#endif
+
+#ifdef HAVE_TIME_H
+#include <sys/time.h>
+#endif
+
+#ifdef HAVE_WAIT_H
+#include <sys/wait.h>
+#endif
+
+#ifdef HAVE_DIRENT_H
+#include <dirent.h>
+#endif
+
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#ifdef HAVE_SYS_ATTRIBUTES_H
+#include <sys/attributes.h>
+#endif
+
+#ifdef HAVE_LIBGEN_H
+#include <libgen.h>
+#endif
+
+#ifdef HAVE_ASSERT_H
+#include <assert.h>
+#endif
+
+#ifdef STDC_HEADERS
+#include <signal.h>
+#endif
+
+#ifdef HAVE_STRINGS_H
+#include <strings.h>
+#endif
+
+#ifdef HAVE_SYS_SYSSGI_H
+#include <sys/syssgi.h>
+#endif
+
+#ifdef HAVE_SYS_UUID_H
+#include <sys/uuid.h>
+#endif
+
+#ifdef HAVE_SYS_FS_XFS_FSOPS_H
+#include <sys/fs/xfs_fsops.h>
+#endif
+
+#ifdef HAVE_SYS_FS_XFS_ITABLE_H
+#include <sys/fs/xfs_itable.h>
+#endif
+
+#ifdef HAVE_BSTRING_H
+#include <bstring.h>
+#endif
+
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#endif
+
+#ifdef sgi
+#define xfs_flock64 flock64
+#define xfs_flock64_t struct flock64
+
+#define XFS_IOC_DIOINFO F_DIOINFO
+#define XFS_IOC_FSGETXATTR F_FSGETXATTR
+#define XFS_IOC_FSSETXATTR F_FSSETXATTR
+#define XFS_IOC_ALLOCSP64 F_ALLOCSP64
+#define XFS_IOC_FREESP64 F_FREESP64
+#define XFS_IOC_GETBMAP F_GETBMAP
+#define XFS_IOC_FSSETDM F_FSSETDM
+#define XFS_IOC_RESVSP F_RESVSP
+#define XFS_IOC_UNRESVSP F_UNRESVSP
+#define XFS_IOC_RESVSP64 F_RESVSP64
+#define XFS_IOC_UNRESVSP64 F_UNRESVSP64
+#define XFS_IOC_GETBMAPA F_GETBMAPA
+#define XFS_IOC_FSGETXATTRA F_FSGETXATTRA
+#define XFS_IOC_GETBMAPX F_GETBMAPX
+
+#define XFS_IOC_FSGEOMETRY_V1 XFS_FS_GEOMETRY_V1
+#define XFS_IOC_FSBULKSTAT SGI_FS_BULKSTAT
+#define XFS_IOC_FSBULKSTAT_SINGLE SGI_FS_BULKSTAT_SINGLE
+#define XFS_IOC_FSINUMBERS /* TODO */
+#define XFS_IOC_PATH_TO_FSHANDLE /* TODO */
+#define XFS_IOC_PATH_TO_HANDLE /* TODO */
+#define XFS_IOC_FD_TO_HANDLE /* TODO */
+#define XFS_IOC_OPEN_BY_HANDLE /* TODO */
+#define XFS_IOC_READLINK_BY_HANDLE /* TODO */
+#define XFS_IOC_SWAPEXT /* TODO */
+#define XFS_IOC_FSGROWFSDATA XFS_GROWFS_DATA
+#define XFS_IOC_FSGROWFSLOG XFS_GROWFS_LOG
+#define XFS_IOC_FSGROWFSRT XFS_GROWFS_RT
+#define XFS_IOC_FSCOUNTS XFS_FS_COUNTS
+#define XFS_IOC_SET_RESBLKS XFS_SET_RESBLKS
+#define XFS_IOC_GET_RESBLKS XFS_GET_RESBLKS
+#define XFS_IOC_ERROR_INJECTION SGI_XFS_INJECT_ERROR
+#define XFS_IOC_ERROR_CLEARALL SGI_XFS_CLEARALL_ERROR
+#define XFS_IOC_FREEZE XFS_FS_FREEZE
+#define XFS_IOC_THAW XFS_FS_THAW
+#define XFS_IOC_FSSETDM_BY_HANDLE /* TODO */
+#define XFS_IOC_ATTRLIST_BY_HANDLE /* TODO */
+#define XFS_IOC_ATTRMULTI_BY_HANDLE /* TODO */
+#define XFS_IOC_FSGEOMETRY XFS_FS_GEOMETRY
+#define XFS_IOC_GOINGDOWN XFS_FS_GOINGDOWN
+
+typedef struct xfs_error_injection {
+ __int32_t fd;
+ __int32_t errtag;
+} xfs_error_injection_t;
+
+
+typedef struct xfs_fsop_bulkreq {
+ ino64_t *lastip;
+ __int32_t icount;
+ xfs_bstat_t *ubuffer;
+ __int32_t *ocount;
+} xfs_fsop_bulkreq_t;
+
+static __inline__ int
+xfsctl(char* path, int fd, int cmd, void* arg) {
+ if (cmd >= 0 && cmd < XFS_FSOPS_COUNT)
+ return syssgi(SGI_XFS_FSOPERATIONS, fd, cmd, (void*)0, arg);
+ else if (cmd == SGI_FS_BULKSTAT)
+ return syssgi(SGI_FS_BULKSTAT, fd,
+ ((xfs_fsop_bulkreq_t*)arg)->lastip,
+ ((xfs_fsop_bulkreq_t*)arg)->icount,
+ ((xfs_fsop_bulkreq_t*)arg)->ubuffer);
+ else if (cmd == SGI_FS_BULKSTAT_SINGLE)
+ return syssgi(SGI_FS_BULKSTAT_SINGLE, fd,
+ ((xfs_fsop_bulkreq_t*)arg)->lastip,
+ ((xfs_fsop_bulkreq_t*)arg)->ubuffer);
+ else if (cmd == SGI_XFS_INJECT_ERROR)
+ return syssgi(SGI_XFS_INJECT_ERROR,
+ ((xfs_error_injection_t*)arg)->errtag, fd);
+ else if (cmd == SGI_XFS_CLEARALL_ERROR)
+ return syssgi(SGI_XFS_CLEARALL_ERROR, fd);
+ else
+ return fcntl(fd, cmd, arg);
+}
+
+#endif /* sgi */
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
-#include <xfs/libxfs.h>
+#include "global.h"
/* These should be in libxfs.h */
#ifndef XFS_IOC_GOINGDOWN
mode[0] = '?';
}
- printf(" Mode: (%04o/%s)", sbuf.st_mode & 07777, mode);
- printf(" Uid: (%d)", sbuf.st_uid);
- printf(" Gid: (%d)\n", sbuf.st_gid);
+ printf(" Mode: (%04o/%s)", (unsigned int)(sbuf.st_mode & 07777), mode);
+ printf(" Uid: (%d)", (int)sbuf.st_uid);
+ printf(" Gid: (%d)\n", (int)sbuf.st_gid);
printf("Device: %2d,%-2d", major(sbuf.st_dev),
minor(sbuf.st_dev));
printf(" Inode: %-9llu", (unsigned long long)sbuf.st_ino);
void
usage(char *progname)
{
- fprintf(stderr, "usage: %s [-l filesize] [-b blocksize] [-c count] [-o write offset] [-s seed] [-x extentsize] [-w] [-v] [-d] [-r] [-a] [-p] filename\n",
- progname);
+ fprintf(stderr,
+ "usage: %s [-l filesize] [-b blocksize] [-c count]"
+ " [-o write offset] [-s seed] [-x extentsize]"
+ " [-w] [-v] [-d] [-r] [-a] [-p] filename\n",
+ progname);
exit(1);
}
while ((ch = getopt(argc, argv, "b:l:s:c:o:x:vwdrapt")) != EOF) {
switch(ch) {
case 'b': blocksize = atoi(optarg); break;
- case 'l': filesize = strtoll(optarg, NULL, 16); break;
+ case 'l': filesize = strtoull(optarg, NULL, 16); break;
case 's': seed = atoi(optarg); break;
case 'c': count = atoi(optarg); break;
- case 'o': fileoffset = strtoll(optarg, NULL, 16); break;
+ case 'o': fileoffset = strtoull(optarg, NULL, 16); break;
case 'x': extsize = atoi(optarg); break;
case 'v': verbose++; break;
case 'w': wsync++; break;
usage(argv[0]);
if ((filesize % blocksize) != 0) {
filesize -= filesize % blocksize;
- printf("filesize not a multiple of blocksize, reducing filesize to %lld\n",
- filesize);
+ printf("filesize not a multiple of blocksize, reducing filesize to %llu\n",
+ (unsigned long long)filesize);
}
if ((fileoffset % blocksize) != 0) {
fileoffset -= fileoffset % blocksize;
- printf("fileoffset not a multiple of blocksize, reducing fileoffset to %lld\n",
- fileoffset);
+ printf("fileoffset not a multiple of blocksize, reducing fileoffset to %llu\n",
+ (unsigned long long)fileoffset);
}
if (count > (filesize/blocksize)) {
count = (filesize/blocksize);
printf("randholes: Seed = %d (use \"-s %d\" to re-execute this test)\n", seed, seed);
srandom(seed);
- printf("randholes: blocksize=%d, filesize=%Ld, seed=%d\n"
- "randholes: count=%d, offset=%Ld, extsize=%d\n",
- blocksize, filesize, seed, count, fileoffset, extsize);
+ printf("randholes: blocksize=%d, filesize=%llu, seed=%d\n"
+ "randholes: count=%d, offset=%llu, extsize=%d\n",
+ blocksize, (unsigned long long)filesize, seed,
+ count, (unsigned long long)fileoffset, extsize);
printf("randholes: verbose=%d, wsync=%d, direct=%d, rt=%d, alloconly=%d, preserve=%d, test=%d\n",
verbose, wsync, direct, rt, alloconly, preserve, test);
perror("open");
return 1;
}
+
if (rt) {
+#ifdef XFS_IOC_FSGETXATTR
if (xfsctl(filename, fd, XFS_IOC_FSGETXATTR, &rtattr) < 0) {
perror("xfsctl(XFS_IOC_FSGETXATTR)");
return 1;
return 1;
}
}
+#else
+#ifdef F_FSGETXATTR
+ if (fcntl(fd, F_FSGETXATTR, &rtattr) < 0) {
+ perror("fcntl(F_FSGETXATTR)");
+ return 1;
+ }
+ if ((rtattr.fsx_xflags & XFS_XFLAG_REALTIME) == 0 ||
+ (extsize && rtattr.fsx_extsize != extsize * blocksize)) {
+ rtattr.fsx_xflags |= XFS_XFLAG_REALTIME;
+ if (extsize)
+ rtattr.fsx_extsize = extsize * blocksize;
+ if (fcntl(fd, F_FSSETXATTR, &rtattr) < 0) {
+ perror("fcntl(F_FSSETXATTR)");
+ return 1;
+ }
+ }
+#else
+bozo!
+#endif
+#endif
}
+
if (direct) {
+#ifdef XFS_IOC_DIOINFO
if (xfsctl(filename, fd, XFS_IOC_DIOINFO, &diob) < 0) {
perror("xfsctl(XFS_IOC_FIOINFO)");
return 1;
}
+#else
+#ifdef F_DIOINFO
+ if (fcntl(fd, F_DIOINFO, &diob) < 0) {
+ perror("fcntl(F_FIOINFO)");
+ return 1;
+ }
+#else
+bozo!
+#endif
+#endif
if (blocksize % diob.d_miniosz) {
- fprintf(stderr, "blocksize %d must be a multiple of %d for direct I/O\n", blocksize, diob.d_miniosz);
+ fprintf(stderr,
+ "blocksize %d must be a multiple of %d for direct I/O\n",
+ blocksize,
+ diob.d_miniosz);
return 1;
}
}
fl.l_start = offset;
fl.l_len = blocksize;
fl.l_whence = 0;
+
+#ifdef XFS_IOC_RESVSP64
if (xfsctl(fname, fd, XFS_IOC_RESVSP64, &fl) < 0) {
perror("xfsctl(XFS_IOC_RESVSP64)");
exit(1);
}
+#else
+#ifdef F_RESVSP64
+ if (fcntl(fd, F_RESVSP64, &fl) < 0) {
+ perror("fcntl(F_RESVSP64)");
+ exit(1);
+ }
+#else
+bozo!
+#endif
+#endif
continue;
}
SETBIT(valid, block);
if (test && verbose>1) printf("NOT ");
if (verbose > 1) {
printf("writing data at offset=%llx, value 0x%llx and 0x%llx\n",
- fileoffset + offset,
- *(__uint64_t *)buffer, *(__uint64_t *)(buffer+256));
+ (unsigned long long)(fileoffset + offset),
+ *(unsigned long long *)buffer,
+ *(unsigned long long *)(buffer+256));
}
}
if ((*(__uint64_t *)tmp != 0LL) ||
(*(__uint64_t *)(tmp+256) != 0LL)) {
printf("mismatched data at offset=%llx, expected 0x%llx, got 0x%llx and 0x%llx\n",
- fileoffset + block * blocksize,
+ (unsigned long long)fileoffset + block * blocksize,
0LL,
- *(__uint64_t *)tmp,
- *(__uint64_t *)(tmp+256));
+ *(unsigned long long *)tmp,
+ *(unsigned long long *)(tmp+256));
err++;
}
} else {
fileoffset + block * blocksize) ||
(*(__uint64_t *)(tmp+256) !=
fileoffset + block * blocksize) ) {
- printf("mismatched data at offset=%llx, expected 0x%llx, got 0x%llx and 0x%llx\n",
- fileoffset + block * blocksize,
- fileoffset + block * blocksize,
- *(__uint64_t *)tmp,
- *(__uint64_t *)(tmp+256));
+ printf("mismatched data at offset=%llx, "
+ "expected 0x%llx, got 0x%llx and 0x%llx\n",
+ (unsigned long long)fileoffset + block * blocksize,
+ (unsigned long long)fileoffset + block * blocksize,
+ *(unsigned long long *)tmp,
+ *(unsigned long long *)(tmp+256));
err++;
}
}
for (i = 0; i < (blocksize / 16); i++) {
printf("%llx: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- offset, *buffer, *(buffer + 1), *(buffer + 2),
+ (unsigned long long)offset, *buffer, *(buffer + 1), *(buffer + 2),
*(buffer + 3));
offset += 16;
buffer += 4;
gid_t sgids[SUP_MAX];
int sup_cnt = 0;
int status;
+ char *p;
+
+ prog = basename(argv[0]);
+ for (p = prog; *p; p++) {
+ if (*p == '/') {
+ prog = p + 1;
+ }
+ }
- prog = basename(argv[0]);
while ((c = getopt(argc, argv, "u:g:s:")) != -1) {
switch (c) {
if (gid != -1) {
if (setegid(gid) == -1) {
fprintf(stderr, "%s: setegid(%d) failed: %s\n",
- prog, gid, strerror(errno));
+ prog, (int)gid, strerror(errno));
exit(1);
}
}
if (uid != -1) {
if (seteuid(uid) == -1) {
fprintf(stderr, "%s: seteuid(%d) failed: %s\n",
- prog, uid, strerror(errno));
+ prog, (int)uid, strerror(errno));
exit(1);
}
}
}
if (verbose > 1)
printf("writing data at offset=%llx\n",
- (fileoffset + offset));
+ (unsigned long long)(fileoffset + offset));
}
void
}
if (verbose > 1)
printf("truncated file to offset %llx\n",
- (fileoffset + offset));
+ (unsigned long long)(fileoffset + offset));
}
}
printf("XFS_IOC_FSCOUNTS-\n freedata: %lld freertx: %lld freeino: %lld allocino: %lld\n",
- counts.freedata, counts.freertx, counts.freeino, counts.allocino);
+ (long long)counts.freedata, (long long)counts.freertx,
+ (long long)counts.freeino, (long long)counts.allocino);
}
__u64 getresblks(char *fname, int fsfd)
}
printf("XFS_IOC_GET_RESBLKS-\n resblks: %lld blksavail: %lld\n",
- res.resblks, res.resblks_avail);
+ (long long)res.resblks, (long long)res.resblks_avail);
return res.resblks;
}
}
printf("XFS_IOC_SET_RESBLKS-\n resblks: %lld blksavail: %lld\n",
- res.resblks, res.resblks_avail);
+ (long long)res.resblks, (long long)res.resblks_avail);
return res.resblks;
}
exit(1);
}
printf("dev: %llu ino: %llu mode: %o\n",
- (__u64)buf.st_dev, (__u64)buf.st_ino, buf.st_mode);
+ (unsigned long long)buf.st_dev, (unsigned long long)buf.st_ino, buf.st_mode);
}