From: Dave Chinner Date: Fri, 15 Mar 2013 12:28:04 +0000 (+0000) Subject: xfstests: introduce a common directory X-Git-Tag: v2022.05.01~3475 X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=8c4905a42e10650a75c0e3966ecc044e1a9616d0;p=xfstests-dev.git xfstests: introduce a common directory Introduce a top level common directory and move all the common.* files into it. Because there is now a directory named common, the prefix can be dropped from all the files. Convert all the tests to use this new directory for including common files. for f in common.*; do \ git mv `echo -n "$f " ; echo $f | sed -e 's;n\.;n/;'` \ done Signed-off-by: Dave Chinner Reviewed-by: Phil White [rjohnston@sgi.com reworked for TOT changes] Signed-off-by: Rich Johnston --- diff --git a/check b/check index af6d3172..cc426db0 100755 --- a/check +++ b/check @@ -133,9 +133,9 @@ fi export FSTYP # we need common.config -if ! . ./common.config +if ! . ./common/config then - echo "$iam: failed to source common.config" + echo "$iam: failed to source common/config" exit 1 fi @@ -266,10 +266,10 @@ then list=`echo $list | awk -f randomize.awk` fi -# we need common.rc -if ! . ./common.rc +# we need common/rc +if ! . ./common/rc then - echo "check: failed to source common.rc" + echo "check: failed to source common/rc" exit 1 fi diff --git a/common.attr b/common.attr deleted file mode 100644 index 69bcb014..00000000 --- a/common.attr +++ /dev/null @@ -1,208 +0,0 @@ -##/bin/bash -#----------------------------------------------------------------------- -# Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA -# -# Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane, -# Mountain View, CA 94043, USA, or: http://www.sgi.com -#----------------------------------------------------------------------- -# common extended attribute and ACL support - -# pick three unused user/group ids, store them as $acl[1-3] -# -_acl_setup_ids() -{ - eval `(_cat_passwd; _cat_group) | awk -F: ' - { ids[$3]=1 } - END { - j=1 - for(i=1; i<1000000 && j<=3;i++){ - if (! 
(i in ids)) { - printf "acl%d=%d;", j, i; - j++ - } - } - }'` -} - -# filter for the acl ids selected above -# -_acl_filter_id() -{ - sed \ - -e "s/u:$acl1/u:id1/" \ - -e "s/u:$acl2/u:id2/" \ - -e "s/u:$acl3/u:id3/" \ - -e "s/g:$acl1/g:id1/" \ - -e "s/g:$acl2/g:id2/" \ - -e "s/g:$acl3/g:id3/" \ - -e "s/ $acl1 / id1 /" \ - -e "s/ $acl2 / id2 /" \ - -e "s/ $acl3 / id3 /" -} - -# filtered ls -# -_acl_ls() -{ - _ls_l -n $* | awk '{ print $1, $3, $4, $NF }' | _acl_filter_id -} - -# -_acl_list() -{ - _file1=$1 - - if [ $HOSTOS = "IRIX" ]; then - ls -dD $_file1 | _acl_filter_id - else - chacl -l $_file1 | _acl_filter_id - fi -} - -# create an ACL with n ACEs in it -# -_create_n_aces() -{ - let n=$1-4 - acl='u::rwx,g::rwx,o::rwx,m::rwx' # 4 ace acl start - while [ $n -ne 0 ]; do - acl="$acl,u:$n:rwx" - let n=$n-1 - done - echo $acl -} - -# filter user ace names to user ids -# -_filter_aces() -{ - tmp_file=`mktemp /tmp/ace.XXXXXX` - - (_cat_passwd; _cat_group) > $tmp_file - - $AWK_PROG -v tmpfile=$tmp_file ' - BEGIN { - FS=":" - while ( getline 0 ) { - idlist[$1] = $3 - } - } - /^user/ { if ($2 in idlist) sub($2, idlist[$2]); print; next} - /^u/ { if ($2 in idlist) sub($2, idlist[$2]); print; next} - /^default:user/ { if ($3 in idlist) sub($3, idlist[$3]); print; next} - {print} - ' - rm -f $tmp_file -} - -_filter_aces_notypes() -{ - tr '\[' '\012' | tr ']' '\012' | tr ',' '\012' | _filter_aces|\ - sed -e 's/u:/user:/' -e 's/g:/group:/' -e 's/o:/other:/' -e 's/m:/mask:/' -} - -_require_acls() -{ - if [ ! -x /bin/chacl -a ! -x /usr/bin/chacl -a ! -x /sbin/chacl ]; then - _notrun "chacl command not found" - fi - - # - # Test if chacl is able to list ACLs on the target filesystems. On really - # old kernels the system calls might not be implemented at all, but the - # more common case is that the tested filesystem simply doesn't support - # ACLs. - # - touch $TEST_DIR/syscalltest - chacl -l $TEST_DIR/syscalltest > $TEST_DIR/syscalltest.out 2>&1 - cat $TEST_DIR/syscalltest.out >> $RESULT_DIR/$seq.full - - if grep -q 'Function not implemented' $TEST_DIR/syscalltest.out; then - _notrun "kernel does not support ACLs" - fi - if grep -q 'Operation not supported' $TEST_DIR/syscalltest.out; then - _notrun "ACLs not supported by this filesystem type: $FSTYP" - fi - - rm -f $TEST_DIR/syscalltest.out -} - -_list_acl() -{ - file=$1 - - ls -dD $file | _acl_filter_id -} - -_require_attrs() -{ - [ -n $ATTR_PROG ] || _notrun "attr command not found" - [ -n $GETFATTR_PROG ] || _notrun "getfattr command not found" - [ -n $SETFATTR_PROG ] || _notrun "setfattr command not found" - - # - # Test if chacl is able to write an attribute on the target filesystems. - # On really old kernels the system calls might not be implemented at all, - # but the more common case is that the tested filesystem simply doesn't - # support attributes. Note that we can't simply list attributes as - # various security modules generate synthetic attributes not actually - # stored on disk. - # - touch $TEST_DIR/syscalltest - attr -s "user.xfstests" -V "attr" $TEST_DIR > $TEST_DIR/syscalltest.out 2>&1 - cat $TEST_DIR/syscalltest.out >> $RESULT_DIR/$seq.full - - if grep -q 'Function not implemented' $TEST_DIR/syscalltest.out; then - _notrun "kernel does not support attrs" - fi - if grep -q 'Operation not supported' $TEST_DIR/syscalltest.out; then - _notrun "attrs not supported by this filesystem type: $FSTYP" - fi - - rm -f $TEST_DIR/syscalltest.out -} - -# getfattr -R returns info in readdir order which varies from fs to fs. 
-# This sorts the output by filename -_sort_getfattr_output() -{ - awk '{a[FNR]=$0}END{n = asort(a); for(i=1; i <= n; i++) print a[i]"\n"}' RS='' -} - -# set maximum total attr space based on fs type -if [ "$FSTYP" == "xfs" -o "$FSTYP" == "udf" ]; then - MAX_ATTRS=1000 -else # Assume max ~1 block of attrs - BLOCK_SIZE=`stat -f $TEST_DIR | grep "Block size" | cut -d " " -f3` - # user.attribute_XXX="value.XXX" is about 32 bytes; leave some overhead - let MAX_ATTRS=$BLOCK_SIZE/40 -fi - -export MAX_ATTRS - -# Set max attr value size based on fs type -if [ "$FSTYP" == "xfs" -o "$FSTYP" == "udf" -o "$FSTYP" == "btrfs" ]; then - MAX_ATTRVAL_SIZE=64 -else # Assume max ~1 block of attrs - BLOCK_SIZE=`stat -f $TEST_DIR | grep "Block size" | cut -d " " -f3` - # leave a little overhead - let MAX_ATTRVAL_SIZE=$BLOCK_SIZE-256 -fi - -export MAX_ATTRVAL_SIZE -# make sure this script returns success -/bin/true diff --git a/common.config b/common.config deleted file mode 100644 index 7a95adce..00000000 --- a/common.config +++ /dev/null @@ -1,276 +0,0 @@ -##/bin/bash -# -# Copyright (c) 2000-2003,2006 Silicon Graphics, Inc. All Rights Reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it would be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# -# setup and check for config parameters, and in particular -# -# EMAIL - email of the script runner. -# TEST_DIR - scratch test directory that is in an already -# mounted XFS file system, needs to be be world -# writeable -# TEST_DEV - device for file system containing TEST_DIR -# -# and optionally: -# SCRATCH_DEV - device you can make a scratch file system on -# SCRATCH_MNT - mount point for scratch file system -# SCRATCH_LOGDEV - scratch log device for external log testing -# SCRATCH_RTDEV - scratch rt dev -# TEST_LOGDEV - test log device for external log testing -# TEST_RTDEV - test rt dev -# TAPE_DEV - the tape device for the xfsdump tests -# RMT_TAPE_DEV - the remote tape device for the xfsdump tests -# RMT_IRIXTAPE_DEV- the IRIX remote tape device for the xfsdump tests -# RMT_TAPE_USER - remote user for tape device -# -# - These can be added to $HOST_CONFIG_DIR (witch default to ./config) -# below or a separate local configuration file can be used (using -# the HOST_OPTIONS variable). -# - This script is shared by the stress test system and the auto-qa -# system -# - TEST_DEV & TEST_DIR must be assigned. -# - this script shouldn't make any assertions about filesystem -# validity or mountedness. -# - -# all tests should use a common language setting to prevent golden -# output mismatches. 
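# [Editor's illustration -- not part of this commit or of common.config]
# A minimal local.config covering the mandatory variables documented in the
# header above might look like the sketch below; the addresses, device paths
# and mount points are purely hypothetical:
#
#   EMAIL=qa@example.com
#   TEST_DEV=/dev/sdb1
#   TEST_DIR=/mnt/test
#   SCRATCH_DEV=/dev/sdc1
#   SCRATCH_MNT=/mnt/scratch
#
# The file is sourced from the harness working directory via $HOST_OPTIONS,
# which defaults to ./local.config.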
-export LANG=C - -# Warning: don't put freeware before /usr/bsd on IRIX coz you'll -# get the wrong hostname and set your system name to -s :) -[ -d /usr/bsd ] && PATH=$PATH:/usr/bsd -[ -d /usr/freeware/bin ] && PATH=$PATH:/usr/freeware/bin -PATH=".:$PATH" - -HOST=`hostname -s` -HOSTOS=`uname -s` -[ "$HOSTOS" = "IRIX64" ] && HOSTOS="IRIX" - -MODULAR=0 # using XFS as a module or not -BOOT="/boot" # install target for kernels -export EXTRA=${EXTRA:=xfs-qa} - -# general parameters (mainly for auto-qa) -SOAK_PROC=3 # -p option to fsstress -SOAK_STRESS=10000 # -n option to fsstress -SOAK_PASSES=-1 # count of repetitions of fsstress (while soaking) -EMAIL=root@localhost # where auto-qa will send its status messages -export HOST_OPTIONS=${HOST_OPTIONS:=local.config} -export CHECK_OPTIONS=${CHECK_OPTIONS:="-g auto"} -export BENCH_PASSES=${BENCH_PASSES:=5} -export XFS_MKFS_OPTIONS=${XFS_MKFS_OPTIONS:=-bsize=4096} -export TIME_FACTOR=${TIME_FACTOR:=1} -export LOAD_FACTOR=${LOAD_FACTOR:=1} -export DEBUGFS_MNT=${DEBUGFS_MNT:="/sys/kernel/debug"} - -export PWD=`pwd` -#export DEBUG=${DEBUG:=...} # arbitrary CFLAGS really. -export MALLOCLIB=${MALLOCLIB:=/usr/lib/libefence.a} -export LOCAL_CONFIGURE_OPTIONS=${LOCAL_CONFIGURE_OPTIONS:=--enable-readline=yes} - -# $1 = prog to look for, $2* = default pathnames if not found in $PATH -set_prog_path() -{ - p=`which $1 2> /dev/null` - if [ -n "$p" -a -x "$p" ]; then - echo $p - return 0 - fi - p=$1 - - shift - for f; do - if [ -x $f ]; then - echo $f - return 0 - fi - done - - echo "" - return 1 -} - -# Handle mkfs.btrfs which does (or does not) require -f to overwrite -set_btrfs_mkfs_prog_path_with_opts() -{ - p=`set_prog_path mkfs.btrfs` - if [ "$p" != "" ] && grep -q 'force overwrite' $p; then - echo "$p -f" - else - echo $p - fi -} - -_fatal() -{ - echo "$*" - status=1 - exit 1 -} - -export MKFS_PROG="`set_prog_path mkfs`" -[ "$MKFS_PROG" = "" ] && _fatal "mkfs not found" - -export MOUNT_PROG="`set_prog_path mount`" -[ "$MOUNT_PROG" = "" ] && _fatal "mount not found" - -export UMOUNT_PROG="`set_prog_path umount`" -[ "$UMOUNT_PROG" = "" ] && _fatal "umount not found" - -export FSSTRESS_PROG="`set_prog_path fsstress $PWD/ltp/fsstress`" -[ "$FSSTRESS_PROG" = "" ] && _fatal "fsstress not found" - -export PERL_PROG="`set_prog_path perl`" -[ "$PERL_PROG" = "" ] && _fatal "perl not found" - -export AWK_PROG="`set_prog_path awk`" -[ "$AWK_PROG" = "" ] && _fatal "awk not found" - -export SED_PROG="`set_prog_path sed`" -[ "$SED_PROG" = "" ] && _fatal "sed not found" - -export BC_PROG="`set_prog_path bc`" -[ "$BC_PROG" = "" ] && _fatal "bc not found" - -export PS_ALL_FLAGS="-ef" - -export DF_PROG="`set_prog_path df`" -[ "$DF_PROG" = "" ] && _fatal "df not found" -[ "$HOSTOS" = "Linux" ] && export DF_PROG="$DF_PROG -T" - -export XFS_LOGPRINT_PROG="`set_prog_path xfs_logprint`" -export XFS_REPAIR_PROG="`set_prog_path xfs_repair`" -export XFS_CHECK_PROG="`set_prog_path xfs_check`" -export XFS_DB_PROG="`set_prog_path xfs_db`" -export XFS_GROWFS_PROG=`set_prog_path xfs_growfs` -export XFS_IO_PROG="`set_prog_path xfs_io`" -export XFS_PARALLEL_REPAIR_PROG="`set_prog_path xfs_prepair`" -export XFS_PARALLEL_REPAIR64_PROG="`set_prog_path xfs_prepair64`" -export __XFSDUMP_PROG="`set_prog_path xfsdump`" -export XFSDUMP_PROG="$__XFSDUMP_PROG -e" -export XFSRESTORE_PROG="`set_prog_path xfsrestore`" -export XFSINVUTIL_PROG="`set_prog_path xfsinvutil`" -export GETFATTR_PROG="`set_prog_path getfattr`" -export SETFATTR_PROG="`set_prog_path setfattr`" -export ATTR_PROG="`set_prog_path 
attr`" -export QUOTA_PROG="`set_prog_path quota`" -export XFS_QUOTA_PROG="`set_prog_path xfs_quota`" -export KILLALL_PROG="`set_prog_path killall`" -export INDENT_PROG="`set_prog_path indent`" -export XFS_COPY_PROG="`set_prog_path xfs_copy`" -export FSTRIM_PROG="`set_prog_path fstrim`" -export DUMPE2FS_PROG="`set_prog_path dumpe2fs`" -export FIO_PROG="`set_prog_path fio`" -export FILEFRAG_PROG="`set_prog_path filefrag`" -export E4DEFRAG_PROG="`set_prog_path e4defrag`" -export LOGGER_PROG="`set_prog_path logger`" - -# Generate a comparable xfsprogs version number in the form of -# major * 10000 + minor * 100 + release -# -# $ xfs_db -V -# xfs_db version 2.9.7 -# -# so, 2.9.7 = 20907 -_version=`$XFS_DB_PROG -V | $AWK_PROG ' - /version/ { - if (split($3,ver,".") == 3) - print (ver[1] * 10000) + (ver[2] * 100) + ver[3]; - }'` -[ -z "$_version" ] && _fatal "xfsprogs version cannot be found" -export XFSPROGS_VERSION="$_version" - -case "$HOSTOS" in - IRIX*) - export MKFS_XFS_PROG="`set_prog_path mkfs_xfs`" - export MKFS_UDF_PROG="`set_prog_path mkfs_udf`" - export XFS_FSR_PROG="`set_prog_path /usr/etc/fsr_xfs`" - export MKFS_NFS_PROG="false" - ;; - Linux) - export MKFS_XFS_PROG="`set_prog_path mkfs.xfs`" - export MKFS_UDF_PROG="`set_prog_path mkudffs`" - export MKFS_BTRFS_PROG="`set_btrfs_mkfs_prog_path_with_opts`" - export BTRFS_UTIL_PROG="`set_prog_path btrfs`" - export XFS_FSR_PROG="`set_prog_path xfs_fsr`" - export MKFS_NFS_PROG="false" - ;; -esac - -known_hosts() -{ - [ "$HOST_CONFIG_DIR" ] || HOST_CONFIG_DIR=`pwd`/configs - - [ -f /etc/xfsqa.config ] && . /etc/xfsqa.config - [ -f $HOST_CONFIG_DIR/$HOST ] && . $HOST_CONFIG_DIR/$HOST - [ -f $HOST_CONFIG_DIR/$HOST.config ] && . $HOST_CONFIG_DIR/$HOST.config - - # Mandatory Config values. - MC="" - [ -z "$EMAIL" ] && MC="$MC EMAIL" - [ -z "$TEST_DIR" ] && MC="$MC TEST_DIR" - [ -z "$TEST_DEV" ] && MC="$MC TEST_DEV" - - if [ -n "$MC" ]; then - echo "Warning: need to define parameters for host $HOST" - echo " or set variables:" - echo " $MC" - exit 1 - fi -} - -if [ -f "$HOST_OPTIONS" ]; then - . "$HOST_OPTIONS" -else - known_hosts -fi - -echo $TEST_DEV | grep -q ":" > /dev/null 2>&1 -if [ ! -b "$TEST_DEV" -a "$?" != "0" ]; then - echo "common.config: Error: \$TEST_DEV ($TEST_DEV) is not a block device or a NFS filesystem" - exit 1 -fi - -if [ ! -d "$TEST_DIR" ]; then - echo "common.config: Error: \$TEST_DIR ($TEST_DIR) is not a directory" - exit 1 -fi - -# a btrfs tester will set only SCRATCH_DEV_POOL, we will put first of its dev -# to SCRATCH_DEV and rest to SCRATCH_DEV_POOL to maintain the backward compatibility -if [ ! -z "$SCRATCH_DEV_POOL" ]; then - if [ ! -z "$SCRATCH_DEV" ]; then - echo "common.config: Error: \$SCRATCH_DEV should be unset when \$SCRATCH_DEV_POOL is set" - exit 1 - fi - SCRATCH_DEV=`echo $SCRATCH_DEV_POOL | awk '{print $1}'` - SCRATCH_DEV_POOL=`echo $SCRATCH_DEV_POOL | awk '{ ORS=" "; for (i = 2; i <= NF; i++) print $i}'` -fi - -echo $SCRATCH_DEV | grep -q ":" > /dev/null 2>&1 -if [ ! -z "$SCRATCH_DEV" -a ! -b "$SCRATCH_DEV" -a "$?" != "0" ]; then - echo "common.config: Error: \$SCRATCH_DEV ($SCRATCH_DEV) is not a block device or a NFS filesystem" - exit 1 -fi - -if [ ! -z "$SCRATCH_MNT" -a ! 
-d "$SCRATCH_MNT" ]; then - echo "common.config: Error: \$SCRATCH_MNT ($SCRATCH_MNT) is not a directory" - exit 1 -fi - -# make sure this script returns success -/bin/true diff --git a/common.defrag b/common.defrag deleted file mode 100644 index 13405a48..00000000 --- a/common.defrag +++ /dev/null @@ -1,71 +0,0 @@ -##/bin/bash -# -# Copyright (c) 2009 Eric Sandeen -# All Rights Reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it would be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# -# Functions useful for defragmentation tests -# - -_require_defrag() -{ - case "$FSTYP" in - xfs) - DEFRAG_PROG="$XFS_FSR_PROG" - ;; - ext4|ext4dev) - DEFRAG_PROG="$E4DEFRAG_PROG" - ;; - btrfs) - DEFRAG_PROG="$BTRFS_UTIL_PROG filesystem defragment" - ;; - *) - _notrun "defragmentation not supported for fstype \"$FSTYP\"" - ;; - esac - - _require_command $DEFRAG_PROG - _require_command $FILEFRAG_PROG -} - -_extent_count() -{ - $FILEFRAG_PROG $1 | awk '{print $2}' - $FILEFRAG_PROG -v $1 >> $RESULT_DIR/$seq.full 2>&1 -} - -# Defrag file, check it, and remove it. -_defrag() -{ - echo -n "Before: " - _extent_count $1 - CSUM_BEFORE=`md5sum $1` - STAT_BEFORE=`stat -c "a: %x m: %y c: %z" $1` - $DEFRAG_PROG -v $1 >> $RESULT_DIR/$seq.full 2>&1 - _scratch_remount - STAT_AFTER=`stat -c "a: %x m: %y c: %z" $1` - CSUM_AFTER=`md5sum $1` - echo -n "After: " - _extent_count $1 - if [ "$CSUM_BEFORE" != "$CSUM_AFTER" ]; then - _fail "file checksum changed post-defrag ($CSUM_BEFORE/$CSUM_AFTER)" - fi - if [ "$STAT_BEFORE" != "$STAT_AFTER" ]; then - _fail "file timestamps changed post-defrag:\n$STAT_BEFORE\n$STAT_AFTER" - fi - rm -f $1 -} - diff --git a/common.dmapi b/common.dmapi deleted file mode 100644 index c8a463a2..00000000 --- a/common.dmapi +++ /dev/null @@ -1,46 +0,0 @@ -##/bin/bash -# -# Copyright (c) 2006 Silicon Graphics, Inc. All Rights Reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it would be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# -# dmapi functions -# - -# Commands relitive to dmapi qa - -DMAPI_QASUITE1_DIR=$here/dmapi/src/suite1/ -DMAPI_QASUITE2_DIR=$here/dmapi/src/suite2/ -DMAPI_COMMON_DIR=$here/dmapi/src/common/ - -DMAPI_LS_TO_COPY_PATH=$DMAPI_QASUITE2_DIR/bindir/ls_to_copy - -_dmapi_scratch_mount () { - if [ `echo "$MOUNT_OPTIONS" | grep -c dmapi` -gt 0 -o \ - `echo "$MOUNT_OPTIONS" | grep -c dmi` -gt 0 ] ; then - #already got dmapi options set - _scratch_mount - dmapi_mount_result=$? 
- else - _scratch_mount "-o dmapi,mtpt=$SCRATCH_MNT" - dmapi_mount_result=$? - fi - - if [ $dmapi_mount_result -ne 0 ] ; then - _notrun "Assuming DMAPI modules are not loaded" - fi -} - - diff --git a/common.dump b/common.dump deleted file mode 100644 index 0395ee32..00000000 --- a/common.dump +++ /dev/null @@ -1,1537 +0,0 @@ -##/bin/bash -# -# Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. All Rights Reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it would be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# -# Functions useful for xfsdump/xfsrestore tests -# - -# --- initializations --- -rm -f $RESULT_DIR/$seq.full - -if [ -n "$DEBUGDUMP" ]; then - _dump_debug=-v4 - _restore_debug=-v4 - _invutil_debug=-d - - # Use dump/restore in qa directory (copy them here) for debugging - export PATH="$here:$PATH" - export __XFSDUMP_PROG="`set_prog_path xfsdump`" - export XFSDUMP_PROG="$__XFSDUMP_PROG -e" - export XFSRESTORE_PROG="`set_prog_path xfsrestore`" - export XFSINVUTIL_PROG="`set_prog_path xfsinvutil`" - [ -x $here/xfsdump ] && echo "Using xfstests' xfsdump for debug" - [ -x $here/xfsrestore ] && echo "Using xfstests' xfsrestore for debug" - [ -x $here/xfsinvutil ] && echo "Using xfstests' xfsinvutil for debug" -fi - -[ "$XFSDUMP_PROG" = "" ] && _notrun "xfsdump not found" -[ "$XFSRESTORE_PROG" = "" ] && _notrun "xfsrestore not found" -[ "$XFSINVUTIL_PROG" = "" ] && _notrun "xfsinvutil not found" - -# status returned for not run tests -NOTRUNSTS=2 - -# name those directories -dump_file=$tmp.dumpfile -# dump_file=$here/dumpfile #TEMP OVERRIDE DUMP FILE -dump_sdir=dumpdir -dump_dir=$SCRATCH_MNT/$dump_sdir -restore_sdir=restoredir -restore_dir=$SCRATCH_MNT/$restore_sdir -multi=3 -dumptape=$TAPE_DEV -media_label="stress_tape_media" -session_label="stress_$seq" - -nobody=4 # define this uid/gid as a number -do_quota_check=true # do quota check if quotas enabled - -_need_to_be_root - -# install our cleaner -trap "_cleanup; exit \$status" 0 1 2 3 15 - -# start inventory from a known base - move it aside for test -for dir in /var/xfsdump/inventory /var/lib/xfsdump/inventory; do - if [ -d $dir ]; then - [ -d $dir.$seq ] && rm -rf $dir.$seq - mv $dir $dir.$seq - fi -done - -have_mtvariable=false -[ `uname` = "Linux" ] && have_mtvariable=true - - -_require_multi_stream() -{ - $XFSDUMP_PROG -JF -f /dev/null -f /dev/null 2> /dev/null | - grep -q "too many -f arguments" && - _notrun "xfsdump multi-stream support required" -} - -_require_legacy_v2_format() -{ - $XFSDUMP_PROG 2>&1 | - grep -q "generate format 2 dump" || - _notrun "xfsdump -K option required" - - $XFSRESTORE_PROG 2>&1 | - grep -q "force use of format 2 generation" || - _notrun "xfsrestore -K option required" -} - -# -# do a remote/local mt -# -_mt() -{ - op=$1 - if _isrmt; then - # REMOTE - _rmtdev=`echo $dumptape | $AWK_PROG -F: '{print $2}'` - - if echo $dumptape | grep '@' >/dev/null; then - _spec=`echo $dumptape | $AWK_PROG -F: '{print $1}'` - _rmtuser=`echo $_spec | $AWK_PROG -F@ '{print $1}'` - 
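# [Editor's illustration -- not part of this commit] Worked example of the
# remote-tape parsing at this point: with a hypothetical
# dumptape="qa@tapehost:/dev/nst0", the awk -F: split above yields
# _rmtdev="/dev/nst0" and _spec="qa@tapehost", so _rmtuser becomes "qa",
# _rmthost becomes "tapehost", and the command issued below is:
#   rsh -n -l qa tapehost "mt -t /dev/nst0 $op"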
_rmthost=`echo $_spec | $AWK_PROG -F@ '{print $2}'` - rsh -n -l $_rmtuser $_rmthost "mt -t $_rmtdev $op" - else - _rmthost=`echo $dumptape | $AWK_PROG -F: '{print $1}'` - rsh -n $_rmthost "mt -t $_rmtdev $op" - fi - else - #LOCAL - mt -t $dumptape $op - fi -} - -_check_onl() -{ - _limit=10 - i=0 - while [ $i -lt $_limit ]; do - echo "Checking online..." >>$RESULT_DIR/$seq.full - if _mt status >$tmp.status 2>&1; then - break; - else - sleep 1 - fi - let i=$i+1 - done - - - if [ $i -eq $_limit ]; then - echo "ERROR: mt -f $dumptape failed" - cat $tmp.status - - echo "mt -f $dumptape failed" >$seq.notrun - status=$NOTRUNSTS - exit - fi - - - if egrep -i 'onl|ready' $tmp.status | grep -iv 'not ready' >/dev/null; then - : - else - echo "ERROR: $dumptape is not online" - cat $tmp.status - - echo "dumptape, $dumptape, is not online" >$seq.notrun - status=$NOTRUNSTS - exit - fi -} - -_wait_tape() -{ - echo "Wait for tape, $dumptape, ..." >>$RESULT_DIR/$seq.full - - i=0 - while [ $i -lt 20 ]; do - echo "Checking status..." >>$RESULT_DIR/$seq.full - if _mt status 2>&1 | tee -a $RESULT_DIR/$seq.full | egrep -i "onl|ready" >/dev/null; then - break; - else - sleep 1 - fi - let i=$i+1 - done -} - -# -# Keep trying so we know we really have rewound -# -_rewind() -{ - echo "Initiate rewind..." >>$RESULT_DIR/$seq.full - _wait_tape - _mt rewind >/dev/null - _wait_tape -} - -# -# Do a custom erase because: -# (i) some machines don't support it -# (ii) some machines take forever to do it -# -_erase_soft() -{ - echo "Erasing tape" | tee -a $RESULT_DIR/$seq.full - _rewind - _mt weof 3 - _rewind -} - -_erase_hard() -{ - echo "Erasing tape" | tee -a $RESULT_DIR/$seq.full - _mt erase -} - -_isrmt() -{ - echo $dumptape | grep ':' >/dev/null -} - -# -# Get tape ready -# -_set_variable() -{ - $have_mtvariable || return - - if _isrmt; then - : - else - # LOCAL - echo "Put scsi tape driver into variable block size mode" - mt -f $dumptape setblk 0 - fi -} - -_require_tape() -{ - dumptape=$1 - - if [ -z "$dumptape" -o "@" == "$dumptape" ]; then - echo "This test requires a dump tape - none was specified" - echo "No dump tape specified" >$RESULT_DIR/$seq.notrun - status=$NOTRUNSTS - exit - fi - - _check_onl - _set_variable -} - -_wipe_fs() -{ - _require_scratch - - _scratch_mkfs_xfs >>$RESULT_DIR/$seq.full || _fail "mkfs failed" - _scratch_mount >>$RESULT_DIR/$seq.full || _fail "mount failed" -} - -# -# Cleanup created dirs and files -# Called by trap -# -_cleanup() -{ - # Some tests include this before checking _supported_fs xfs - # and the sleeps & checks here get annoying - if [ "$FSTYP" != "xfs" ]; then - return - fi - - cd $here - rm -f $tmp.* - - if [ -n "$DEBUGDUMP" ]; then - # save it for inspection - for dir in /var/xfsdump/inventory /var/lib/xfsdump/inventory; do - [ -d $dir ] || continue - tar -cvf $RESULT_DIR/$seq.inventory.tar $dir - ls -nR $dir >$RESULT_DIR/$seq.inventory.ls - done - fi - - # put inventory dir back - for dir in /var/xfsdump/inventory /var/lib/xfsdump/inventory; do - [ -d $dir.$seq ] || continue - rm -rf $dir # get rid of new one - mv $dir.$seq $dir - done - - if [ $status -ne $NOTRUNSTS ]; then - # Sleep added to stop _check_scratch_fs from complaining that the - # scratch_dev is still busy - sleep 10 - - _check_scratch_fs - fi -} - -# -# ensure that bulkstat data will -# match with incore data -# by forcing disk data to be written out -# -_stable_fs() -{ - _saveddir=`pwd`; cd / - umount $SCRATCH_MNT >>$RESULT_DIR/$seq.full || _fail "unmount failed" - _scratch_mount >>$RESULT_DIR/$seq.full 
|| _fail "mount failed" - cd $_saveddir -} - -# -# Run fsstress to create a mixture of -# files,dirs,links,symlinks -# -# Pinched from test 013. -# -_create_dumpdir_stress() -{ - echo "Creating directory system to dump using fsstress." - - _wipe_fs - - _param="-f link=10 -f creat=10 -f mkdir=10 -f truncate=5 -f symlink=10" - _count=240 - rm -rf $dump_dir - if ! mkdir $dump_dir; then - echo " failed to mkdir $dump_dir" - status=1 - exit - fi - echo "" - echo "-----------------------------------------------" - echo "fsstress : $_param" - echo "-----------------------------------------------" - if ! $here/ltp/fsstress $_param -s 1 $FSSTRESS_AVOID -n $_count -d $dump_dir >$tmp.out 2>&1 - then - echo " fsstress (count=$_count) returned $? - see $RESULT_DIR/$seq.full" - - echo "--------------------------------------" >>$RESULT_DIR/$seq.full - echo "output from fsstress:" >>$RESULT_DIR/$seq.full - echo "--------------------------------------" >>$RESULT_DIR/$seq.full - cat $tmp.out >>$RESULT_DIR/$seq.full - status=1 - fi - - _stable_fs -} - -_mk_fillconfig1() -{ - cat <$tmp.config -# pathname size in bytes owner group -# -small 10 $nobody $nobody -big 102400 daemon sys -sub/small 10 bin bin -sub/big 102400 $nobody sys -# -sub/a 1 $nobody $nobody -sub/b 2 $nobody $nobody -sub/c 4 $nobody $nobody -sub/d 8 $nobody $nobody -sub/e 16 $nobody $nobody -sub/f 32 $nobody $nobody -sub/g 64 $nobody $nobody -sub/h 128 $nobody $nobody -sub/i 256 $nobody $nobody -sub/j 512 $nobody $nobody -sub/k 1024 $nobody $nobody -sub/l 2048 $nobody $nobody -sub/m 4096 $nobody $nobody -sub/n 8192 $nobody $nobody -# -sub/a00 100 $nobody $nobody -sub/b00 200 $nobody $nobody -sub/c00 400 $nobody $nobody -sub/d00 800 $nobody $nobody -sub/e00 1600 $nobody $nobody -sub/f00 3200 $nobody $nobody -sub/g00 6400 $nobody $nobody -sub/h00 12800 $nobody $nobody -sub/i00 25600 $nobody $nobody -sub/j00 51200 $nobody $nobody -sub/k00 102400 $nobody $nobody -sub/l00 204800 $nobody $nobody -sub/m00 409600 $nobody $nobody -sub/n00 819200 $nobody $nobody -# -sub/a000 1000 $nobody $nobody -sub/e000 16000 $nobody $nobody -sub/h000 128000 $nobody $nobody -sub/k000 1024000 $nobody $nobody -End-of-File -} - -_mk_fillconfig2() -{ - cat <$tmp.config -# pathname size in bytes -# -smalll 10 $nobody $nobody -biggg 102400 $nobody $nobody -sub/smalll 10 $nobody $nobody -sub/biggg 102400 $nobody $nobody -End-of-File -} - -_mk_fillconfig_perm() -{ - # dir_guid: ugo=rwx,g+s on dir is for IRIX chmod(1) - - cat <$tmp.config -# pathname size/dir user group mode -# -file_suid 10 $nobody $nobody 04777 -file_guid 10 $nobody $nobody 02777 -file_sticky 10 $nobody $nobody 01777 -file_mix1 10 $nobody $nobody 761 -file_mix2 10 $nobody $nobody 642 -dir_suid d $nobody $nobody 04777 -dir_guid d $nobody $nobody ugo=rwx,g+s -dir_sticky d $nobody $nobody 01777 -dir_mix1 d $nobody $nobody 761 -dir_mix2 d $nobody $nobody 642 -End-of-File -} - -_mk_fillconfig_ea() -{ - cat <$tmp.config -# pathname size user group perm name value namespace -# -smalll 10 $nobody $nobody 777 attr1 some_text user -biggg 102400 $nobody $nobody 777 attr2 some_text2 root -sub/smalll 10 $nobody $nobody 777 attr3 some_text3 user -sub/biggg 102400 $nobody $nobody 777 attr4 some_text4 root -dir d $nobody $nobody 777 attr5 dir_text user -# -# Add more files so that there are more than the number -# of streams. -# There are bugs in dump/restore for # non-dir files < # streams -# It can be tested in another configuration. -# It is a pathalogical case. 
-# -sub/a 1 $nobody $nobody -sub/b 2 $nobody $nobody -sub/c 4 $nobody $nobody -sub/d 8 $nobody $nobody -sub/e 16 $nobody $nobody -sub/f 32 $nobody $nobody -sub/g 64 $nobody $nobody -sub/h 128 $nobody $nobody -sub/i 256 $nobody $nobody -sub/j 512 $nobody $nobody -sub/k 1024 $nobody $nobody -sub/l 2048 $nobody $nobody -sub/m 4096 $nobody $nobody -sub/n 8192 $nobody $nobody -End-of-File -} - -# -# extended file attribute flags -# -_mk_fillconfig_xattr() -{ - cat <$tmp.config -# pathname size user group perm name -# -xflag_realtime 10 $nobody $nobody 777 XFS_XFLAG_REALTIME -xflag_prealloc 10 $nobody $nobody 777 XFS_XFLAG_PREALLOC -xflag_immutable 10 $nobody $nobody 777 XFS_XFLAG_IMMUTABLE -xflag_append 10 $nobody $nobody 777 XFS_XFLAG_APPEND -xflag_sync 10 $nobody $nobody 777 XFS_XFLAG_SYNC -xflag_noatime 10 $nobody $nobody 777 XFS_XFLAG_NOATIME -xflag_nodump 10 $nobody $nobody 777 XFS_XFLAG_NODUMP -xflag_hasattr 10 $nobody $nobody 777 XFS_XFLAG_HASATTR -End-of-File -} - -# -# Create a bunch of directories/files of different sizes -# filled with data. -# -# Pinched from test 001. -# -_do_create_dumpdir_fill() -{ - echo "Creating directory system to dump using src/fill." - - mkdir -p $dump_dir || _fail "cannot mkdir \"$dump_dir\"" - cd $dump_dir - - $verbose && echo -n "Setup " - sed -e '/^#/d' $tmp.config \ - | while read file nbytes owner group perms ea_name ea_value namespace - do - if [ $nbytes = "d" ]; then - # create a directory - dir=$file - if [ ! -d $dir ] - then - if mkdir $dir - then - : - else - $verbose && echo - echo "Error: cannot mkdir \"$dir\"" - exit 1 - fi - fi - else - # create a directory/file - dir=`dirname $file` - if [ "$dir" != "." ] - then - if [ ! -d $dir ] - then - if mkdir $dir - then - : - else - $verbose && echo - echo "Error: cannot mkdir \"$dir\"" - exit 1 - fi - fi - fi - rm -f $file - if $here/src/fill $file $file $nbytes - then - : - else - $verbose && echo - echo "Error: cannot create \"$file\"" - exit 1 - fi - fi - if [ -n "$owner" -a -n "$group" ]; then - chown $owner.$group $file - fi - if [ -n "$perms" ]; then - chmod $perms $file - fi - - # extended attributes (EA) - if [ -n "$ea_name" -a -n "$ea_value" ]; then - if [ "X$namespace" = "Xroot" ]; then - attr -R -s $ea_name -V $ea_value $file - else - attr -s $ea_name -V $ea_value $file - fi - # extended file attribute flags - no value - NOT EAs - elif [ -n "$ea_name" -a -z "$ea_value" ]; then - # set the flag - # TODO XXX - # use xfs_io to send the ioctl - : - fi - $verbose && echo -n "." 
- done - $verbose && echo - - cd $here -} - -_mk_fillconfig_multi() -{ - _mk_fillconfig1 - cat <>$tmp.config -# pathname size in bytes -# -large000 8874368 $nobody $nobody -large111 2582912 $nobody $nobody -large222 7825792 $nobody $nobody -End-of-File -} - -_create_dumpdir_largefile() -{ - _wipe_fs - mkdir -p $dump_dir || _fail "cannot mkdir \"$dump_dir\"" - _largesize=4294967297 - _largefile=$dump_dir/largefile - echo "dd a largefile at offset $_largesize" - POSIXLY_CORRECT=yes \ - dd if=/dev/zero of=$_largefile bs=1 seek=$_largesize count=10 2>&1 - _stable_fs -} - -_create_dumpdir_fill() -{ - _wipe_fs - _mk_fillconfig1 - _do_create_dumpdir_fill - _stable_fs -} - -_create_dumpdir_fill2() -{ - _wipe_fs - _mk_fillconfig2 - _do_create_dumpdir_fill - _stable_fs -} - -_create_dumpdir_fill_perm() -{ - _wipe_fs - _mk_fillconfig_perm - _do_create_dumpdir_fill - _stable_fs -} - -_create_dumpdir_fill_ea() -{ - _wipe_fs - _mk_fillconfig_ea - _do_create_dumpdir_fill - _stable_fs -} - -# -# Create enough files, and a few large enough files, so that -# some files are likely to be split across streams. -# -_create_dumpdir_fill_multi() -{ - _wipe_fs - _mk_fillconfig_multi - _do_create_dumpdir_fill - _stable_fs -} - -# -# Append a subset of the fill'ed files -# So we can see if just these get dumped on an incremental -# -_append_dumpdir_fill() -{ - cd $dump_dir - cat <$tmp.config -# pathname -# -small -sub/big -# -sub/a -sub/c -sub/e -End-of-File - sed -e '/^#/d' $tmp.config \ - | while read file - do - echo 'Extra text' >>$file - done - - cd $here - _stable_fs -} - -_do_create_dump_symlinks() -{ - echo "Creating directory system of symlinks to dump." - - mkdir -p $dump_dir || _fail "cannot mkdir \"$dump_dir\"" - cd $dump_dir - - $verbose && echo -n "Setup " - sed -e '/^#/d' $tmp.config \ - | while read file nbytes owner group owner2 group2 perms perms2 - do - dir=`dirname $file` - if [ "$dir" != "." ] - then - if [ ! -d $dir ] - then - if mkdir $dir - then - : - else - $verbose && echo - echo "Error: cannot mkdir \"$dir\"" - exit 1 - fi - fi - fi - rm -f $file - touch $file - - # Do chmod on symlink using umask. - # This won't do the right thing as it subtracts permissions. - # However, I don't care, as long as I get some different perms - # for testing. - if [ -n "$perms2" ]; then - omask=`umask` - umask $perms2 - fi - ln -s $file $file-link - if [ -n "$perms2" ]; then - umask $omask - fi - - if [ -n "$owner" -a -n "$group" ]; then - chown $owner.$group $file - fi - if [ -n "$owner" -a -n "$group" ]; then - chown -h $owner.$group $file-link - fi - if [ -n "$perms" ]; then - chmod $perms $file - fi - $verbose && echo -n "." - done - $verbose && echo - - cd $here -} - -_mk_symlink_config() -{ - cat <$tmp.config -# path size owner1 group1 owner2 group2 perm1 perm2 -# -a 0 $nobody $nobody daemon sys 124 421 -b 0 daemon sys bin bin 347 743 -sub/a 0 bin bin $nobody sys 777 777 -sub/b 0 $nobody sys $nobody $nobody 367 763 -End-of-File -} - -_create_dumpdir_symlinks() -{ - _wipe_fs - _mk_symlink_config - _do_create_dump_symlinks - _stable_fs -} - -# -# create hardlinks of form $_fname, $_fname_h1 $_fname_h2 ... -# -_create_hardlinks() -{ - _fname=$1 - _numlinks=$2 - - touch $_fname - _j=1 - while [ $_j -le $_numlinks ]; do - _suffix=_h$_j - _hardlink=$_fname$_suffix - echo "creating hardlink $_hardlink to $_fname" - ln $_fname $_hardlink - let _j=$_j+1 - done -} - -# -# create a set of hardlinks -# create hardlinks of form file1, file1_h1 file1_h2 ... -# create hardlinks of form file2, file2_h1 file2_h2 ... 
-# create hardlinks of form file3, file3_h1 file3_h2 ... -# -_create_hardset() -{ - _numsets=$1 - _i=1 - while [ $_i -le $_numsets ]; do - _create_hardlinks file$_i 5 - let _i=$_i+1 - done -} - - -_modify_level() -{ - _level=$1 - echo "mod level $_level" >$dump_dir/file$_level -} - -_create_dumpdir_hardlinks() -{ - _numsets=$1 - _wipe_fs - echo "Creating directory system of hardlinks to incrementally dump." - - mkdir -p $dump_dir || _fail "cannot mkdir \"$dump_dir\"" - cd $dump_dir - - _create_hardset $_numsets - - cd $here - _stable_fs -} - -# -# Filter for ls -# Filter out times and dates on symlinks and char devices. -# Filter out size on directories because this can differ -# when transitioning to long inode numbers (ie. 64 bits). -# -_ls_filter() -{ - $AWK_PROG ' - /^l/ { date = $8; time = $7; sub(date,"DATE"); sub(time,"TIME"); print} - /^c/ { date = $9; time = $7; sub(date,"DATE"); sub(time,"TIME"); print} - /^d/ { size = $5; sub(size,"SIZE"); print} - {print}' \ - | sed -e 's/total [0-9][0-9]*/total TOTAL/' -} - -# -# Filtering of Irix character hwgraph device names -# e.g. -# chardev: /hw/node/xtalk/15/pci/0/scsi_ctlr/0/target/1/lun/0/disk/partition/4/char -# blkdev: /dev/dsk/dks0d1s4 -# -_filter_devchar() -{ - $AWK_PROG ' - /\/hw\/node/ { - sub(/\/hw.*scsi_ctlr\//,"/dev/dsk/dks") # blah blah /dev/dsk/dks0/target/1/.... - sub(/\/target\//,"d") # blah blah /dev/dsk/dks0d1/lun/0/disk..... - sub(/\/lun.*partition\//,"s") # blah blah /dev/dsk/dks0d1s4/char - sub(/\/char/,"") # blah blah /dev/dsk/dks0d1s4 - } - { print } - ' -} - - -# -# Filter out the non-deterministic dump msgs from -# xfsdump and xfsrestore -# -_dump_filter_main() -{ - _filter_devchar |\ - sed \ - -e "s#$__XFSDUMP_PROG#xfsdump#" \ - -e "s#$XFSRESTORE_PROG#xfsrestore#" \ - -e "s#$XFSINVUTIL_PROG#xfsinvutil#" \ - -e "s/`hostname`/HOSTNAME/" \ - -e "s#$SCRATCH_DEV#SCRATCH_DEV#" \ - -e "s#$SCRATCH_RAWDEV#SCRATCH_DEV#" \ - -e "s#$dumptape#TAPE_DEV#" \ - -e "s#$SCRATCH_MNT#SCRATCH_MNT#" \ - -e "s#$dump_file#DUMP_FILE#" \ - -e 's#/var/lib/xfsdump#/var/xfsdump#' \ - -e 's/session id:[ ]*[0-9a-f-]*/session id: ID/' \ - -e '/filesystem id:[ ]*[0-9a-f-]*/d' \ - -e 's/time:[ ].*/time: TIME/' \ - -e 's/date:[ ].*/date: DATE/' \ - -e 's/dump begun .*/dump begun DATE/' \ - -e 's/previously begun .*/previously begun DATE/' \ - -e 's/[0-9][0-9]* seconds/SECS seconds/' \ - -e 's/restore.[0-9][0-9]*/restore.PID/' \ - -e 's/ino [0-9][0-9]*/ino INO/g' \ - -e '/stream [0-9]:/s/offset [0-9][0-9]*/offset NUM/g' \ - -e '/: dump size/s/[0-9][0-9]*/NUM/' \ - -e '/dump size:/s/[0-9][0-9]*/NUM/' \ - -e '/dump size per stream:/s/[0-9][0-9]*/NUM/' \ - -e 's/\(media file size[ ]*\)[0-9][0-9]*/\1NUM/' \ - -e 's/\(mfile size:[ ]*\)[0-9][0-9]*/\1NUM/' \ - -e '/drive[ ]*[0-9][0-9]*:/d' \ - -e '/\/dev\/tty/d' \ - -e '/inventory session uuid/d' \ - -e '/ - Running single-threaded/d' \ - -e '/Mount point match/d' \ - -e '/^.*I\/O metrics: .*$/d' \ - -e 's/1048576/BLOCKSZ/' \ - -e 's/2097152/BLOCKSZ/' \ - -e 's/(pid[ ]*[1-9][0-9]*)/\(pid PID\)/' \ - -e '/version [3-9]\.[0-9]/d' \ - -e 's/\/hw\/module.*$/SCRATCH_DEV/' \ - -e 's/xfsdump: ino map phase 1: .*/xfsdump: ino map /' \ - -e '/xfsdump: ino map phase [2]/,1d' \ - -e '/xfsdump: ino map phase [3]/,1d' \ - -e '/xfsdump: ino map phase [4]/,1d' \ - -e '/xfsdump: ino map phase [5]/,1d' \ - -e 's/id:[[:space:]]*[0-9a-f]\{8\}-[0-9a-f]\{4\}-[0-9a-f]\{4\}-[0-9a-f]\{4\}-[0-9a-f]\{12\}/ID: ID/' \ - -e 's/\[y\/n\][- ]----------------------*/\[y\/n\]/' \ - -e '/skip attribute set/d' \ - | perl -ne ' - # filter 
out all the output between the lines "Dump Summary:" - # and "Dump Status:" - if ($_ =~ /(?:Dump|Restore) Summary/) { - $skip = 1; - } elsif ($_ =~ /(?:Dump|Restore) Status/) { - $skip = 0; - } - print if (! $skip);' \ - | perl -ne ' - # correct the file count if large scratch devices are being used - $skip = 0; - if ($_ =~ /(\S+) directories and (\S+) entries/) { - $foo = $2; - if ($ENV{'LARGE_SCRATCH_DEV'} && $foo > 0) { - $foo -= 1; - } - printf("xfsrestore: %u directories and %u entries processed\n", - $1, $foo); - $skip = 1; - } - print if (! $skip);' -} - -_dump_filter() -{ - if $do_quota_check - then - _dump_filter_main | _check_quota_dumprestore | _check_quota_entries - else - _dump_filter_main - fi -} - -_invutil_filter() -{ - _dump_filter_main \ - | sed \ - -e 's/UUID[ ]*:[ ][0-9a-f-]*/UUID : ID/' \ - -e 's/TIME OF DUMP[ ]*:.*/TIME OF DUMP : TIME/' \ - -e 's/HOSTNAME:SCRATCH_MNT.*/HOSTNAME:SCRATCH_MNT/' \ - -e 's#inventory/[0-9a-f-]*#inventory/UUID#' \ - -} - - -_dir_filter() -{ - sed \ - -e "s#$dump_file#DUMP_FILE#g" \ - -e "s#$SCRATCH_DEV#SCRATCH_DEV#" \ - -e "s#$SCRATCH_RAWDEV#SCRATCH_DEV#" \ - -e "s#$dumptape#TAPE_DEV#" \ - -e "s#$dump_dir#DUMP_DIR#g" \ - -e "s#$restore_dir#RESTORE_DIR#g" \ - -e "s#$SCRATCH_MNT#SCRATCH_MNT#g" \ - -e "s#$dump_sdir#DUMP_SUBDIR#g" \ - -e "s#$restore_sdir#RESTORE_SUBDIR#g" \ - -e "s#$$#PID#g" \ - -e "/Only in SCRATCH_MNT: .use_space/d" \ - -} - -# -# Parse xfsdump arguments. -# Note: requires a space between option letter and argument -# -_parse_dump_args() -{ - OPTIND=0 - dump_args="" - while [ $# -gt 0 ] - do - case $1 - in - -f) - [ -z "$2" ] && _fail "missing argument for -f" - dumptape=$2 - dump_file=$2 - shift - ;; - -L) - [ -z "$2" ] && _fail "missing argument for -L" - session_label=$2 - shift - ;; - --multi) - [ -z "$2" ] && _fail "missing argument for --multi" - multi=$2 - shift - ;; - --check-quota) - do_quota_check=true - ;; - --no-check-quota) - do_quota_check=false - ;; - -o|-D|-F|-K) - dump_args="$dump_args $1" - ;; - -l|-d) - [ -z "$2" ] && _fail "missing argument for $1" - dump_args="$dump_args $1$2" - shift - ;; - *) - _fail "invalid argument to common.dump function: $1" - ;; - esac - shift - done -} - -# -# Parse xfsrestore arguments. -# Note: requires a space between option letter and argument -# -_parse_restore_args() -{ - OPTIND=0 - restore_args="" - while [ $# -gt 0 ] - do - case $1 - in - -f) - [ -z "$2" ] && _fail "missing argument for -f" - dumptape=$2 - dump_file=$2 - shift - ;; - -L) - [ -z "$2" ] && _fail "missing argument for -L" - session_label=$2 - shift - ;; - --multi) - [ -z "$2" ] && _fail "missing argument for --multi" - multi=$2 - shift - ;; - --check-quota) - do_quota_check=true - ;; - --no-check-quota) - do_quota_check=false - ;; - -K|-R) - restore_args="$restore_args $1" - ;; - *) - _fail "invalid argument to common.dump function: $1" - ;; - esac - shift - done -} - - -# -# Dump a subdir -# -_do_dump_sub() -{ - _parse_dump_args $* - - echo "Dumping to tape..." - opts="$_dump_debug$dump_args -s $dump_sdir -f $dumptape -M $media_label -L $session_label $SCRATCH_MNT" - echo "xfsdump $opts" | _dir_filter - $XFSDUMP_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter -} - -# -# Do dump to tape -# -_do_dump() -{ - _parse_dump_args $* - - echo "Dumping to tape..." 
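# [Editor's illustration -- not part of this commit] Rough sketch of how a
# dump/restore test typically drives these wrappers (the dump file name is
# hypothetical); note that _parse_dump_args/_parse_restore_args require a
# space between an option letter and its argument:
#
#   _create_dumpdir_fill
#   _do_dump_file -f $tmp.dump
#   _do_restore_file
#   _diff_compare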
- opts="$_dump_debug$dump_args -f $dumptape -M $media_label -L $session_label $SCRATCH_MNT" - echo "xfsdump $opts" | _dir_filter - $XFSDUMP_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter -} - - -# -# Do full dump with -m -# -_do_dump_min() -{ - _parse_dump_args $* - - echo "Dumping to tape..." - onemeg=1048576 - opts="$_dump_debug$dump_args -m -b $onemeg -l0 -f $dumptape -M $media_label -L $session_label $SCRATCH_MNT" - echo "xfsdump $opts" | _dir_filter - $XFSDUMP_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter -} - - -# -# Do full dump to file -# -_do_dump_file() -{ - _parse_dump_args $* - - echo "Dumping to file..." - opts="$_dump_debug$dump_args -f $dump_file -M $media_label -L $session_label $SCRATCH_MNT" - echo "xfsdump $opts" | _dir_filter - $XFSDUMP_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter -} - -# -# Do full dump to multiple files -# -_do_dump_multi_file() -{ - _parse_dump_args $* - - multi_args="" - - i=0 - while [ $i -lt $multi ] - do - multi_args="$multi_args -f $dump_file.$i -M $media_label.$i" - let i=$i+1 - done - - echo "Dumping to files..." - opts="$_dump_debug$dump_args $multi_args -L $session_label $SCRATCH_MNT" - echo "xfsdump $opts" | _dir_filter - $XFSDUMP_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter -} - - -_prepare_restore_dir() -{ - rm -rf $restore_dir - mkdir $restore_dir || _fail "failed to mkdir $restore_dir" -} - - -# -# Get tape ready and restore dir -# -_prepare_restore() -{ - _prepare_restore_dir - - echo "Rewinding tape" - _rewind -} - -# -# Restore the tape into $restore_dir -# -_do_restore() -{ - _parse_restore_args $* - _prepare_restore - - echo "Restoring from tape..." - opts="$_restore_debug$restore_args -f $dumptape -L $session_label $restore_dir" - echo "xfsrestore $opts" | _dir_filter - $XFSRESTORE_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter -} - -# -# Restore the tape into $restore_dir using -m -# -_do_restore_min() -{ - _parse_restore_args $* - _prepare_restore - - echo "Restoring from tape..." - onemeg=1048576 - opts="$_restore_debug$restore_args -m -b $onemeg -f $dumptape -L $session_label $restore_dir" - echo "xfsrestore $opts" | _dir_filter - $XFSRESTORE_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter -} - -# -# Restore the tape from a dump file -# -_do_restore_file() -{ - _parse_restore_args $* - _prepare_restore_dir - - echo "Restoring from file..." - opts="$_restore_debug$restore_args -f $dump_file -L $session_label $restore_dir" - echo "xfsrestore $opts" | _dir_filter - $XFSRESTORE_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter -} - -# -# Cumulative restore from a file -# Must call _prepare_restore_dir before the first -# (and only the first) call to this function. -# -_do_restore_file_cum() -{ - _parse_restore_args $* - - echo "Restoring cumumlative from file..." - opts="$_restore_debug$restore_args -f $dump_file -r $restore_dir" - echo "xfsrestore $opts" | _dir_filter - $XFSRESTORE_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter -} - -_do_restore_toc() -{ - _parse_restore_args $* - - echo "Contents of dump ..." 
- opts="$_restore_debug$restore_args -f $dump_file -t" - echo "xfsrestore $opts" | _dir_filter - cd $SCRATCH_MNT # for IRIX which needs xfs cwd - $XFSRESTORE_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter_main |\ - _check_quota_file |\ - _check_quota_entries |\ - $AWK_PROG 'NF != 1 { print; next } - {files = sprintf("%s\n%s", files, $1)} - END { print files | "sort" } ' - # the above awk code is to alpha sort only the output - # of files (and not the verbose restore msgs) - cd $here # put back -} - -# -# Restore the tape from multiple dump files -# -_do_restore_multi_file() -{ - _parse_restore_args $* - _prepare_restore_dir - - multi_args="" - - i=0 - while [ $i -lt $multi ] - do - multi_args="$multi_args -f $dump_file.$i" - let i=$i+1 - done - - echo "Restoring from file..." - opts="$_restore_debug$restore_args $multi_args -L $session_label $restore_dir" - echo "xfsrestore $opts" | _dir_filter - $XFSRESTORE_PROG $opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter -} - -# -# Do xfsdump piped into xfsrestore - xfsdump | xfsrestore -# Pass dump options in $1 and restore options in $2, if required. e.g.: -# _do_dump_restore "-o -F" "-R" -# _do_dump_restore "" "-R" -# -# Use -s as we want to dump and restore to the same xfs partition -# -_do_dump_restore() -{ - _parse_dump_args $1 - _parse_restore_args $2 - _prepare_restore_dir - echo "xfsdump|xfsrestore ..." - restore_opts="$_restore_debug$restore_args - $restore_dir" - dump_opts="$_dump_debug$dump_args -s $dump_sdir - $SCRATCH_MNT" - echo "xfsdump $dump_opts | xfsrestore $restore_opts" | _dir_filter - $XFSDUMP_PROG $dump_opts 2>$tmp.dump.mlog | $XFSRESTORE_PROG $restore_opts 2>&1 | tee -a $RESULT_DIR/$seq.full | _dump_filter - _dump_filter <$tmp.dump.mlog -} - -# -# Compare dumped subdirectory with restored dir -# using ls -nR. -# Thus no contents are compared but permissions, sizes, -# owners, etc... are. -# -_ls_compare_sub() -{ - # - # verify we got back what we dumped - # - echo "Comparing listing of dump directory with restore directory" - ls -nR $dump_dir | tee -a $RESULT_DIR/$seq.full | _ls_filter >$tmp.dump_dir - ls -nR $restore_dir/$dump_sdir | tee -a $RESULT_DIR/$seq.full | _ls_filter \ - | sed -e "s#$restore_sdir\/##" >$tmp.restore_dir - - diff -bcs $tmp.dump_dir $tmp.restore_dir | sed -e "s#$tmp#TMP#g" -} - -# -# filter out the date fields -# -_ls_nodate_filter() -{ - $AWK_PROG 'NF == 9 { print $1, $2, $3, $4, $9 }' -} - -# -# _ls_compare_sub but don't compare dates -_ls_nodate_compare_sub() -{ - # - # verify we got back what we dumped - # - echo "Comparing listing of dump directory with restore directory" - ls -nR $dump_dir | tee -a $RESULT_DIR/$seq.full | _ls_filter | _ls_nodate_filter >$tmp.dump_dir - ls -nR $restore_dir/$dump_sdir | tee -a $RESULT_DIR/$seq.full | _ls_filter \ - | _ls_nodate_filter | sed -e "s#$restore_sdir\/##" >$tmp.restore_dir - - diff -bcs $tmp.dump_dir $tmp.restore_dir | sed -e "s#$tmp#TMP#g" -} - -# -# Compare using recursive diff the files of the dumped -# subdirectory. -# This one will compare the contents. -# -_diff_compare_sub() -{ - echo "Comparing dump directory with restore directory" - diff -rs $dump_dir $restore_dir/$dump_sdir | _dir_filter -} - -_get_eas_on_path() -{ - _path=$1 - -# Tim - this is the IRIX way... - # find $_path -exec attr -l {} \; |\ - # awk '{print $9, $2}' |\ - # sed 's/["]//g' |\ - # sort |\ -# and this is now the Linux way... 
- echo "User names" - getfattr --absolute-names -Rh -m user $_path |\ - perl -wn -e ' - if (m/^# file: (\S+)/) { $file = $1 } - elsif (m/^user\.(\w+)/) { print $file, " ",$1,"\n" }' |\ - sort |\ - while read file ea_name; do - attr -g $ea_name $file - done - - if [ "$USE_ATTR_SECURE" = yes ]; then - echo "Security names" - getfattr --absolute-names -Rh -m security $_path |\ - perl -wn -e ' - if (m/^# file: (\S+)/) { $file = $1 } - elsif (m/^security\.(\w+)/) { print $file, " ",$1,"\n" }' |\ - sort |\ - while read file ea_name; do - attr -g $ea_name $file - done - fi - - echo "Root names" - getfattr --absolute-names -Rh -m trusted $_path |\ - perl -wn -e ' - if (m/^# file: (\S+)/) { $file = $1 } - elsif (m/^trusted\.(\w+)/) { print $file, " ",$1,"\n" }' |\ - sort |\ - while read file ea_name; do - attr -R -g $ea_name $file - done -} - -# -# Compare the extended attributes of the files/dirs -# b/w the dumped and restore dirs. -# -# -# Attribute "attr5" had a 8 byte value for /spare1/dump.5460/dir: -# Attribute "attr5" had a 8 byte value for /spare1/restore.5460/dump.5460/dir: -# -_diff_compare_eas() -{ - echo "Comparing dump directory with restore directory" - echo "Looking at the extended attributes (EAs)" - echo "EAs on dump" - _get_eas_on_path $dump_dir | tee $RESULT_DIR/$seq.ea1 | _dir_filter - echo "EAs on restore" - _get_eas_on_path $restore_dir/$dump_sdir \ - | sed -e "s#$restore_sdir\/##" \ - | tee $RESULT_DIR/$seq.ea2 \ - | _dir_filter - diff -s $RESULT_DIR/$seq.ea1 $RESULT_DIR/$seq.ea2 -} - - -# -# Compare using recursive diff the files of the dumped -# filesystem -# -_diff_compare() -{ - echo "Comparing dump directory with restore directory" - diff -rs $SCRATCH_MNT $restore_dir | _dir_filter | _check_quota_diff -} - -# -# Check out the dump inventory -# -_dump_inventory() -{ - $XFSDUMP_PROG $_dump_debug -I | tee -a $RESULT_DIR/$seq.full | _dump_filter_main -} - -# -# Do the xfsinvutil cmd with debug and filters -# Need to set variable: "$middate" to the invutil date -# -_do_invutil() -{ - host=`hostname` - echo "xfsinvutil $_invutil_debug -M $host:$SCRATCH_MNT \"$middate\" $*" >$RESULT_DIR/$seq.full - $XFSINVUTIL_PROG $_invutil_debug $* -M $host:$SCRATCH_MNT "$middate" \ - | tee -a $RESULT_DIR/$seq.full | _invutil_filter -} - -# -# ensure we can find the user quota msg if user quotas are on -# ensure we can find the group quota msg if group quotas are on -# -_check_quota() -{ - usermsg=$1 - groupmsg=$2 - projectmsg=$3 - uquota=0 - gquota=0 - pquota=0 - $here/src/feature -U $SCRATCH_DEV && uquota=1 - $here/src/feature -G $SCRATCH_DEV && gquota=1 - $here/src/feature -P $SCRATCH_DEV && pquota=1 - - $AWK_PROG -v uquota=$uquota -v gquota=$gquota -v pquota=$pquota \ - -v full=$RESULT_DIR/$seq.full -v usermsg="$usermsg" \ - -v groupmsg="$groupmsg" -v projectmsg="$projectmsg" ' - $0 ~ projectmsg { - print "Found project quota:", $0 >>full - found_pquota = 1 - if (!pquota) { - print "Found extra:", $0 - } - next - } - $0 ~ groupmsg { - print "Found group quota:", $0 >>full - found_gquota = 1 - if (!gquota) { - print "Found extra:", $0 - } - next - } - $0 ~ usermsg { - print "Found user quota:", $0 >>full - found_uquota = 1 - if (!uquota) { - print "Found extra:", $0 - } - next - } - { print } - END { - if (uquota && !found_uquota) { - print "Missing user quota msg:", usermsg - } - if (gquota && !found_gquota) { - print "Missing group quota msg:", groupmsg - } - if (pquota && !found_pquota) { - print "Missing project quota msg:", projectmsg - } - } - ' -} - -# -# xfsrestore: 3 directories 
and 40 entries processed -# $5 = 40 -# num entries needs to be reduced by num quota file(s) -# -_check_quota_entries() -{ - uquota=0 - gquota=0 - pquota=0 - $here/src/feature -U $SCRATCH_DEV && uquota=1 - $here/src/feature -G $SCRATCH_DEV && gquota=1 - $here/src/feature -P $SCRATCH_DEV && pquota=1 - $AWK_PROG -v uquota=$uquota -v gquota=$gquota -v pquota=$pquota ' - /entries processed/ { - if (uquota) $5-- - if (gquota) $5-- - if (pquota) $5-- - } - {print}' -} - -# -# Look for: -# xfsdump: saving user quota information for: SCRATCH_MNT -# xfsdump: saving group quota information for: SCRATCH_MNT -# xfsdump: saving project quota information for: SCRATCH_MNT -# xfsrestore: user quota information written to ...' -# xfsrestore: group quota information written to ...' -# xfsrestore: project quota information written to ...' -# -# If on IRIX then look for: -# xfsrestore: use 'edquota' to restore quotas -# Else look for: -# xfsrestore: use 'xfs_quota' to restore quotas -# -_check_quota_dumprestore() -{ - if [ "$HOSTOS" == "IRIX" ]; then - _check_quota 'user quota information' \ - 'group quota information' \ - 'project quota information' | \ - sed "/xfsrestore:.*use 'edquota' to restore quotas/d" - else - _check_quota 'user quota information' \ - 'group quota information' \ - 'project quota information' | \ - sed "/xfsrestore:.*use 'xfs_quota' to restore quotas/d" - fi -} - -# -# Look for: -# Only in RESTORE_DIR: xfsdump_quotas -# Only in RESTORE_DIR: xfsdump_quotas_group -# Only in RESTORE_DIR: xfsdump_quotas_project -# -_check_quota_diff() -{ - _check_quota 'Only in RESTORE_DIR: xfsdump_quotas' \ - 'Only in RESTORE_DIR: xfsdump_quotas_group' \ - 'Only in RESTORE_DIR: xfsdump_quotas_proj' -} - -# -# Look for the quota file in the output -# Ensure that it is there if it should be -# Filter it out so that the output is always the same -# even with no quotas -# -_check_quota_file() -{ - _check_quota 'xfsdump_quotas' 'xfsdump_quotas_group' 'xfsdump_quotas_proj' -} - - -# make sure this script returns success -/bin/true diff --git a/common.filestreams b/common.filestreams deleted file mode 100644 index b3aee270..00000000 --- a/common.filestreams +++ /dev/null @@ -1,229 +0,0 @@ -##/bin/bash -# -# Copyright (c) 2007 Silicon Graphics, Inc. All Rights Reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it would be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# -# Core of filestreams tests. 
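# [Editor's illustration -- not part of this commit] Worked example for the
# _check_quota_entries filter defined above: with user and group quotas
# enabled on $SCRATCH_DEV, a line such as
#     xfsrestore: 3 directories and 40 entries processed
# is rewritten as
#     xfsrestore: 3 directories and 38 entries processed
# so the golden output does not depend on the restored quota dump files.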
-# - -_check_filestreams_support() -{ - local irix_timeout_sysvar="xfs_mfstream_timeout" - local linux_timeout_procvar="/proc/sys/fs/xfs/filestream_centisecs" - local streams_avail="" - if [ "$HOSTOS" == "IRIX" ]; then - # check for the filestreams timeout systune variable in irix - streams_avail=`systune $irix_timeout_sysvar 2>&1 | - perl -ne 'if (/'$irix_timeout_sysvar'\s+=\s+\d+/) {print "true"}'` - else - # check for the filestreams timeout proc entry in linux - [ -f $linux_timeout_procvar ] && streams_avail="true" - fi - - if [ "$streams_avail" == "true" ]; then - return 0 - else - return 1 - fi -} - -_set_stream_timeout_centisecs() -{ - local new_timeout_csecs=$1 - local irix_timeout_sysvar="xfs_mfstream_timeout" - local linux_timeout_procvar="/proc/sys/fs/xfs/filestream_centisecs" - if [ "$HOSTOS" == "IRIX" ]; then - echo y | systune -r $irix_timeout_sysvar $new_timeout_csecs >/dev/null - else - echo $new_timeout_csecs > $linux_timeout_procvar - fi -} - -_do_stream() -{ - local directory_name=$1 - local files=$2 - local file_size=$3 - local bsize=$4 - local iflag=$5 - local dio=$6 - local blocks_in_file=`expr $file_size / $bsize` - - mkdir $directory_name - if [ "$iflag" = "1" -a "$HOSTOS" != "IRIX" ]; then - $XFS_IO_PROG -x -c "chattr +S" $directory_name \ - || _fail "chattr of filestream flag" - fi - cd $directory_name - - local dd_cmd="" - if [ "$HOSTOS" == "IRIX" ]; then - # for irix use lmdd - dd_cmd="lmdd" - [ "$dio" = "1" ] && dd_cmd="$dd_cmd odirect=1" - else - # for linux use dd - dd_cmd="dd" - [ "$dio" = "1" ] && dd_cmd="$dd_cmd oflag=direct" - fi - dd_cmd="$dd_cmd if=/dev/zero bs=${bsize} count=${blocks_in_file}" - - local i=1 - while [ $i -le $files ]; do - $dd_cmd of=frame-${i} 2>&1 | grep -v records | grep -v secs - i=`expr $i + 1` - done -} - -_filter_agno() -{ - # the ag number is in column 4 of xfs_bmap output - perl -ne ' - $ag = (split /\s+/)[4] ; - if ($ag =~ /\d+/) {print "$ag "} ; - ' -} - -_get_stream_ags() -{ - local directory_name=$1 - local stream_ags=`xfs_bmap -vp ${directory_name}/* | _filter_agno` - echo $stream_ags -} - -_check_for_dupes() -{ - # check for duplicate numbers between two space seperated vars - local num_str_one=$1 - local num_str_two=$2 - - local this_num_one - local this_num_two - for this_num_one in $num_str_one; do - for this_num_two in $num_str_two; do - if [ "$this_num_one" == "$this_num_two" ]; then - echo "duplicate AG $this_num_one found" \ - >> $RESULT_DIR/$seq.full - return 1 - fi - done - done - return 0 -} - -_test_streams() { - - echo "# testing $* ...." 
- local agcount="$1" - local agsize="$2" # in MB - local stream_count="$3" - local stream_files="$4" - local stream_file_size=`expr $5 \* 1024 \* 1024` - local use_iflag="$6" - local use_directio="$7" - local expected_result="$8" # "fail" if failure is expected - - local size=`expr $agsize \* 1024 \* 1024 \* $agcount` - _scratch_mkfs_xfs -dsize=$size,agcount=$agcount >/dev/null 2>&1 \ - || _fail "mkfs failed" - - if [ "$use_iflag" = "0" -o "$HOSTOS" == "IRIX" ]; then - # mount using filestreams mount option - _scratch_mount "-o filestreams" \ - || _fail "filestreams mount failed" - else - # test will set inode flag - _scratch_mount || _fail "mount failed" - fi - - cd $SCRATCH_MNT - - # start $stream_count streams - # each stream writes ($stream_files x $stream_file_size)M - echo "# streaming" - local stream_pids="" - local stream_index=1 - while [ $stream_index -le $stream_count ]; do - _do_stream stream${stream_index}-dir $stream_files \ - $stream_file_size 1048576 $use_iflag $use_directio & - stream_pids="$stream_pids $!" - stream_index=`expr $stream_index + 1` - done - - # wait for streams to finish - # XXX wait here not needed? -dgc - wait $stream_pids - - # sync the buffered streams out in parallel - # _get_stream_ags does a xfs_bmap which syncs delayed allocations - echo "# sync AGs..." - local ag_sync_pids="" - stream_index=1 - while [ $stream_index -le $stream_count ]; do - _get_stream_ags stream${stream_index}-dir > /dev/null 2>&1 & - ag_sync_pids="$ag_sync_pids $!" - stream_index=`expr $stream_index + 1` - done - - # wait for syncs to finish - wait $ag_sync_pids - - # confirm streams are in seperate AGs - echo "# checking stream AGs..." - local this_stream_ags="" - local ags_seen="" - local num_streams_with_matching_ags=0 - stream_index=1 - while [ $stream_index -le $stream_count ]; do - this_stream_ags=`_get_stream_ags stream${stream_index}-dir` - echo "stream $stream_index AGs: $this_stream_ags" >> $RESULT_DIR/$seq.full - _check_for_dupes "$ags_seen" "$this_stream_ags" - if [ $? -ne 0 ]; then - # this stream is not in seperate AGs to previous streams - num_streams_with_matching_ags=`expr $num_streams_with_matching_ags + 1` - fi - ags_seen="$ags_seen $this_stream_ags" - stream_index=`expr $stream_index + 1` - done - - _cleanup_streams_umount - if [ "$expected_result" != "fail" ]; then - if [ $num_streams_with_matching_ags -eq 0 ]; then - # all streams in seperate AGs, as expected - echo "+ passed, streams are in seperate AGs" - else - # streams with matching AGs, should be seperate - _fail "- failed, $num_streams_with_matching_ags streams with matching AGs" - fi - else - # expecting streams to have overlapped - if [ $num_streams_with_matching_ags -eq 0 ]; then - # all streams in seperate AGs, should have overlapped - _fail "- streams are in seperate AGs, expected _matching_" - else - # streams with matching AGs, as expected - echo "+ expected failure, matching AGs" - fi - fi - return 0 -} - -_cleanup_streams_umount() -{ - cd / - rm -rf ${SCRATCH_MNT}/stream* - umount $SCRATCH_DEV 2>/dev/null -} diff --git a/common.filter b/common.filter deleted file mode 100644 index bdd64270..00000000 --- a/common.filter +++ /dev/null @@ -1,284 +0,0 @@ -##/bin/bash -# -# Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation. 
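For the AG-overlap accounting in _test_streams above, _check_for_dupes simply compares two whitespace-separated AG lists. A quick sketch with invented AG numbers:

	_check_for_dupes "0 1 2 3" "4 5 6 7"	# returns 0, no AG shared
	_check_for_dupes "0 1 2 3" "3 6 7 8"	# returns 1, AG 3 reused

Each stream whose AG list collides with one already seen bumps num_streams_with_matching_ags, which decides whether the test passes or fails.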
-#
-# This program is distributed in the hope that it would be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write the Free Software Foundation,
-# Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-#
-#
-# standard filters
-#
-
-# Checks that given_value is in range of correct_value +/- tolerance.
-# Tolerance can be an absolute value or a percentage of the correct value
-# (see examples with tolerances below).
-# Outputs suitable message to stdout if it's not in range.
-#
-# A verbose option, -v, may be used as the LAST argument
-#
-# e.g.
-# foo: 0.0298 = 0.03 +/- 5%
-# _within_tolerance "foo" 0.0298 0.03 5%
-#
-# foo: 0.0298 = 0.03 +/- 0.01
-# _within_tolerance "foo" 0.0298 0.03 0.01
-#
-# foo: 0.0298 = 0.03 -0.01 +0.002
-# _within_tolerance "foo" 0.0298 0.03 0.01 0.002
-#
-# foo: verbose output of 0.0298 = 0.03 +/- 5%
-# _within_tolerance "foo" 0.0298 0.03 5% -v
-_within_tolerance()
-{
-  _name=$1
-  _given_val=$2
-  _correct_val=$3
-  _mintol=$4
-  _maxtol=$_mintol
-  _verbose=0
-  _debug=false
-
-  # maxtol arg is optional
-  # verbose arg is optional
-  if [ $# -ge 5 ]
-  then
-     if [ "$5" = "-v" ]
-     then
-        _verbose=1
-     else
-        _maxtol=$5
-     fi
-  fi
-  if [ $# -ge 6 ]
-  then
-     [ "$6" = "-v" ] && _verbose=1
-  fi
-
-  # find min with or without %
-  _mintolerance=`echo $_mintol | sed -e 's/%//'`
-  if [ $_mintol = $_mintolerance ]
-  then
-    _min=`echo "scale=5; $_correct_val-$_mintolerance" | bc`
-  else
-    _min=`echo "scale=5; $_correct_val-$_mintolerance*0.01*$_correct_val" | bc`
-  fi
-
-  # find max with or without %
-  _maxtolerance=`echo $_maxtol | sed -e 's/%//'`
-  if [ $_maxtol = $_maxtolerance ]
-  then
-    _max=`echo "scale=5; $_correct_val+$_maxtolerance" | bc`
-  else
-    _max=`echo "scale=5; $_correct_val+$_maxtolerance*0.01*$_correct_val" | bc`
-  fi
-
-  $_debug && echo "min = $_min"
-  $_debug && echo "max = $_max"
-
-  cat <<EOF >$tmp.bc.1
-scale=5;
-if ($_min <= $_given_val) 1;
-if ($_min > $_given_val) 0;
-EOF
-
-  cat <<EOF >$tmp.bc.2
-scale=5;
-if ($_given_val <= $_max) 1;
-if ($_given_val > $_max) 0;
-EOF
-
-  _above_min=`bc <$tmp.bc.1`
-  _below_max=`bc <$tmp.bc.2`
-
-  rm -f $tmp.bc.[12]
-
-  _in_range=`expr $_above_min \& $_below_max`
-
-  # fix up min, max precision for output
-  # can vary for 5.3, 6.2
-
-  # remove any trailing zeroes from min, max if they have fractional parts
-  _min=`echo $_min | sed -e '/\./s/0*$//' -e 's/\.$//'`
-  _max=`echo $_max | sed -e '/\./s/0*$//' -e 's/\.$//'`
-
-  if [ $_in_range -eq 1 ]
-  then
-    [ $_verbose -eq 1 ] && echo $_name is in range
-    return 0
-  else
-    [ $_verbose -eq 1 ] && echo $_name has value of $_given_val
-    [ $_verbose -eq 1 ] && echo $_name is NOT in range $_min ..
$_max - return 1 - fi -} - -# ctime(3) dates -# -_filter_date() -{ - sed \ - -e 's/[A-Z][a-z][a-z] [A-z][a-z][a-z] *[0-9][0-9]* [0-9][0-9]:[0-9][0-9]:[0-9][0-9] [0-9][0-9][0-9][0-9]$/DATE/' -} - -# prints filtered output on stdout, values (use eval) on stderr -# -_filter_mkfs() -{ - set - - perl -ne ' - if (/^meta-data=([\w,|\/.-]+)\s+isize=(\d+)\s+agcount=(\d+), agsize=(\d+) blks/) { - print STDERR "ddev=$1\nisize=$2\nagcount=$3\nagsize=$4\n"; - print STDOUT "meta-data=DDEV isize=XXX agcount=N, agsize=XXX blks\n"; - } - if (/^\s+=\s+sectsz=(\d+)\s+attr=(\d+)/) { - print STDERR "sectsz=$1\nattr=$2\n"; - } - if (/^data\s+=\s+bsize=(\d+)\s+blocks=(\d+), imaxpct=(\d+)/) { - print STDERR "dbsize=$1\ndblocks=$2\nimaxpct=$3\n"; - print STDOUT "data = bsize=XXX blocks=XXX, imaxpct=PCT\n"; - } - if (/^\s+=\s+sunit=(\d+)\s+swidth=(\d+) blks/) { - print STDERR "sunit=$1\nswidth=$2\nunwritten=1\n"; - print STDOUT " = sunit=XXX swidth=XXX, unwritten=X\n"; - } - if (/^naming\s+=version\s+(\d+)\s+bsize=(\d+)/) { - print STDERR "dirversion=$1\ndirbsize=$2\n"; - print STDOUT "naming =VERN bsize=XXX\n"; - } - if (/^log\s+=(internal log|[\w|\/.-]+)\s+bsize=(\d+)\s+blocks=(\d+),\s+version=(\d+)/ || - /^log\s+=(internal log|[\w|\/.-]+)\s+bsize=(\d+)\s+blocks=(\d+)/) { - print STDERR "ldev=\"$1\"\nlbsize=$2\nlblocks=$3\nlversion=$4\n"; - print STDOUT "log =LDEV bsize=XXX blocks=XXX\n"; - } - if (/^\s+=\s+sectsz=(\d+)\s+sunit=(\d+) blks/) { - print STDERR "logsectsz=$1\nlogsunit=$2\n\n"; - } - if (/^realtime\s+=([\w|\/.-]+)\s+extsz=(\d+)\s+blocks=(\d+), rtextents=(\d+)/) { - print STDERR "rtdev=$1\nrtextsz=$2\nrtblocks=$3\nrtextents=$4\n"; - print STDOUT "realtime =RDEV extsz=XXX blocks=XXX, rtextents=XXX\n"; - }' -} - - -# prints the bits we care about in growfs -# -_filter_growfs() -{ - perl -ne ' - if (/^data\s+=\s+bsize=(\d+)\s+blocks=(\d+), imaxpct=(\d+)/) { - print "xfs_growfs --BlockSize=$1 --Blocks=$2\n"; - } - elsif (/^data/) { - print; - }' -} - -_filter_dd() -{ - $AWK_PROG ' - /records in/ { next } - /records out/ { next } - /No space left on device/ { print " !!! disk full (expected)" - next } - { print " *** " $0 } - ' -} - -common_line_filter() -{ - perl -ne 'if (/.*:(.*)/) { - if ( "$last_line" ne "$1" ) { print "$_"; $first_match=1; } - elsif ( $first_match==1 ) { print "*\n"; $first_match=0; } - $last_line="$1"; - } - else { - print $_; $last_line=$_; - }' -} - -_filter_xfs_io() -{ - sed -e "s/[0-9/.]* [GMKiBbytes]*, [0-9]* ops\; [0-9/:. sec]* ([inf0-9/.]* [EPGMKiBbytes]*\/sec and [inf0-9/.]* ops\/sec)/XXX Bytes, X ops\; XX:XX:XX.X (XXX YYY\/sec and XXX ops\/sec)/" -} - -_filter_xfs_io_unique() -{ - common_line_filter | _filter_xfs_io -} - -_filter_test_dir() -{ - sed -e "s,$TEST_DEV,TEST_DEV,g" -e "s,$TEST_DIR,TEST_DIR,g" -} - -_filter_scratch() -{ - sed -e "s,$SCRATCH_DEV,SCRATCH_DEV,g" \ - -e "s,$SCRATCH_MNT,SCRATCH_MNT,g" \ - -e "/.use_space/d" -} - -# Turn any device in the scratch pool into SCRATCH_DEV -_filter_scratch_pool() -{ - FILTER_STRINGS=`echo $SCRATCH_DEV_POOL | sed -e 's/\s\+/\\\|/g'` - sed -e "s,$FILTER_STRINGS,SCRATCH_DEV,g" -} - -_filter_spaces() -{ - sed -e 's/ [ ]*/ /g' -} - -# Account for different "ln" failure messages -_filter_ln() -{ - sed -e "s,\(creating symbolic link .*\) to .*: ,\1," \ - -e "s,failed to create,creating," -} - -# If given an arg, filter *that* UUID string -# Otherwise look for something that looks like a generic UUID -_filter_uuid() -{ - if [ ! 
-z $1 ]; then - UUID=$1 - sed -e "s/\(uuid:\) $UUID/\1 /i" - else - sed -e "s/\(uuid:\) *[0-9a-f-][0-9a-f-]*/\1 /i" - fi -} - -# Filter out sizes like 6.14MB etc -_filter_size() -{ - sed -e "s/[0-9\.]\+\s\?[b|k|m|g|t][b]\?//ig" -} - -# Convert string read from stdin like 128K to bytes and print it to stdout -_filter_size_to_bytes() -{ - read size - suffix=${size:${#size}-1} - mul=1 - case $suffix in - k|K) mul=1024 ;; - m|M) mul=$((1024*1024)) ;; - g|G) mul=$((1024*1024*1024)) ;; - t|T) mul=$((1024*1024*1024*1024)) ;; - esac - echo $((${size:0:${#size}-1}*$mul)) -} - -# make sure this script returns success -/bin/true diff --git a/common.filter.btrfs b/common.filter.btrfs deleted file mode 100644 index 4aaaa9b1..00000000 --- a/common.filter.btrfs +++ /dev/null @@ -1,60 +0,0 @@ -# Filters for btrfs command output - -. ./common.filter.btrfs - -# Some, but not all, commands emit "Btrfs " -_filter_btrfs_version() -{ - sed -e "s/^Btrfs.*//g" -} - -_filter_devid() -{ - sed -e "s/\(devid\s\+\)[0-9]\+/\1 /g" -} - -# If passed a number as first arg, filter that number of devices -# If passed a UUID as second arg, filter that exact UUID -_filter_btrfs_filesystem_show() -{ - if [ ! -z $1 ]; then - NUMDEVS=$1 - NUM_SUBST="" - else - NUMDEVS="[0-9]\+" - NUM_SUBST="" - fi - - UUID="" - if [ ! -z $2 ]; then - UUID=$2 - fi - - # the uniq collapses all device lines into 1 - _filter_uuid $UUID | _filter_scratch | _filter_scratch_pool | \ - _filter_size | _filter_btrfs_version | _filter_devid | \ - sed -e "s/\(Total devices\) $NUMDEVS/\1 $NUM_SUBST/g" | \ - uniq -} - -# This eliminates all numbers, and shows only unique lines, -# to accomodate a varying nr. of devices. -# If given an argument, make sure we saw that many devices -# in total. -_filter_btrfs_device_stats() -{ - if [ ! -z $1 ]; then - NUMDEVS=$1 - UNIQ_OPT="-c" - else - NUMDEVS="thiswillnotmatch" - UNIQ_OPT="" - fi - - _filter_scratch | _filter_scratch_pool | \ - sed -e "s/[0-9]\+$//g" | sort | uniq $UNIQ_OPT | \ - sed -e "s/$NUMDEVS / /g" -} - -# make sure this script returns success -/bin/true diff --git a/common.log b/common.log deleted file mode 100644 index 727bb6c7..00000000 --- a/common.log +++ /dev/null @@ -1,475 +0,0 @@ -##/bin/bash -# -# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it would be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
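As a quick check of the size-conversion helper defined above (the input values are arbitrary):

	echo 128K | _filter_size_to_bytes	# prints 131072
	echo 4M   | _filter_size_to_bytes	# prints 4194304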
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# -# common routines for log testing -# Created by dxm@sgi.com & tes@sgi.com -# - -fulldir=$RESULT_DIR/$seq.fulldir -rm -rf $fulldir - -_cleanup_logfiles() -{ - if [ $status -eq 0 ]; then - # don't keep these files around unless something went wrong - rm -rf $fulldir - fi -} - -_full() -{ - echo "" >>$RESULT_DIR/$seq.full - echo "*** $* ***" >>$RESULT_DIR/$seq.full - echo "" >>$RESULT_DIR/$seq.full -} - -_echofull() -{ - echo "" | tee -a $RESULT_DIR/$seq.full - echo "*** $* ***" | tee -a $RESULT_DIR/$seq.full - echo "" | tee -a $RESULT_DIR/$seq.full -} - -# Handle the operations which get split over Log Record -# boundaries. -# Oper (379)..... flags: CONTINUE -# ... -# Oper (0)....... flags: WAS_CONT END -# -# or -# -# Oper (379)..... flags: none -# ... -# Oper (0)....... flags: none -# -_filter_opnum() -{ - $AWK_PROG ' -BEGIN { - debug = 0 - } -/^Oper/ && debug { - printf "line = %s\n", $0 - } -/^Oper/ { - was_cont = 0 - } -/^Oper/ && /flags: CONTINUE/ { - # this will be the first op of split region - $9 = "none" # overwrite CONTINUE flags - print - print "Not printing rest" - was_cont = 1 - next - } -/^Oper/ && /flags: WAS_CONT END/ { - # this will be the last op of split region - # skip over was-continued op - # we assume there can be only 1 - was_cont = 1 - next - } -(was_cont == 1) { - # skip over any continued op stuff - next - } - {print} - ' -} - -# -# Filter out things that can change -# We have complexities which change when log is sync'ed at different -# times. -# Example1: DATA FORK EXTENTS -# These will not show up if inode is sync'ed sooner -# /DATA FORK EXTENTS/d; -# /INODE:/s/flags:0x5/flags:0x1/g; -# define XFS_ILOG_CORE 0x001 /* log standard inode fields */ -# define XFS_ILOG_DEXT 0x004 /* log i_df.if_extents */ -# -# - -_filter_logprint() -{ - _fix_malloc |\ - sed ' - s/ver:[0-9]/ver:/; - s/version [0-9] format [0-9]/version format /; - s/data device: 0x[0-9a-f][0-9a-f]*/data device: /; - s/log device: 0x[0-9a-f][0-9a-f]*/log device: /; - s/log file: \".*\"/log device: /; - s/daddr: [0-9][0-9]*/daddr: /; - s/length: [0-9][0-9]*/length: /; - s/length: [0-9][0-9]*/length: /; - s/^cycle num overwrites: .*$/cycle num overwrites: /; - s/tid: [0-9a-f][0-9a-f]*/tid: /; - s/tid:0x[0-9a-f][0-9a-f]*/tid:/; - s/q:0x[0-9a-f][0-9a-f]*/q:/; - s/a:0x[0-9a-f][0-9a-f]*/a:/g; - s/blkno:0x[0-9a-f][0-9a-f]*/blkno:/g; - s/blkno: *[0-9][0-9]* (0x[0-9a-f]*)/blkno: ()/g; - s/blkno: *[0-9][0-9]*/blkno: /g; - s/boff: [0-9][0-9]*/boff: /g; - s/len: *[0-9][0-9]*/len:/g; - /BUF:/s/[ ]*flags:.*$//; - /zeroed blocks/s/[0-9][0-9]*//g; - /cleared blocks/d; - /log tail/s/[0-9][0-9]*//g; - s/atime:[0-9a-fx]* *mtime:[0-9a-fx]* *ctime:[0-9a-fx]*/atime:/g; + s/blkno:0x[0-9a-f][0-9a-f]*/blkno:/g; + s/blkno: *[0-9][0-9]* (0x[0-9a-f]*)/blkno: ()/g; + s/blkno: *[0-9][0-9]*/blkno: /g; + s/boff: [0-9][0-9]*/boff: /g; + s/len: *[0-9][0-9]*/len:/g; + /BUF:/s/[ ]*flags:.*$//; + /zeroed blocks/s/[0-9][0-9]*//g; + /cleared blocks/d; + /log tail/s/[0-9][0-9]*//g; + s/atime:[0-9a-fx]* *mtime:[0-9a-fx]* *ctime:[0-9a-fx]*/atime:
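# A worked example of the operation filter defined above, using the pattern
# from its own comment block (the operation numbers are illustrative): a
# continued pair such as
#
#	Oper (379): ... flags: CONTINUE
#	Oper (0):   ... flags: WAS_CONT END
#
# comes out of _filter_opnum as the first line with its flags rewritten to
# "none", followed by a "Not printing rest" marker, while the WAS_CONT END
# half is dropped, so a transaction split across log records compares equal
# to one that was not split. _filter_logprint then normalises the remaining
# device numbers, tids, block numbers and timestamps before the output is
# diffed against the expected results.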