From: Pavel Reichl
Date: Thu, 22 Sep 2022 18:55:01 +0000 (+0200)
Subject: cleanup: remove leftover files after test finishes
X-Git-Tag: v2022.10.09~10
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=ce018e24f0a5b30ca8bc624494f1fd4d509bba8a;p=xfstests-dev.git

cleanup: remove leftover files after test finishes

Fix a file leak in _get_max_file_size - this is mostly a problem for
filesystems that lack support for sparse files. There seem to be some
seek_sanity_testfile files that are not cleaned up and take up space:

-rwxr-xr-x. 1 root root 8.0G Sep 22 13:39 seek_sanity_testfile10

generic/394: Clean up test files taking up space:

-rwxr-xr-x. 1 root root 1.0G Sep 22 14:58 394.1183899
-rwxr-xr-x. 1 root root 1.0G Sep 22 14:58 394.1183899-1
-rwxr-xr-x. 1 root root 0 Sep 22 14:58 394.1183899+1

Signed-off-by: Pavel Reichl
Reviewed-by: Zorro Lang
Signed-off-by: Zorro Lang
---

ce018e24f0a5b30ca8bc624494f1fd4d509bba8a
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..ad9f4543
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,211 @@
+*.lo
+*.o
+*.la
+.dep
+.libs
+.ltdep
+.*
+*.state
+tags
+
+/local.config
+/results
+
+# autogenerated group files
+/tests/*/group.list
+
+# autoconf generated files
+/aclocal.m4
+/autom4te.cache
+/configure
+/config.guess
+/config.log
+/config.status
+/config.sub
+/m4/libtool.m4
+/m4/ltoptions.m4
+/m4/ltsugar.m4
+/m4/ltversion.m4
+/m4/lt~obsolete.m4
+
+# libtool
+/libtool
+/install-sh
+/ltmain.sh
+
+# build system
+/include/builddefs
+/include/config.h
+/include/config.h.in
+
+# quilt
+/patches
+/.pc
+
+# custom config files
+/configs/*.config
+
+# ltp/ binaries
+/ltp/aio-stress
+/ltp/doio
+/ltp/fsstress
+/ltp/fsx
+/ltp/growfiles
+/ltp/iogen
+
+# src/ binaries
+/src/af_unix
+/src/alloc
+/src/allocstale
+/src/append_reader
+/src/append_writer
+/src/attr_replace_test
+/src/attr-list-by-handle-cursor-test
+/src/bstat
+/src/bulkstat_null_ocount
+/src/bulkstat_unlink_test
+/src/bulkstat_unlink_test_modified
+/src/checkpoint_journal
+/src/chprojid_fail
+/src/cloner
+/src/dbtest
+/src/deduperace
+/src/detached_mounts_propagation
+/src/devzero
+/src/dio-interleaved
+/src/dio-invalidate-cache
+/src/dirhash_collide
+/src/dirperf
+/src/dirstress
+/src/e4compact
+/src/ext4_resize
+/src/fault
+/src/feature
+/src/fiemap-tester
+/src/fill
+/src/fill2
+/src/fs_perms
+/src/fscrypt-crypt-util
+/src/fssum
+/src/fstest
+/src/fsync-err
+/src/fsync-tester
+/src/ftrunc
+/src/genhashnames
+/src/getdevicesize
+/src/getpagesize
+/src/godown
+/src/holes
+/src/holetest
+/src/itrash
+/src/listxattr
+/src/locktest
+/src/loggen
+/src/looptest
+/src/lstat64
+/src/makeextents
+/src/metaperf
+/src/mkswap
+/src/mmapcat
+/src/mmap-rw-fault
+/src/mmap-write-concurrent
+/src/multi_open_unlink
+/src/nametest
+/src/nsexec
+/src/open_by_handle
+/src/permname
+/src/preallo_rw_pattern_reader
+/src/preallo_rw_pattern_writer
+/src/punch-alternating
+/src/pwrite_mmap_blocked
+/src/randholes
+/src/rename
+/src/renameat2
+/src/resvtest
+/src/runas
+/src/seek_copy_test
+/src/seek_sanity_test
+/src/splice2pipe
+/src/splice-test
+/src/stale_handle
+/src/stat_test
+/src/swapon
+/src/t_access_root
+/src/t_attr_corruption
+/src/t_create_long_dirs
+/src/t_create_short_dirs
+/src/t_dir_offset
+/src/t_dir_offset2
+/src/t_dir_type
+/src/t_encrypted_d_revalidate
+/src/t_enospc
+/src/t_ext4_dax_inline_corruption
+/src/t_ext4_dax_journal_corruption
+/src/t_futimens
+/src/t_get_file_time
+/src/t_getcwd
+/src/t_holes
+/src/t_immutable
+/src/t_mmap_collision
+/src/t_mmap_cow_memory_failure
+/src/t_mmap_cow_race
+/src/t_mmap_dio +/src/t_mmap_fallocate +/src/t_mmap_stale_pmd +/src/t_mmap_write_ro +/src/t_mmap_writev +/src/t_mmap_writev_overlap +/src/t_mtab +/src/t_ofd_locks +/src/t_open_tmpfiles +/src/t_readdir_1 +/src/t_readdir_2 +/src/t_readdir_3 +/src/t_rename_overwrite +/src/t_stripealign +/src/t_truncate_cmtime +/src/t_truncate_self +/src/test-nextquota +/src/testx +/src/trunc +/src/truncfile +/src/unwritten_mmap +/src/unwritten_sync +/src/uring_read_fault +/src/usemem +/src/uuid_ioctl +/src/writemod +/src/writev_on_pagefault +/src/xfsctl +/src/aio-dio-regress/aio-dio-append-write-fallocate-race +/src/aio-dio-regress/aio-dio-append-write-read-race +/src/aio-dio-regress/aio-dio-cow-race +/src/aio-dio-regress/aio-dio-cycle-write +/src/aio-dio-regress/aio-dio-eof-race +/src/aio-dio-regress/aio-dio-extend-stat +/src/aio-dio-regress/aio-dio-fcntl-race +/src/aio-dio-regress/aio-dio-hole-filling-race +/src/aio-dio-regress/aio-dio-invalidate-failure +/src/aio-dio-regress/aio-dio-invalidate-readahead +/src/aio-dio-regress/aio-dio-subblock-eof-read +/src/aio-dio-regress/aio-dio-write-verify +/src/aio-dio-regress/aio-free-ring-with-bogus-nr-pages +/src/aio-dio-regress/aio-io-setup-with-nonwritable-context-pointer +/src/aio-dio-regress/aio-last-ref-held-by-io +/src/aio-dio-regress/aiocp +/src/aio-dio-regress/aiodio_sparse2 +/src/vfs/vfstest +/src/vfs/mount-idmapped +/src/log-writes/replay-log +/src/perf/*.pyc + +# Symlinked files +/tests/generic/035.out +/tests/generic/050.out +/tests/xfs/033.out +/tests/xfs/071.out +/tests/xfs/096.out + +# cscope files +cscope.* +ncscope.* diff --git a/LICENSES/GPL-2.0 b/LICENSES/GPL-2.0 new file mode 100644 index 00000000..b8db91d3 --- /dev/null +++ b/LICENSES/GPL-2.0 @@ -0,0 +1,353 @@ +Valid-License-Identifier: GPL-2.0 +Valid-License-Identifier: GPL-2.0+ +SPDX-URL: https://spdx.org/licenses/GPL-2.0.html +Usage-Guide: + To use this license in source code, put one of the following SPDX + tag/value pairs into a comment according to the placement + guidelines in the licensing rules documentation. + For 'GNU General Public License (GPL) version 2 only' use: + SPDX-License-Identifier: GPL-2.0 + For 'GNU General Public License (GPL) version 2 or any later version' use: + SPDX-License-Identifier: GPL-2.0+ +License-Text: + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. 
+ +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..f6f91a4d --- /dev/null +++ b/Makefile @@ -0,0 +1,108 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (C) 2000-2008, 2011 SGI All Rights Reserved. +# +ifeq ("$(origin V)", "command line") + BUILD_VERBOSE = $(V) +endif +ifndef BUILD_VERBOSE + BUILD_VERBOSE = 0 +endif + +ifeq ($(BUILD_VERBOSE),1) + Q = +else + Q = @ +endif + +MAKEOPTS = --no-print-directory Q=$(Q) + +TOPDIR = . +HAVE_BUILDDEFS = $(shell test -f $(TOPDIR)/include/builddefs && echo yes || echo no) + +ifeq ($(HAVE_BUILDDEFS), yes) +include $(TOPDIR)/include/builddefs +else +export TESTS_DIR = tests +endif + +SRCTAR = $(PKG_NAME)-$(PKG_VERSION).tar.gz + +CONFIGURE = configure include/config.h include/config.h.in \ + aclocal.m4 config.guess config.sub install-sh ltmain.sh \ + m4/libtool.m4 m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \ + m4/lt~obsolete.m4 +LSRCFILES = configure configure.ac aclocal.m4 README VERSION +LDIRT = config.log .ltdep .dep config.status config.cache confdefs.h \ + conftest* check.log check.time libtool include/builddefs + +ifeq ($(HAVE_BUILDDEFS), yes) +LDIRT += $(SRCTAR) +endif + +LIB_SUBDIRS = include lib +TOOL_SUBDIRS = ltp src m4 common + +SUBDIRS = $(LIB_SUBDIRS) $(TOOL_SUBDIRS) $(TESTS_DIR) + +default: include/builddefs +ifeq ($(HAVE_BUILDDEFS), no) + $(Q)$(MAKE) $(MAKEOPTS) $@ +else + $(Q)$(MAKE) $(MAKEOPTS) $(SUBDIRS) +endif + +# tool/lib dependencies +$(TOOL_SUBDIRS): $(LIB_SUBDIRS) + +ifeq ($(HAVE_BUILDDEFS), yes) +include $(BUILDRULES) +else +clean: # if configure hasn't run, nothing to clean +endif + +configure: configure.ac + libtoolize -cfi + cp include/install-sh . + aclocal -I m4 + autoheader + autoconf + +include/builddefs include/config.h: configure + ./configure \ + --libexecdir=/usr/lib \ + --exec_prefix=/var/lib + +aclocal.m4:: + aclocal --acdir=`pwd`/m4 --output=$@ + +depend: include/builddefs $(addsuffix -depend,$(SUBDIRS)) + +install: default $(addsuffix -install,$(SUBDIRS)) + $(INSTALL) -m 755 -d $(PKG_LIB_DIR) + $(INSTALL) -m 755 check $(PKG_LIB_DIR) + $(INSTALL) -m 644 randomize.awk $(PKG_LIB_DIR) + +# Nothing. +install-dev install-lib: + +%-install: + $(MAKE) $(MAKEOPTS) -C $* install + +realclean distclean: clean + $(Q)rm -f $(LDIRT) $(CONFIGURE) + $(Q)rm -rf autom4te.cache Logs + +dist: include/builddefs include/config.h default +ifeq ($(HAVE_BUILDDEFS), no) + $(Q)$(MAKE) $(MAKEOPTS) -C . $@ +else + $(Q)$(MAKE) $(MAKEOPTS) $(SRCTAR) +endif + +$(SRCTAR) : default + $(Q)git archive --prefix=$(PKG_NAME)-$(PKG_VERSION)/ --format=tar \ + v$(PKG_VERSION) > $(PKG_NAME)-$(PKG_VERSION).tar + $(Q)$(TAR) --transform "s,^,$(PKG_NAME)-$(PKG_VERSION)/," \ + -rf $(PKG_NAME)-$(PKG_VERSION).tar $(CONFIGURE) + $(Q)$(ZIP) $(PKG_NAME)-$(PKG_VERSION).tar + echo Wrote: $@ diff --git a/Makepkgs b/Makepkgs new file mode 100755 index 00000000..04e029a7 --- /dev/null +++ b/Makepkgs @@ -0,0 +1,88 @@ +#! /bin/bash +# +# Make whichever packages have been requested. +# Defaults to RPMs. +# +LOGDIR=Logs + +type=rpm +verbose=false + +MAKE=${MAKE:-make} +test ! -z "$MAKE" && make=$MAKE + +for opt in $* +do + case "$opt" in + clean) + ;; # ignored, kept for backward compatibility + rpm) + type=rpm ;; + debian) + type=debian ;; + verbose) + verbose=true ;; + *) + echo "Usage: Makepkgs [verbose] [debian|rpm]"; exit 1 ;; + esac +done + +# start with a clean manifest +test -f files.rpm && rm -f files.rpm +test -f filesdevel.rpm && rm -f filesdevel.rpm +test -f fileslib.rpm && rm -f fileslib.rpm + +test ! 
-d $LOGDIR && mkdir $LOGDIR +rm -rf $LOGDIR/* > /dev/null 2>&1 + +# build Debian packages, cleans itself before starting +SUDO=${SUDO:-sudo} +test ! -z "$SUDO" && sudo=$SUDO +if [ $type = debian ] ; then + LOGDEB=`pwd` + LOGDEB=../`basename $LOGDEB`.log + echo "== Debian build, log is $LOGDEB"; echo + if $verbose ; then + dpkg-buildpackage -r$SUDO | tee $LOGDEB + else + dpkg-buildpackage -r$SUDO > $LOGDEB || exit 1 + fi + exit 0 +fi + +# build RPM packages - manual clean before starting +echo "== clean, log is $LOGDIR/clean" +if $verbose ; then + $MAKE clean 2>&1 | tee $LOGDIR/clean +else + $MAKE clean > $LOGDIR/clean 2>&1 || exit 1 +fi + +echo +echo "== configure, log is $LOGDIR/configure" +rm -f .census # force configure to run here +if $verbose ; then + $MAKE configure 2>&1 | tee $LOGDIR/configure +else + $MAKE configure > $LOGDIR/configure 2>&1 || exit 1 +fi + +echo +echo "== default, log is $LOGDIR/default" +if $verbose ; then + $MAKE default 2>&1 | tee $LOGDIR/default +else + $MAKE default > $LOGDIR/default 2>&1 || exit 1 +fi + +echo +echo "== dist, log is $LOGDIR/dist" +[ ! -f .census ] && touch .census +if $verbose ; then + $MAKE -C build dist 2>&1 | tee $LOGDIR/dist +else + $MAKE -C build dist > $LOGDIR/dist 2>&1 || exit 1 + grep '^Wrote:' $LOGDIR/dist | sed -e 's/\.\.\/\.\.\///' +fi + +exit 0 diff --git a/README b/README new file mode 100644 index 00000000..80d148be --- /dev/null +++ b/README @@ -0,0 +1,461 @@ +_______________________ +BUILDING THE FSQA SUITE +_______________________ + +Ubuntu or Debian +---------------- + +1. Make sure that package list is up-to-date and install all necessary packages: + + $ sudo apt-get update + $ sudo apt-get install acl attr automake bc dbench dump e2fsprogs fio gawk \ + gcc git indent libacl1-dev libaio-dev libcap-dev libgdbm-dev libtool \ + libtool-bin liburing-dev libuuid1 lvm2 make psmisc python3 quota sed \ + uuid-dev uuid-runtime xfsprogs linux-headers-$(uname -r) sqlite3 + +2. Install packages for the filesystem(s) being tested: + + $ sudo apt-get install exfatprogs f2fs-tools ocfs2-tools udftools xfsdump \ + xfslibs-dev + + For OverlayFS install: + - see https://github.com/hisilicon/overlayfs-progs + +Fedora +------ + +1. Install all necessary packages from standard repository: + + $ sudo yum install acl attr automake bc dbench dump e2fsprogs fio gawk gcc \ + gdbm-devel git indent kernel-devel libacl-devel libaio-devel \ + libcap-devel libtool liburing-devel libuuid-devel lvm2 make psmisc \ + python3 quota sed sqlite udftools xfsprogs + +2. Install packages for the filesystem(s) being tested: + + $ sudo yum install btrfs-progs exfatprogs f2fs-tools ocfs2-tools xfsdump \ + xfsprogs-devel + + For OverlayFS build and install: + - see https://github.com/hisilicon/overlayfs-progs + +RHEL or CentOS +-------------- + +1. Enable EPEL repository: + - see https://docs.fedoraproject.org/en-US/epel/#How_can_I_use_these_extra_packages.3F + +2. Install all necessary packages which are available from standard repository + and EPEL: + + $ sudo yum install acl attr automake bc dbench dump e2fsprogs fio gawk gcc \ + gdbm-devel git indent kernel-devel libacl-devel libaio-devel \ + libcap-devel libtool libuuid-devel lvm2 make psmisc python3 quota sed \ + sqlite udftools xfsprogs + + Or, EPEL packages could be compiled from sources, see: + - https://dbench.samba.org/web/download.html + - https://www.gnu.org/software/indent/ + +3. Build and install 'liburing': + - see https://github.com/axboe/liburing. + +4. 
Install packages for the filesystem(s) being tested: + + For XFS install: + $ sudo yum install xfsdump xfsprogs-devel + + For exfat install: + $ sudo yum install exfatprogs + + For f2fs build and install: + - see https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs-tools.git/about/ + + For ocfs2 build and install: + - see https://github.com/markfasheh/ocfs2-tools + + For OverlayFS build and install: + - see https://github.com/hisilicon/overlayfs-progs + +Build and install test, libs and utils +-------------------------------------- + +$ git clone git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git +$ cd xfstests-dev +$ make +$ sudo make install + +Setup Environment +----------------- + +1. Compile XFS/EXT4/BTRFS/etc. into your kernel or load as module. For example, + for XFS, enable XFS_FS in your kernel configuration, or compile it as a + module and load it with 'sudo modprobe xfs'. Most of the distributions will + have these filesystems already in the kernel/as module. + +2. Create TEST device: + - format as the filesystem type you wish to test. + - should be at least 10GB in size. + - optionally populate with destroyable data. + - device contents may be destroyed. + +3. (optional) Create SCRATCH device. + - many tests depend on the SCRATCH device existing. + - not need to be formatted. + - should be at least 10GB in size. + - must be different to TEST device. + - device contents will be destroyed. + +4. (optional) Create SCRATCH device pool. + - needed for BTRFS testing + - specifies 3 or more independent SCRATCH devices via the SCRATCH_DEV_POOL + variable e.g SCRATCH_DEV_POOL="/dev/sda /dev/sdb /dev/sdc" + - device contents will be destroyed. + - SCRATCH device should be left unset, it will be overridden + by the SCRATCH_DEV_POOL implementation. + +5. Copy local.config.example to local.config and edit as needed. The TEST_DEV + and TEST_DIR are required. + +6. (optional) Create fsgqa test users and groups: + + $ sudo useradd -m fsgqa + $ sudo useradd 123456-fsgqa + $ sudo useradd fsgqa2 + $ sudo groupadd fsgqa + + The "123456-fsgqa" user creation step can be safely skipped if your system + doesn't support names starting with digits, only a handful of tests require + it. + +7. (optional) If you wish to run the udf components of the suite install + mkudffs. Also download and build the Philips UDF Verification Software from + https://www.lscdweb.com/registered/udf_verifier.html, then copy the udf_test + binary to xfstests/src/. + + +For example, to run the tests with loopback partitions: + + # xfs_io -f -c "falloc 0 10g" test.img + # xfs_io -f -c "falloc 0 10g" scratch.img + # mkfs.xfs test.img + # losetup /dev/loop0 ./test.img + # losetup /dev/loop1 ./scratch.img + # mkdir -p /mnt/test && mount /dev/loop0 /mnt/test + # mkdir -p /mnt/scratch + +The config for the setup above is: + + $ cat local.config + export TEST_DEV=/dev/loop0 + export TEST_DIR=/mnt/test + export SCRATCH_DEV=/dev/loop1 + export SCRATCH_MNT=/mnt/scratch + +From this point you can run some basic tests, see 'USING THE FSQA SUITE' below. + +Additional Setup +---------------- + +Some tests require additional configuration in your local.config. Add these +variables to a local.config and keep that file in your workarea. Or add a case +to the switch in common/config assigning these variables based on the hostname +of your test machine. Or use 'setenv' to set them. 
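+
+As an illustration only (the device path and values here are examples,
+not recommendations), the extra variables described in the sections
+below can simply be appended to local.config:
+
+    export TEST_LOGDEV=/dev/sdc1
+    export KEEP_DMESG=yes
+
+or exported in the shell before running './check':
+
+    $ export DIFF_LENGTH=0
+    $ ./check generic/001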
+
+Extra TEST device specifications:
+ - Set TEST_LOGDEV to "device for test-fs external log"
+ - Set TEST_RTDEV to "device for test-fs realtime data"
+ - If TEST_LOGDEV and/or TEST_RTDEV are set, they will always be used.
+ - Set FSTYP to "the filesystem you want to test"; the filesystem type is
+   normally derived from the TEST_DEV device, but you may want to override
+   it. If unset, the default is 'xfs'.
+
+Extra SCRATCH device specifications:
+ - Set SCRATCH_LOGDEV to "device for scratch-fs external log"
+ - Set SCRATCH_RTDEV to "device for scratch-fs realtime data"
+ - If SCRATCH_LOGDEV and/or SCRATCH_RTDEV are set, the USE_EXTERNAL
+   environment variable set to "yes" will enable their use.
+
+Tape device specification for xfsdump testing:
+ - Set TAPE_DEV to "tape device for testing xfsdump".
+ - Set RMT_TAPE_DEV to "remote tape device for testing xfsdump".
+ - Note that if testing xfsdump, make sure the tape devices have a tape
+   which can be overwritten.
+
+Extra XFS specification:
+ - Set TEST_XFS_REPAIR_REBUILD=1 to have _check_xfs_filesystem run
+   xfs_repair -n to check the filesystem; xfs_repair to rebuild metadata
+   indexes; and xfs_repair -n (a third time) to check the results of the
+   rebuilding.
+ - Set FORCE_XFS_CHECK_PROG=yes to have _check_xfs_filesystem run xfs_check
+   to check the filesystem. As of August 2021, xfs_repair finds all
+   filesystem corruptions found by xfs_check, and more, which means that
+   xfs_check is no longer run by default.
+ - xfs_scrub, if present, will always check the test and scratch
+   filesystems if they are still online at the end of the test. It is no
+   longer necessary to set TEST_XFS_SCRUB.
+
+Tools specification:
+ - dump:
+   - Set DUMP_CORRUPT_FS=1 to record metadata dumps of XFS, ext* or
+     btrfs filesystems if a filesystem check fails.
+   - Set DUMP_COMPRESSOR to a compression program to compress metadumps of
+     filesystems. This program must accept '-f' and the name of a file to
+     compress; and it must accept '-d -f -k' and the name of a file to
+     decompress. In other words, it must emulate gzip.
+ - dmesg:
+   - Set KEEP_DMESG=yes to keep the dmesg log after each test.
+ - kmemleak:
+   - Set USE_KMEMLEAK=yes to scan for memory leaks in the kernel after every
+     test, if the kernel supports kmemleak.
+ - fsstress:
+   - Set FSSTRESS_AVOID and/or FSX_AVOID, which contain options added to
+     the end of fsstress and fsx invocations, respectively, in case you wish
+     to exclude certain operational modes from these tests.
+
+Kernel/Modules related configuration:
+ - Set TEST_FS_MODULE_RELOAD=1 to unload the module and reload it between
+   test invocations. This assumes that the name of the module is the same
+   as FSTYP.
+ - Set MODPROBE_PATIENT_RM_TIMEOUT_SECONDS to specify the amount of time we
+   should try a patient module remove. The default is 50 seconds. Set this
+   to "forever" and we'll wait forever until the module is gone.
+ - Set KCONFIG_PATH to specify your preferred location of the kernel config
+   file. The config is used by tests to check whether a kernel feature is
+   enabled.
+
+Misc:
+ - If you wish to disable the UDF verification test, set the environment
+   variable DISABLE_UDF_TEST to 1.
+ - Set LOGWRITES_DEV to a block device to use for power fail testing.
+ - Set PERF_CONFIGNAME to an arbitrary string to be used for identifying
+   the test setup for running perf tests. This should be different for
+   each type of performance test you wish to run so that relevant results
+   are compared.
For example 'spinningrust' for configurations that use + spinning disks and 'nvme' for tests using nvme drives. + - Set MIN_FSSIZE to specify the minimal size (bytes) of a filesystem we + can create. Setting this parameter will skip the tests creating a + filesystem less than MIN_FSSIZE. + - Set DIFF_LENGTH to "number of diff lines to print from a failed test", + by default 10, set to 0 to print the full diff + - set IDMAPPED_MOUNTS=true to run all tests on top of idmapped mounts. While + this option is supported for all filesystems currently only -overlay is + expected to run without issues. For other filesystems additional patches + and fixes to the test suite might be needed. + +______________________ +USING THE FSQA SUITE +______________________ + +Running tests: + + - cd xfstests + - By default the tests suite will run all the tests in the auto group. These + are the tests that are expected to function correctly as regression tests, + and it excludes tests that exercise conditions known to cause machine + failures (i.e. the "dangerous" tests). + - ./check '*/001' '*/002' '*/003' + - ./check '*/06?' + - Groups of tests maybe ran by: ./check -g [group(s)] + See the tests/*/group.list files after building xfstests to learn about + each test's group memberships. + - If you want to run all tests regardless of what group they are in + (including dangerous tests), use the "all" group: ./check -g all + - To randomize test order: ./check -r [test(s)] + - You can explicitly specify NFS/CIFS/OVERLAY, otherwise + the filesystem type will be autodetected from $TEST_DEV: + - for running nfs tests: ./check -nfs [test(s)] + - for running cifs/smb3 tests: ./check -cifs [test(s)] + - for overlay tests: ./check -overlay [test(s)] + The TEST and SCRATCH partitions should be pre-formatted + with another base fs, where the overlay dirs will be created + + + The check script tests the return value of each script, and + compares the output against the expected output. If the output + is not as expected, a diff will be output and an .out.bad file + will be produced for the failing test. + + Unexpected console messages, crashes and hangs may be considered + to be failures but are not necessarily detected by the QA system. + +__________________________ +ADDING TO THE FSQA SUITE +__________________________ + + +Creating new tests scripts: + + Use the "new" script. + +Test script environment: + + When developing a new test script keep the following things in + mind. All of the environment variables and shell procedures are + available to the script once the "common/preamble" file has been + sourced and the "_begin_fstest" function has been called. + + 1. The tests are run from an arbitrary directory. If you want to + do operations on an XFS filesystem (good idea, eh?), then do + one of the following: + + (a) Create directories and files at will in the directory + $TEST_DIR ... this is within an XFS filesystem and world + writeable. You should cleanup when your test is done, + e.g. use a _cleanup shell procedure in the trap ... see + 001 for an example. If you need to know, the $TEST_DIR + directory is within the filesystem on the block device + $TEST_DEV. + + (b) mkfs a new XFS filesystem on $SCRATCH_DEV, and mount this + on $SCRATCH_MNT. Call the the _require_scratch function + on startup if you require use of the scratch partition. + _require_scratch does some checks on $SCRATCH_DEV & + $SCRATCH_MNT and makes sure they're unmounted. 
You should + cleanup when your test is done, and in particular unmount + $SCRATCH_MNT. + Tests can make use of $SCRATCH_LOGDEV and $SCRATCH_RTDEV + for testing external log and realtime volumes - however, + these tests need to simply "pass" (e.g. cat $seq.out; exit + - or default to an internal log) in the common case where + these variables are not set. + + 2. You can safely create temporary files that are not part of the + filesystem tests (e.g. to catch output, prepare lists of things + to do, etc.) in files named $tmp.. The standard test + script framework created by "new" will initialize $tmp and + cleanup on exit. + + 3. By default, tests are run as the same uid as the person + executing the control script "check" that runs the test scripts. + + 4. Some other useful shell procedures: + + _get_fqdn - echo the host's fully qualified + domain name + + _get_pids_by_name - one argument is a process name, and + return all of the matching pids on + standard output + + _within_tolerance - fancy numerical "close enough is good + enough" filter for deterministic + output ... see comments in + common/filter for an explanation + + _filter_date - turn ctime(3) format dates into the + string DATE for deterministic + output + + _cat_passwd, - dump the content of the password + _cat_group or group file (both the local file + and the content of the NIS database + if it is likely to be present) + + 5. General recommendations, usage conventions, etc.: + - When the content of the password or group file is + required, get it using the _cat_passwd and _cat_group + functions, to ensure NIS information is included if NIS + is active. + - When calling getfacl in a test, pass the "-n" argument so + that numeric rather than symbolic identifiers are used in + the output. + - When creating a new test, it is possible to enter a custom name + for the file. Filenames are in form NNN-custom-name, where NNN + is automatically added by the ./new script as an unique ID, + and "custom-name" is the optional string entered into a prompt + in the ./new script. It can contain only alphanumeric characters + and dash. Note the "NNN-" part is added automatically. + + 6. Test group membership: Each test can be associated with any number + of groups for convenient selection of subsets of tests. Group names + must be human readable using only characters in the set [:alnum:_-]. + + Test authors associate a test with groups by passing the names of those + groups as arguments to the _begin_fstest function. While _begin_fstests + is a shell function that must be called at the start of a test to + initialise the test environment correctly, the the build infrastructure + also scans the test files for _begin_fstests invocations. It does this + to compile the group lists that are used to determine which tests to run + when `check` is executed. In other words, test files files must call + _begin_fstest with their intended groups or they will not be run. + + However, because the build infrastructure also uses _begin_fstests as + a defined keyword, addition restrictions are placed on how it must be + formatted: + + (a) It must be a single line with no multi-line continuations. + + (b) group names should be separated by spaces and not other whitespace + + (c) A '#' placed anywhere in the list, even in the middle of a group + name, will cause everything from the # to the end of the line to be + ignored. 
+ + For example, the code: + + _begin_fstest auto quick subvol snapshot # metadata + + associates the current test with the "auto", "quick", "subvol", and + "snapshot" groups. Because "metadata" is after the "#" comment + delimiter, it is ignored by the build infrastructure and so it will not + be associated with that group. + + It is not necessary to specify the "all" group in the list because that + group is always computed at run time from the group lists. + + +Verified output: + + Each test script has a name, e.g. 007, and an associated + verified output, e.g. 007.out. + + It is important that the verified output is deterministic, and + part of the job of the test script is to filter the output to + make this so. Examples of the sort of things that need filtering: + + - dates + - pids + - hostnames + - filesystem names + - timezones + - variable directory contents + - imprecise numbers, especially sizes and times + +Pass/failure: + + The script "check" may be used to run one or more tests. + + Test number $seq is deemed to "pass" when: + (a) no "core" file is created, + (b) the file $seq.notrun is not created, + (c) the exit status is 0, and + (d) the output matches the verified output. + + In the "not run" case (b), the $seq.notrun file should contain a + short one-line summary of why the test was not run. The standard + output is not checked, so this can be used for a more verbose + explanation and to provide feedback when the QA test is run + interactively. + + + To force a non-zero exit status use: + status=1 + exit + + Note that: + exit 1 + won't have the desired effect because of the way the exit trap + works. + + The recent pass/fail history is maintained in the file "check.log". + The elapsed time for the most recent pass for each test is kept + in "check.time". + + The compare-failures script in tools/ may be used to compare failures + across multiple runs, given files containing stdout from those runs. + +__________________ +SUBMITTING PATCHES +__________________ + +Send patches to the fstests mailing list at fstests@vger.kernel.org. diff --git a/README.config-sections b/README.config-sections new file mode 100644 index 00000000..4f1a4dc6 --- /dev/null +++ b/README.config-sections @@ -0,0 +1,130 @@ +Configuration file with sections +================================ + +Configuration file with sections is useful for running xfstests on multiple +file systems, or multiple file system setups in a single run without any +help of external scripts. + + +Syntax +------ + +Syntax for defining a section is the following: + + [section_name] + +Section name should consist of alphanumeric characters and '_'. Anything +else is forbidden and the section will not be recognised. + +Each section in the configuration file should contain options in the format + + OPTION=value + +'OPTION' must not contain any white space characters. 'value' can contain +any character you want with one simple limitation - characters ' and " can +only appear at the start and end of the 'value', however it is not required. + +Note that options are carried between sections so the same options does not +have to be specified in each and every sections. However caution should be +exercised not to leave unwanted options set from previous sections. + + +Results +------- + +For every section xfstests will run with specified options and will produce +separate results in the '$RESULT_BASE/$section_name' directory. 
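+
+For example, with a configuration that defines sections named
+'ext4_4k_block_size' and 'xfs_filesystem' (see the example at the end of
+this file), each run stores its results separately under:
+
+    $RESULT_BASE/ext4_4k_block_size/
+    $RESULT_BASE/xfs_filesystem/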
+ + +Different mount options +----------------------- + +Specifying different mount options in difference config sections is allowed. +When TEST_FS_MOUNT_OPTS differs in the following section TEST_DEV will be +remounted with new TEST_FS_MOUNT_OPTS automatically before running the test. + + +Multiple file systems +--------------------- + +Having different file systems in different config sections is allowed. When +FSTYP differs in the following section the FSTYP file system will be created +automatically before running the test. + +Note that if TEST_FS_MOUNT_OPTS, MOUNT_OPTIONS, MKFS_OPTIONS, or FSCK_OPTIONS +are not directly specified in the section it will be reset to the default for a +given file system. + +You can also force the file system recreation by specifying RECREATE_TEST_DEV. + +Run specified section only +-------------------------- + +Specifying '-s' argument with section name will run only the section +specified. The '-s' argument can be specified multiple times to allow multiple +sections to be run. + +The options are still carried between section, that includes the sections +which are not going to be run. So you can do something like + +[ext4] +TEST_DEV=/dev/sda1 +TEST_DIR=/mnt/test +SCRATCH_DEV=/dev/sdb1 +SCRATCH_MNT=/mnt/test1 +FSTYP=ext4 + +[xfs] +FSTYP=xfs + +[btrfs] +FSTYP=btrfs + + +and run + +./check -s xfs -s btrfs + +to check xfs and btrfs only. All the devices and mounts are still going to +be parsed from the section [ext4]. + +Example +------- + +Here is an example of config file with sections: + +[ext4_4k_block_size] +TEST_DEV=/dev/sda +TEST_DIR=/mnt/test +SCRATCH_DEV=/dev/sdb +SCRATCH_MNT=/mnt/test1 +MKFS_OPTIONS="-q -F -b4096" +FSTYP=ext4 +RESULT_BASE="`pwd`/results/`date +%d%m%y_%H%M%S`" + +[ext4_1k_block_size] +MKFS_OPTIONS="-q -F -b1024" + +[ext4_nojournal] +MKFS_OPTIONS="-q -F -b4096 -O ^has_journal" + +[xfs_filesystem] +MKFS_OPTIONS="-f" +FSTYP=xfs + +[ext3_filesystem] +FSTYP=ext3 +MOUNT_OPTIONS="-o noatime" + +[cephfs] +TEST_DIR=/mnt/test +TEST_DEV=192.168.14.1:6789:/ +TEST_FS_MOUNT_OPTS="-o name=admin,secret=AQDuEBtYKEYRINGSECRETriSC8YJGDZsQHcr7g==" +FSTYP="ceph" + +[glusterfs] +FSTYP=glusterfs +TEST_DIR=/mnt/gluster/test +TEST_DEV=192.168.1.1:testvol +SCRATCH_MNT=/mnt/gluster/scratch +SCRATCH_DEV=192.168.1.1:scratchvol diff --git a/README.device-mapper b/README.device-mapper new file mode 100644 index 00000000..4ff68121 --- /dev/null +++ b/README.device-mapper @@ -0,0 +1,8 @@ + +To use xfstests on device mapper always use the /dev/mapper/ symlinks, +not the /dev/dm-* devices, or the symlinks created by LVM. + +For example: + +TEST_DEV=/dev/mapper/test +SCRATCH_DEV=/dev/mapper/scratch diff --git a/README.overlay b/README.overlay new file mode 100644 index 00000000..ec4671c3 --- /dev/null +++ b/README.overlay @@ -0,0 +1,71 @@ + +To run xfstest on overlayfs, configure the variables of TEST and SCRATCH +partitions to be used as the "base fs" and run './check -overlay'. + +For example, the following config file can be used to run tests on +xfs test/scratch partitions: + + TEST_DEV=/dev/sda5 + TEST_DIR=/mnt/test + SCRATCH_DEV=/dev/sda6 + SCRATCH_MNT=/mnt/scratch + FSTYP=xfs + +Using the same config file, but executing './check -overlay' will +use the same partitions as base fs for overlayfs directories +and set TEST_DIR/SCRATCH_MNT values to overlay mount points, i.e.: +/mnt/test/ovl-mnt and /mnt/scratch/ovl-mnt, for the context of +individual tests. 
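+
+For instance, keeping the config above unchanged, the same partitions can
+back both a plain run and an overlay run (the test name is only an
+example):
+
+    $ ./check generic/001           # base xfs mounted at /mnt/test
+    $ ./check -overlay generic/001  # overlayfs mounted at /mnt/test/ovl-mnt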
+ +'./check -overlay' does not support mkfs and fsck on the base fs, so +the base fs should be pre-formatted before starting the -overlay run. +An easy way to accomplish this is by running './check ' once, +before running './check -overlay'. + +'./check -overlay' support check overlay test and scratch dirs, +OVERLAY_FSCK_OPTIONS should be set instead of FSCK_OPTIONS if fsck +options need to given directly. + +Because of the lack of mkfs support, multi-section config files are only +partly supported with './check -overlay'. Only multi-section files that +do not change FSTYP and MKFS_OPTIONS can be safely used with -overlay. + +For example, the following multi-section config file can be used to +run overlay tests on the same base fs, but with different mount options, and on +top of idmapped mounts: + + [xfs] + TEST_DEV=/dev/sda5 + TEST_DIR=/mnt/test + SCRATCH_DEV=/dev/sda6 + SCRATCH_MNT=/mnt/scratch + FSTYP=xfs + + [xfs_pquota] + MOUNT_OPTIONS="-o pquota" + TEST_FS_MOUNT_OPTS="-o noatime" + OVERLAY_MOUNT_OPTIONS="-o redirect_dir=off" + OVERLAY_FSCK_OPTIONS="-n -o redirect_dir=off" + + [idmapped] + IDMAPPED_MOUNTS=true + +In the example above, MOUNT_OPTIONS will be used to mount the base scratch fs, +TEST_FS_MOUNT_OPTS will be used to mount the base test fs, +OVERLAY_MOUNT_OPTIONS will be used to mount both test and scratch overlay and +OVERLAY_FSCK_OPTIONS will be used to check both test and scratch overlay. + + +Unionmount Testsuite +==================== + +xfstests can be used as a test harness to run unionmount testsuite test cases +and provide extended test coverage for overlayfs. + +To enable running unionmount testsuite, clone the git repository from: + https://github.com/amir73il/unionmount-testsuite.git +under the xfstests src directory, or set the environment variable +UNIONMOUNT_TESTSUITE to the local path where the repository was cloned. + +Run './check -overlay -g overlay/union' to execute all the unionmount testsuite +test cases. diff --git a/VERSION b/VERSION new file mode 100644 index 00000000..7294a002 --- /dev/null +++ b/VERSION @@ -0,0 +1,7 @@ +# +# This file is used by configure to get version information +# +PKG_MAJOR=1 +PKG_MINOR=1 +PKG_REVISION=1 +PKG_BUILD=1 diff --git a/acinclude.m4 b/acinclude.m4 new file mode 100644 index 00000000..fd92f0d5 --- /dev/null +++ b/acinclude.m4 @@ -0,0 +1,39 @@ +dnl Copyright (C) 2016 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. 
+AC_DEFUN([AC_PACKAGE_WANT_LINUX_FIEMAP_H], + [ AC_CHECK_HEADERS([linux/fiemap.h], [ have_fiemap=true ], [ have_fiemap=false ]) + AC_SUBST(have_fiemap) + ]) + +AC_DEFUN([AC_PACKAGE_WANT_LINUX_PRCTL_H], + [ AC_CHECK_HEADERS([sys/prctl.h], [ have_prctl=true ], [ have_prctl=false ]) + AC_SUBST(have_prctl) + ]) + +AC_DEFUN([AC_PACKAGE_WANT_LINUX_FS_H], + [ AC_CHECK_HEADER([linux/fs.h]) + ]) + +AC_DEFUN([AC_PACKAGE_WANT_FALLOCATE], + [ AC_MSG_CHECKING([for fallocate]) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ +#define _GNU_SOURCE +#define _FILE_OFFSET_BITS 64 +#include +#include ]], [[ fallocate(0, 0, 0, 0); ]])],[ have_fallocate=true; AC_MSG_RESULT(yes) ],[ have_fallocate=false; AC_MSG_RESULT(no) ]) + AC_SUBST(have_fallocate) + ]) + +AC_DEFUN([AC_PACKAGE_WANT_OPEN_BY_HANDLE_AT], + [ AC_MSG_CHECKING([for open_by_handle_at]) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ +#define _GNU_SOURCE +#include + ]], [[ + struct file_handle fh; + open_by_handle_at(0, &fh, 0); + ]])],[ have_open_by_handle_at=true; AC_MSG_RESULT(yes) ],[ have_open_by_handle_at=false; AC_MSG_RESULT(no) ]) + AC_SUBST(have_open_by_handle_at) + ]) diff --git a/build/Makefile b/build/Makefile new file mode 100644 index 00000000..ec04bed8 --- /dev/null +++ b/build/Makefile @@ -0,0 +1,49 @@ +# +# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. +# + +TOPDIR = .. +include $(TOPDIR)/include/builddefs + +MANIFEST=src-manifest +SRCTAR=$(PKG_NAME)-$(PKG_VERSION).src.tar.gz + +LDIRT = *-manifest *.gz $(TOPDIR)/$(PKG_NAME)-* + +# for clean and clobber +SUBDIRS = tar rpm + +# nothing to build here (it's all packaging) +default install install-dev install-lib: + +include $(BUILDRULES) + +# Symlink in the TOPDIR is used to pack files relative to +# product-version directory. +$(MANIFEST) : $(_FORCE) + @if [ ! -L $(TOPDIR)/$(PKG_NAME)-$(PKG_VERSION) ] ; then \ + $(LN_S) . $(TOPDIR)/$(PKG_NAME)-$(PKG_VERSION) ; \ + fi + @CDIR=`pwd`; cd $(TOPDIR); \ + $(MAKE) --no-print-directory source | \ + sed -e 's/^\./$(PKG_NAME)-$(PKG_VERSION)/' > $$CDIR/$@ ;\ + if [ $$? -ne 0 ] ; then \ + exit 1; \ + else \ + unset TAPE; \ + $(TAR) -T $$CDIR/$@ -cf - | $(ZIP) --best > $$CDIR/$(SRCTAR); \ + echo Wrote: $$CDIR/$(SRCTAR); \ + fi + +dist : default $(MANIFEST) + @DIST_MANIFEST=`pwd`/bin-manifest; DIST_ROOT=/tmp/$$$$; \ + export DIST_MANIFEST DIST_ROOT; \ + rm -f $$DIST_MANIFEST; \ + echo === install === && $(MAKE) -C $(TOPDIR) install || exit $$?; \ + if [ -x $(TAR) ]; then \ + ( echo "=== tar ===" && $(MAKEF) -C tar $@ || exit $$? ); \ + fi; \ + if [ -x $(RPMBUILD) ]; then \ + ( echo "=== rpm ===" && $(MAKEF) -C rpm $@ || exit $$? ); \ + fi; \ + test -z "$$KEEP_DIST_ROOT" || rm -rf $$DIST_ROOT; echo Done diff --git a/build/rpm/Makefile b/build/rpm/Makefile new file mode 100644 index 00000000..8bae7067 --- /dev/null +++ b/build/rpm/Makefile @@ -0,0 +1,62 @@ +# +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. +# + +TOPDIR = ../.. 
+TREEROOT = $(shell cd ${TOPDIR}; pwd) +include $(TOPDIR)/include/builddefs + +SPECF = $(PKG_NAME).spec +LDIRT = *.rpm $(SPECF) rpmmacros rpmfiles* rpm-*.rc + +LSRCFILES = macros.template $(SPECF).in rpm-2.rc.template + +default install install-dev install-lib: + +include $(BUILDRULES) + +# Generate a binary rpm file +dist : default $(SPECF) rpm-$(RPM_VERSION).rc + $(RPMBUILD) -ba --rcfile ./rpm-$(RPM_VERSION).rc $(SPECF) + +# Because rpm prior to v.2.90 does not support macros and old style config +# is not supported by rpm v.3, we have to resort to such ugly hacks +ifneq ($(RPM_VERSION),2) +rpm-$(RPM_VERSION).rc : rpmmacros + @$(SED) -e '/^macrofiles:/s|~/.rpmmacros|rpmmacros|' $@ + +rpmmacros : macros.template + @$(SED) -e 's|%topdir%|$(TREEROOT)|g' < $< > $@ +else +rpm-2.rc: rpm-2.rc.template + @$(SED) -e 's|%topdir%|$(TOPDIR)|g' < $< > $@ +endif + +# Generate the rpm specfile format file list from the install-sh manifest +rpmfiles rpmfiles-dev rpmfiles-lib: + $(SORT) -u $$DIST_MANIFEST | $(AWK) > $@ '\ +$$1 == "d" { printf ("%%%%dir %%%%attr(%s,%s,%s) %s\n", $$2, $$3, $$4, $$5); } \ +$$1 == "f" { if (match ($$6, "$(PKG_MAN_DIR)") || \ + match ($$6, "$(PKG_DOC_DIR)")) \ + printf ("%%%%doc "); \ + if (match ($$6, "$(PKG_MAN_DIR)")) \ + printf ("%%%%attr(%s,%s,%s) %s*\n", $$2, $$3, $$4, $$6); \ + else \ + printf ("%%%%attr(%s,%s,%s) %s\n", $$2, $$3, $$4, $$6); } \ +$$1 == "l" { if (match ($$3, "$(PKG_MAN_DIR)") || \ + match ($$3, "$(PKG_DOC_DIR)")) \ + printf ("%%%%doc "); \ + if (match ($$3, "$(PKG_MAN_DIR)")) \ + printf ("%%%%attr(0777,root,root) %s*\n", $$3); \ + else \ + printf ("%%%%attr(0777,root,root) %s\n", $$3); }' + +.PHONY: $(SPECF) +${SPECF} : ${SPECF}.in + $(SED) -e's|@pkg_name@|$(PKG_NAME)|g' \ + -e's|@pkg_version@|$(PKG_VERSION)|g' \ + -e's|@pkg_release@|$(PKG_RELEASE)|g' \ + -e's|@pkg_distribution@|$(PKG_DISTRIBUTION)|g' \ + -e's|@build_root@|$(DIST_ROOT)|g' \ + -e'/^BuildRoot: *$$/d' \ + -e's|@make@|$(MAKE)|g' < $< > $@ diff --git a/build/rpm/macros.template b/build/rpm/macros.template new file mode 100644 index 00000000..200ba39e --- /dev/null +++ b/build/rpm/macros.template @@ -0,0 +1,30 @@ +# +# rpmrc.template +# +# Template to fudge rpm directory structure inside IRIX-like build +# environment + +# Force 386 build on all platforms +%_target i386-pc-linux +%_target_cpu i386 +%_target_os linux + +# topdir == $(WORKAREA) +%_topdir %topdir% + +# Following directories are specific to the topdir +# This is where build is done. In our case it's the same as $WORKAREA +%_builddir %topdir% + +# This is where foo.1.99.tar.gz is living in the real world. +# Be careful not to run full rpm build as it will override the sources +%_sourcedir %topdir%/build + +# This is where binary RPM and source RPM would end up +%_rpmdir %topdir%/build/rpm +%_srcrpmdir %topdir%/build/rpm +%_specdir %topdir%/build/rpm + +# Leave RPM files in the same directory - we're not building for +# multiple architectures +%_rpmfilename %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm diff --git a/build/rpm/xfstests.spec.in b/build/rpm/xfstests.spec.in new file mode 100644 index 00000000..e0f7c5f9 --- /dev/null +++ b/build/rpm/xfstests.spec.in @@ -0,0 +1,49 @@ +Summary: XFS regression test suite +Name: @pkg_name@ +Version: @pkg_version@ +Release: @pkg_release@ +Distribution: @pkg_distribution@ +Packager: Silicon Graphics, Inc. 
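+# Note: the %files list for this package is not maintained by hand; it is
+# generated at build time by the rpmfiles target in build/rpm/Makefile from
+# the install-sh manifest, and pulled in by the trailing
+# "%files -f build/rpm/rpmfiles" directive.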
+BuildRoot: @build_root@ +BuildRequires: autoconf, xfsprogs-devel, e2fsprogs-devel +BuildREquires: libacl-devel, libaio-devel +Requires: bash, xfsprogs, xfsdump, perl, acl, attr, bind-utils +Requires: bc, indent, quota +Source: @pkg_name@-@pkg_version@.src.tar.gz +License: GPL2+ +Vendor: Silicon Graphics, Inc. +URL: http://oss.sgi.com/projects/xfs/ +Group: System Environment/Base + +%description +The XFS regression test suite. Also includes some support for +acl, attr, udf, and nfs testing. Contains around 200 specific tests +for userspace & kernelspace. + +%prep +if [ -f .census ] ; then + if [ ! -d ${RPM_PACKAGE_NAME}-${RPM_PACKAGE_VERSION} ] ; then + ln -s . ${RPM_PACKAGE_NAME}-${RPM_PACKAGE_VERSION} + fi +else +%setup +INSTALL_USER=root +INSTALL_GROUP=root +export INSTALL_USER INSTALL_GROUP +@make@ configure +fi + +%build +@make@ + +%install +DIST_ROOT="$RPM_BUILD_ROOT" +DIST_INSTALL=`pwd`/install.manifest +export DIST_ROOT DIST_INSTALL +@make@ install DIST_MANIFEST="$DIST_INSTALL" +@make@ -C build/rpm rpmfiles DIST_MANIFEST="$DIST_INSTALL" + +%clean +rm -rf $RPM_BUILD_ROOT + +%files -f build/rpm/rpmfiles diff --git a/build/tar/Makefile b/build/tar/Makefile new file mode 100644 index 00000000..94a3adaa --- /dev/null +++ b/build/tar/Makefile @@ -0,0 +1,22 @@ +# +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. +# + +TOPDIR = ../.. +include $(TOPDIR)/include/builddefs + +BINTAR=$(PKG_NAME)-$(PKG_VERSION).tar.gz +LDIRT = *.gz + +default install install-dev install-lib: + +include $(BUILDRULES) + +dist : default + @HERE=`pwd`; cd $${DIST_ROOT:-/}; \ + $(SORT) -u $$HERE/../bin-manifest | $(AWK) ' \ + $$1 == "f" { printf (".%s\n", $$6); } \ + $$1 == "d" { next; } \ + $$1 == "l" { printf (".%s\n", $$3); }' \ + | $(TAR) -T - -cf - | $(ZIP) --best > $$HERE/$(BINTAR) + @echo Wrote: `pwd`/$(BINTAR) diff --git a/check b/check new file mode 100755 index 00000000..af23572c --- /dev/null +++ b/check @@ -0,0 +1,1037 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved. 
+
+#
+# Control script for QA
+#
+tmp=/tmp/$$
+status=0
+needwrap=true
+needsum=true
+try=()
+sum_bad=0
+bad=()
+notrun=()
+interrupt=true
+diff="diff -u"
+showme=false
+have_test_arg=false
+randomize=false
+exact_order=false
+export here=`pwd`
+xfile=""
+subdir_xfile=""
+brief_test_summary=false
+do_report=false
+DUMP_OUTPUT=false
+iterations=1
+istop=false
+loop_on_fail=0
+
+# This is a global variable used to pass test failure text to reporting gunk
+_err_msg=""
+
+# start the initialisation work now
+iam=check
+
+# mkfs.xfs uses the presence of both of these variables to enable formerly
+# supported tiny filesystem configurations that fstests use for fuzz testing
+# in a controlled environment
+export MSGVERB="text:action"
+export QA_CHECK_FS=${QA_CHECK_FS:=true}
+
+# number of diff lines from a failed test, 0 for whole output
+export DIFF_LENGTH=${DIFF_LENGTH:=10}
+
+# by default don't output timestamps
+timestamp=${TIMESTAMP:=false}
+
+rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist $tmp.report.*
+
+SRC_GROUPS="generic shared"
+export SRC_DIR="tests"
+
+usage()
+{
+	echo "Usage: $0 [options] [testlist]"'
+
+check options
+    -nfs		test NFS
+    -glusterfs		test GlusterFS
+    -cifs		test CIFS
+    -9p			test 9p
+    -virtiofs		test virtiofs
+    -overlay		test overlay
+    -pvfs2		test PVFS2
+    -tmpfs		test TMPFS
+    -ubifs		test ubifs
+    -l			line mode diff
+    -udiff		show unified diff (default)
+    -n			show me, do not run tests
+    -T			output timestamps
+    -r			randomize test order
+    --exact-order	run tests in the exact order specified
+    -i <n>		iterate the test list <n> times
+    -I <n>		iterate the test list <n> times, but stops iterating further in case of any test failure
+    -d			dump test output to stdout
+    -b			brief test summary
+    -R fmt[,fmt]	generate report in formats specified. Supported formats: xunit, xunit-quiet
+    --large-fs		optimise scratch device for large filesystems
+    -s section		run only specified section from config file
+    -S section		exclude the specified section from the config file
+    -L <n>		loop tests <n> times following a failure, measuring aggregate pass/fail metrics
+
+testlist options
+    -g group[,group...]	include tests from these groups
+    -x group[,group...]	exclude tests from these groups
+    -X exclude_file	exclude individual tests
+    -e testlist		exclude a specific list of tests
+    -E external_file	exclude individual tests
+    [testlist]		include tests matching names in testlist
+
+testlist argument is a list of tests in the form of <test dir>/<test name>.
+
+<test dir> is a directory under tests that contains a group file,
+with a list of the names of the tests in that directory.
+
+<test name> may be either a specific test file name (e.g. xfs/001) or
+a test file name match pattern (e.g. xfs/*).
+
+group argument is either a name of a tests group to collect from all
+the test dirs (e.g. quick) or a name of a tests group to collect from
+a specific tests dir in the form of <test dir>/<group name> (e.g. xfs/quick).
+If you want to run all the tests in the test suite, use "-g all" to specify all
+groups.
+
+exclude_file argument refers to a name of a file inside each test directory.
+For every test dir where this file is found, the listed test names are
+excluded from the list of tests to run from that test dir.
+
+external_file argument is a path to a single file containing a list of tests
+to exclude in the form of <test dir>/<test name>.
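+
+exclude file format: plain text, one test per line, with "#" starting a
+comment (the -E and -X handling further down strips comments). An
+illustrative external_file for -E (hypothetical test ids):
+
+	# known failures on this host
+	generic/001
+	xfs/104
+
+Per-directory -X exclude files use the same format, but list only the
+<test name> part, since each such file lives inside its own test dir.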
+ +examples: + check xfs/001 + check -g quick + check -g xfs/quick + check -x stress xfs/* + check -X .exclude -g auto + check -E ~/.xfstests.exclude +' + exit 1 +} + +get_sub_group_list() +{ + local d=$1 + local grp=$2 + + test -s "$SRC_DIR/$d/group.list" || return 1 + + local grpl=$(sed -n < $SRC_DIR/$d/group.list \ + -e 's/#.*//' \ + -e 's/$/ /' \ + -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p") + echo $grpl +} + +get_group_list() +{ + local grp=$1 + local grpl="" + local sub=$(dirname $grp) + local fsgroup="$FSTYP" + + if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then + # group is given as / (e.g. xfs/quick) + grp=$(basename $grp) + get_sub_group_list $sub $grp + return + fi + + if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then + fsgroup=ext4 + fi + for d in $SRC_GROUPS $fsgroup; do + if ! test -d "$SRC_DIR/$d" ; then + continue + fi + grpl="$grpl $(get_sub_group_list $d $grp)" + done + echo $grpl +} + +# Find all tests, excluding files that are test metadata such as group files. +# It matches test names against $VALID_TEST_NAME defined in common/rc +get_all_tests() +{ + touch $tmp.list + for d in $SRC_GROUPS $FSTYP; do + if ! test -d "$SRC_DIR/$d" ; then + continue + fi + ls $SRC_DIR/$d/* | \ + grep -v "\..*" | \ + grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \ + grep -v "group\|Makefile" >> $tmp.list 2>/dev/null + done +} + +# takes the list of tests to run in $tmp.list, and removes the tests passed to +# the function from that list. +trim_test_list() +{ + local test_list="$*" + + rm -f $tmp.grep + local numsed=0 + for t in $test_list + do + if [ $numsed -gt 100 ]; then + grep -v -f $tmp.grep <$tmp.list >$tmp.tmp + mv $tmp.tmp $tmp.list + numsed=0 + rm -f $tmp.grep + fi + echo "^$t\$" >>$tmp.grep + numsed=`expr $numsed + 1` + done + grep -v -f $tmp.grep <$tmp.list >$tmp.tmp + mv $tmp.tmp $tmp.list + rm -f $tmp.grep +} + + +_wallclock() +{ + date "+%s" +} + +_timestamp() +{ + local now=`date "+%T"` + echo -n " [$now]" +} + +_prepare_test_list() +{ + unset list + # Tests specified on the command line + if [ -s $tmp.arglist ]; then + cat $tmp.arglist > $tmp.list + else + touch $tmp.list + fi + + # Specified groups to include + # Note that the CLI processing adds a leading space to the first group + # parameter, so we have to catch that here checking for "all" + if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then + # no test numbers, do everything + get_all_tests + else + for group in $GROUP_LIST; do + list=$(get_group_list $group) + if [ -z "$list" ]; then + echo "Group \"$group\" is empty or not defined?" + exit 1 + fi + + for t in $list; do + grep -s "^$t\$" $tmp.list >/dev/null || \ + echo "$t" >>$tmp.list + done + done + fi + + # Specified groups to exclude + for xgroup in $XGROUP_LIST; do + list=$(get_group_list $xgroup) + if [ -z "$list" ]; then + echo "Group \"$xgroup\" is empty or not defined?" + continue + fi + + trim_test_list $list + done + + # sort the list of tests into numeric order unless we're running tests + # in the exact order specified + if ! $exact_order; then + if $randomize; then + if type shuf >& /dev/null; then + sorter="shuf" + else + sorter="awk -v seed=$RANDOM -f randomize.awk" + fi + else + sorter="cat" + fi + list=`sort -n $tmp.list | uniq | $sorter` + else + list=`cat $tmp.list` + fi + rm -f $tmp.list +} + +# Process command arguments first. +while [ $# -gt 0 ]; do + case "$1" in + -\? 
| -h | --help) usage ;; + + -nfs|-glusterfs|-cifs|-9p|-virtiofs|-pvfs2|-tmpfs|-ubifs) + FSTYP="${1:1}" + ;; + -overlay) + FSTYP=overlay + export OVERLAY=true + ;; + + -g) group=$2 ; shift ; + GROUP_LIST="$GROUP_LIST ${group//,/ }" + ;; + + -x) xgroup=$2 ; shift ; + XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }" + ;; + + -X) subdir_xfile=$2; shift ; + ;; + -e) + xfile=$2; shift ; + echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist + ;; + + -E) xfile=$2; shift ; + if [ -f $xfile ]; then + sed "s/#.*$//" "$xfile" >> $tmp.xlist + fi + ;; + -s) RUN_SECTION="$RUN_SECTION $2"; shift ;; + -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;; + -l) diff="diff" ;; + -udiff) diff="$diff -u" ;; + + -n) showme=true ;; + -r) + if $exact_order; then + echo "Cannot specify -r and --exact-order." + exit 1 + fi + randomize=true + ;; + --exact-order) + if $randomize; then + echo "Cannnot specify --exact-order and -r." + exit 1 + fi + exact_order=true + ;; + -i) iterations=$2; shift ;; + -I) iterations=$2; istop=true; shift ;; + -T) timestamp=true ;; + -d) DUMP_OUTPUT=true ;; + -b) brief_test_summary=true;; + -R) report_fmt=$2 ; shift ; + REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }" + do_report=true + ;; + --large-fs) export LARGE_SCRATCH_DEV=yes ;; + --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;; + -L) [[ $2 =~ ^[0-9]+$ ]] || usage + loop_on_fail=$2; shift + ;; + + -*) usage ;; + *) # not an argument, we've got tests now. + have_test_arg=true ;; + esac + + # if we've found a test specification, the break out of the processing + # loop before we shift the arguments so that this is the first argument + # that we process in the test arg loop below. + if $have_test_arg; then + break; + fi + + shift +done + +# we need common/rc, that also sources common/config. We need to source it +# after processing args, overlay needs FSTYP set before sourcing common/config +if ! . ./common/rc; then + echo "check: failed to source common/rc" + exit 1 +fi + +if [ -n "$subdir_xfile" ]; then + for d in $SRC_GROUPS $FSTYP; do + [ -f $SRC_DIR/$d/$subdir_xfile ] || continue + for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do + echo $d/$f >> $tmp.xlist + done + done +fi + +# Process tests from command line now. +if $have_test_arg; then + while [ $# -gt 0 ]; do + case "$1" in + -*) echo "Arguments before tests, please!" + status=1 + exit $status + ;; + *) # Expand test pattern (e.g. xfs/???, *fs/001) + list=$(cd $SRC_DIR; echo $1) + for t in $list; do + test_dir=`dirname $t` + test_dir=${test_dir#$SRC_DIR/*} + test_name=`basename $t` + group_file=$SRC_DIR/$test_dir/group.list + + if grep -Eq "^$test_name" $group_file; then + # in group file ... OK + echo $SRC_DIR/$test_dir/$test_name \ + >>$tmp.arglist + else + # oops + echo "$t - unknown test, ignored" + fi + done + ;; + esac + + shift + done +elif [ -z "$GROUP_LIST" ]; then + # default group list is the auto group. If any other group or test is + # specified, we use that instead. 
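+	# (so a bare "./check" invocation is equivalent to "./check -g auto")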
+ GROUP_LIST="auto" +fi + +if [ `id -u` -ne 0 ] +then + echo "check: QA must be run as root" + exit 1 +fi + +_wipe_counters() +{ + try=() + notrun=() + bad=() +} + +_global_log() { + echo "$1" >> $check.log + if $OPTIONS_HAVE_SECTIONS; then + echo "$1" >> ${REPORT_DIR}/check.log + fi +} + +_wrapup() +{ + seq="check" + check="$RESULT_BASE/check" + + if $showme && $needwrap; then + if $do_report; then + # $showme = all selected tests are notrun (no tries) + _make_section_report "$section" "${#notrun[*]}" "0" \ + "${#notrun[*]}" \ + "$((sect_stop - sect_start))" + fi + needwrap=false + elif $needwrap; then + if [ -f $check.time -a -f $tmp.time ]; then + cat $check.time $tmp.time \ + | $AWK_PROG ' + { t[$1] = $2 } + END { + if (NR > 0) { + for (i in t) print i " " t[i] + } + }' \ + | sort -n >$tmp.out + mv $tmp.out $check.time + if $OPTIONS_HAVE_SECTIONS; then + cp $check.time ${REPORT_DIR}/check.time + fi + fi + + _global_log "" + _global_log "$(date)" + + echo "SECTION -- $section" >>$tmp.summary + echo "=========================" >>$tmp.summary + if ((${#try[*]} > 0)); then + if [ $brief_test_summary == "false" ]; then + echo "Ran: ${try[*]}" + echo "Ran: ${try[*]}" >>$tmp.summary + fi + _global_log "Ran: ${try[*]}" + fi + + $interrupt && echo "Interrupted!" | tee -a $check.log + if $OPTIONS_HAVE_SECTIONS; then + $interrupt && echo "Interrupted!" | tee -a \ + ${REPORT_DIR}/check.log + fi + + if ((${#notrun[*]} > 0)); then + if [ $brief_test_summary == "false" ]; then + echo "Not run: ${notrun[*]}" + echo "Not run: ${notrun[*]}" >>$tmp.summary + fi + _global_log "Not run: ${notrun[*]}" + fi + + if ((${#bad[*]} > 0)); then + echo "Failures: ${bad[*]}" + echo "Failed ${#bad[*]} of ${#try[*]} tests" + _global_log "Failures: ${bad[*]}" + _global_log "Failed ${#bad[*]} of ${#try[*]} tests" + echo "Failures: ${bad[*]}" >>$tmp.summary + echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary + else + echo "Passed all ${#try[*]} tests" + _global_log "Passed all ${#try[*]} tests" + echo "Passed all ${#try[*]} tests" >>$tmp.summary + fi + echo "" >>$tmp.summary + if $do_report; then + _make_section_report "$section" "${#try[*]}" \ + "${#bad[*]}" "${#notrun[*]}" \ + "$((sect_stop - sect_start))" + fi + needwrap=false + fi + + sum_bad=`expr $sum_bad + ${#bad[*]}` + _wipe_counters + rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time + if ! 
$OPTIONS_HAVE_SECTIONS; then + rm -f $tmp.* + fi +} + +_summary() +{ + _wrapup + if $showme; then + : + elif $needsum; then + count=`wc -L $tmp.summary | cut -f1 -d" "` + cat $tmp.summary + needsum=false + fi + rm -f $tmp.* +} + +_check_filesystems() +{ + local ret=0 + + if [ -f ${RESULT_DIR}/require_test ]; then + _check_test_fs || ret=1 + rm -f ${RESULT_DIR}/require_test* + else + _test_unmount 2> /dev/null + fi + if [ -f ${RESULT_DIR}/require_scratch ]; then + _check_scratch_fs || ret=1 + rm -f ${RESULT_DIR}/require_scratch* + fi + _scratch_unmount 2> /dev/null + return $ret +} + +_expunge_test() +{ + local TEST_ID="$1" + if [ -s $tmp.xlist ]; then + if grep -q $TEST_ID $tmp.xlist; then + echo " [expunged]" + return 1 + fi + fi + return 0 +} + +# retain files which would be overwritten in subsequent reruns of the same test +_stash_fail_loop_files() { + local seq_prefix="${REPORT_DIR}/${1}" + local cp_suffix="$2" + + for i in ".full" ".dmesg" ".out.bad" ".notrun" ".core" ".hints"; do + rm -f "${seq_prefix}${i}${cp_suffix}" + if [ -f "${seq_prefix}${i}" ]; then + cp "${seq_prefix}${i}" "${seq_prefix}${i}${cp_suffix}" + fi + done +} + +# Retain in @bad / @notrun the result of the just-run @test_seq. @try array +# entries are added prior to execution. +_stash_test_status() { + local test_seq="$1" + local test_status="$2" + + if $do_report && [[ $test_status != "expunge" ]]; then + _make_testcase_report "$section" "$test_seq" \ + "$test_status" "$((stop - start))" + fi + + if ((${#loop_status[*]} > 0)); then + # continuing or completing rerun-on-failure loop + _stash_fail_loop_files "$test_seq" ".rerun${#loop_status[*]}" + loop_status+=("$test_status") + if ((${#loop_status[*]} > loop_on_fail)); then + printf "%s aggregate results across %d runs: " \ + "$test_seq" "${#loop_status[*]}" + awk "BEGIN { + n=split(\"${loop_status[*]}\", arr);"' + for (i = 1; i <= n; i++) + stats[arr[i]]++; + for (x in stats) + printf("%s=%d (%.1f%%)", + (i-- > n ? x : ", " x), + stats[x], 100 * stats[x] / n); + }' + echo + loop_status=() + fi + return # only stash @bad result for initial failure in loop + fi + + case "$test_status" in + fail) + if ((loop_on_fail > 0)); then + # initial failure, start rerun-on-failure loop + _stash_fail_loop_files "$test_seq" ".rerun0" + loop_status+=("$test_status") + fi + bad+=("$test_seq") + ;; + list|notrun) + notrun+=("$test_seq") + ;; + pass|expunge) + ;; + *) + echo "Unexpected test $test_seq status: $test_status" + ;; + esac +} + +# Can we run systemd scopes? +HAVE_SYSTEMD_SCOPES= +systemctl reset-failed "fstests-check" &>/dev/null +systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null +test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes + +# Make the check script unattractive to the OOM killer... +OOM_SCORE_ADJ="/proc/self/oom_score_adj" +function _adjust_oom_score() { + test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}" +} +_adjust_oom_score -500 + +# ...and make the tests themselves somewhat more attractive to it, so that if +# the system runs out of memory it'll be the test that gets killed and not the +# test framework. The test is run in a separate process without any of our +# functions, so we open-code adjusting the OOM score. +# +# If systemd is available, run the entire test script in a scope so that we can +# kill all subprocesses of the test if it fails to clean up after itself. This +# is essential for ensuring that the post-test unmount succeeds. 
Note that +# systemd doesn't automatically remove transient scopes that fail to terminate +# when systemd tells them to terminate (e.g. programs stuck in D state when +# systemd sends SIGKILL), so we use reset-failed to tear down the scope. +_run_seq() { + local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq") + + if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then + local unit="$(systemd-escape "fs$seq").scope" + systemctl reset-failed "${unit}" &> /dev/null + systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}" + res=$? + systemctl stop "${unit}" &> /dev/null + return "${res}" + else + "${cmd[@]}" + fi +} + +_detect_kmemleak +_prepare_test_list + +if $OPTIONS_HAVE_SECTIONS; then + trap "_summary; exit \$status" 0 1 2 3 15 +else + trap "_wrapup; exit \$status" 0 1 2 3 15 +fi + +function run_section() +{ + local section=$1 skip + + OLD_FSTYP=$FSTYP + OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS + get_next_config $section + + # Do we need to run only some sections ? + if [ ! -z "$RUN_SECTION" ]; then + skip=true + for s in $RUN_SECTION; do + if [ $section == $s ]; then + skip=false + break; + fi + done + if $skip; then + return + fi + fi + + # Did this section get excluded? + if [ ! -z "$EXCLUDE_SECTION" ]; then + skip=false + for s in $EXCLUDE_SECTION; do + if [ $section == $s ]; then + skip=true + break; + fi + done + if $skip; then + return + fi + fi + + mkdir -p $RESULT_BASE + if [ ! -d $RESULT_BASE ]; then + echo "failed to create results directory $RESULT_BASE" + status=1 + exit + fi + + if $OPTIONS_HAVE_SECTIONS; then + echo "SECTION -- $section" + fi + + sect_start=`_wallclock` + if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then + echo "RECREATING -- $FSTYP on $TEST_DEV" + _test_unmount 2> /dev/null + if ! _test_mkfs >$tmp.err 2>&1 + then + echo "our local _test_mkfs routine ..." + cat $tmp.err + echo "check: failed to mkfs \$TEST_DEV using specified options" + status=1 + exit + fi + if ! _test_mount + then + echo "check: failed to mount $TEST_DEV on $TEST_DIR" + status=1 + exit + fi + # TEST_DEV has been recreated, previous FSTYP derived from + # TEST_DEV could be changed, source common/rc again with + # correct FSTYP to get FSTYP specific configs, e.g. common/xfs + . common/rc + _prepare_test_list + elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then + _test_unmount 2> /dev/null + if ! _test_mount + then + echo "check: failed to mount $TEST_DEV on $TEST_DIR" + status=1 + exit + fi + fi + + init_rc + + seq="check" + check="$RESULT_BASE/check" + + # don't leave old full output behind on a clean run + rm -f $check.full + + [ -f $check.time ] || touch $check.time + + # print out our test configuration + echo "FSTYP -- `_full_fstyp_details`" + echo "PLATFORM -- `_full_platform_details`" + if [ ! -z "$SCRATCH_DEV" ]; then + echo "MKFS_OPTIONS -- `_scratch_mkfs_options`" + echo "MOUNT_OPTIONS -- `_scratch_mount_options`" + fi + echo + needwrap=true + + if [ ! -z "$SCRATCH_DEV" ]; then + _scratch_unmount 2> /dev/null + # call the overridden mkfs - make sure the FS is built + # the same as we'll create it later. + + if ! _scratch_mkfs >$tmp.err 2>&1 + then + echo "our local _scratch_mkfs routine ..." + cat $tmp.err + echo "check: failed to mkfs \$SCRATCH_DEV using specified options" + status=1 + exit + fi + + # call the overridden mount - make sure the FS mounts with + # the same options that we'll mount with later. + if ! _try_scratch_mount >$tmp.err 2>&1 + then + echo "our local mount routine ..." 
+ cat $tmp.err + echo "check: failed to mount \$SCRATCH_DEV using specified options" + status=1 + exit + else + _scratch_unmount + fi + fi + + seqres="$check" + _check_test_fs + + loop_status=() # track rerun-on-failure state + local tc_status ix + local -a _list=( $list ) + for ((ix = 0; ix < ${#_list[*]}; !${#loop_status[*]} && ix++)); do + seq="${_list[$ix]}" + + if [ ! -f $seq ]; then + # Try to get full name in case the user supplied only + # seq id and the test has a name. A bit of hassle to + # find really the test and not its sample output or + # helping files. + bname=$(basename $seq) + full_seq=$(find $(dirname $seq) -name $bname* -executable | + awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\ + END { print shortest }') + if [ -f $full_seq ] && \ + [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then + seq=$full_seq + fi + fi + + # the filename for the test and the name output are different. + # we don't include the tests/ directory in the name output. + export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"` + + # Similarly, the result directory needs to replace the tests/ + # part of the test location. + group=`dirname $seq` + if $OPTIONS_HAVE_SECTIONS; then + export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"` + REPORT_DIR="$RESULT_BASE/$section" + else + export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"` + REPORT_DIR="$RESULT_BASE" + fi + seqres="$REPORT_DIR/$seqnum" + + mkdir -p $RESULT_DIR + rm -f ${RESULT_DIR}/require_scratch* + rm -f ${RESULT_DIR}/require_test* + echo -n "$seqnum" + + if $showme; then + _expunge_test $seqnum + if [ $? -eq 1 ]; then + tc_status="expunge" + else + echo + start=0 + stop=0 + tc_status="list" + fi + _stash_test_status "$seqnum" "$tc_status" + continue + fi + + tc_status="pass" + if [ ! -f $seq ]; then + echo " - no such test?" + _stash_test_status "$seqnum" "$tc_status" + continue + fi + + # really going to try and run this one + rm -f $seqres.out.bad $seqres.hints + + # check if we really should run it + _expunge_test $seqnum + if [ $? -eq 1 ]; then + tc_status="expunge" + _stash_test_status "$seqnum" "$tc_status" + continue + fi + + # record that we really tried to run this test. + if ((!${#loop_status[*]})); then + try+=("$seqnum") + fi + + awk 'BEGIN {lasttime=" "} \ + $1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \ + END {printf "%s", lasttime}' "$check.time" + rm -f core $seqres.notrun + + start=`_wallclock` + $timestamp && _timestamp + [ ! -x $seq ] && chmod u+x $seq # ensure we can run it + $LOGGER_PROG "run xfstest $seqnum" + if [ -w /dev/kmsg ]; then + export date_time=`date +"%F %T"` + echo "run fstests $seqnum at $date_time" > /dev/kmsg + # _check_dmesg depends on this log in dmesg + touch ${RESULT_DIR}/check_dmesg + rm -f ${RESULT_DIR}/dmesg_filter + fi + _try_wipe_scratch_devs > /dev/null 2>&1 + + # clear the WARN_ONCE state to allow a potential problem + # to be reported for each test + (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1 + + if [ "$DUMP_OUTPUT" = true ]; then + _run_seq 2>&1 | tee $tmp.out + # Because $? would get tee's return code + sts=${PIPESTATUS[0]} + else + _run_seq >$tmp.out 2>&1 + sts=$? 
+ fi + + if [ -f core ]; then + _dump_err_cont "[dumped core]" + mv core $RESULT_BASE/$seqnum.core + tc_status="fail" + fi + + if [ -f $seqres.notrun ]; then + $timestamp && _timestamp + stop=`_wallclock` + $timestamp || echo -n "[not run] " + $timestamp && echo " [not run]" && \ + echo -n " $seqnum -- " + cat $seqres.notrun + tc_status="notrun" + _stash_test_status "$seqnum" "$tc_status" + + # Unmount the scratch fs so that we can wipe the scratch + # dev state prior to the next test run. + _scratch_unmount 2> /dev/null + continue; + fi + + if [ $sts -ne 0 ]; then + _dump_err_cont "[failed, exit status $sts]" + _test_unmount 2> /dev/null + _scratch_unmount 2> /dev/null + rm -f ${RESULT_DIR}/require_test* + rm -f ${RESULT_DIR}/require_scratch* + tc_status="fail" + else + # The test apparently passed, so check for corruption + # and log messages that shouldn't be there. Run the + # checking tools from a subshell with adjusted OOM + # score so that the OOM killer will target them instead + # of the check script itself. + (_adjust_oom_score 250; _check_filesystems) || tc_status="fail" + _check_dmesg || tc_status="fail" + fi + + # Reload the module after each test to check for leaks or + # other problems. + if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then + _test_unmount 2> /dev/null + _scratch_unmount 2> /dev/null + modprobe -r fs-$FSTYP + modprobe fs-$FSTYP + fi + + # Scan for memory leaks after every test so that associating + # a leak to a particular test will be as accurate as possible. + _check_kmemleak || tc_status="fail" + + # test ends after all checks are done. + $timestamp && _timestamp + stop=`_wallclock` + + if [ ! -f $seq.out ]; then + _dump_err "no qualified output" + tc_status="fail" + _stash_test_status "$seqnum" "$tc_status" + continue; + fi + + # coreutils 8.16+ changed quote formats in error messages + # from `foo' to 'foo'. Filter old versions to match the new + # version. + sed -i "s/\`/\'/g" $tmp.out + if diff $seq.out $tmp.out >/dev/null 2>&1 ; then + if [ "$tc_status" != "fail" ]; then + echo "$seqnum `expr $stop - $start`" >>$tmp.time + echo -n " `expr $stop - $start`s" + fi + echo "" + else + _dump_err "- output mismatch (see $seqres.out.bad)" + mv $tmp.out $seqres.out.bad + $diff $seq.out $seqres.out.bad | { + if test "$DIFF_LENGTH" -le 0; then + cat + else + head -n "$DIFF_LENGTH" + echo "..." + echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \ + " to see the entire diff)" + fi; } | sed -e 's/^\(.\)/ \1/' + tc_status="fail" + fi + if [ -f $seqres.hints ]; then + if [ "$tc_status" == "fail" ]; then + echo + cat $seqres.hints + else + rm -f $seqres.hints + fi + fi + _stash_test_status "$seqnum" "$tc_status" + done + + sect_stop=`_wallclock` + interrupt=false + _wrapup + interrupt=true + echo + + _test_unmount 2> /dev/null + _scratch_unmount 2> /dev/null +} + +for ((iters = 0; iters < $iterations; iters++)) do + for section in $HOST_OPTIONS_SECTIONS; do + run_section $section + if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then + interrupt=false + status=`expr $sum_bad != 0` + exit + fi + done +done + +interrupt=false +status=`expr $sum_bad != 0` +exit diff --git a/common/Makefile b/common/Makefile new file mode 100644 index 00000000..5f91e8c3 --- /dev/null +++ b/common/Makefile @@ -0,0 +1,16 @@ +# +# Copyright (c) 2003-2006 Silicon Graphics, Inc. All Rights Reserved. +# + +TOPDIR = .. 
+include $(TOPDIR)/include/builddefs + +COMMON_DIR = common + +include $(BUILDRULES) + +install: + $(INSTALL) -m 755 -d $(PKG_LIB_DIR)/$(COMMON_DIR) + $(INSTALL) -m 644 * $(PKG_LIB_DIR)/$(COMMON_DIR) + +install-dev install-lib: diff --git a/common/attr b/common/attr new file mode 100644 index 00000000..cce4d1b2 --- /dev/null +++ b/common/attr @@ -0,0 +1,268 @@ +##/bin/bash +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. +# +# common extended attribute and ACL support + +# filesystems that want to test maximum supported acl counts need to +# add support in here +_acl_get_max() +{ + case $FSTYP in + xfs) + # CRC format filesystems have much larger ACL counts. The actual + # number is into the thousands, but testing that meany takes too + # long, so just test well past the old limit of 25. + $XFS_INFO_PROG $TEST_DIR | _filter_mkfs > /dev/null 2> $tmp.info + . $tmp.info + rm $tmp.info + if [ $_fs_has_crcs -eq 0 ]; then + echo 25 + else + echo 5461 + fi + ;; + jfs) + echo 8191 + ;; + f2fs) + # If noinline_xattr is enabled, max xattr size should be: + # (4096 - 24) - (24 + 4) = 4044 + # then ACL_MAX_ENTRIES should be: + # (4044 - (4 + 4 * 4)) / 8 + 4 = 507 + _fs_options $TEST_DEV | grep "noinline_xattr" >/dev/null 2>&1 + if [ $? -eq 0 ]; then + echo 507 + else + # If inline_xattr is enabled, max xattr size should be: + # (4096 - 24 + 200) - (24 + 4) = 4244 + # then ACL_MAX_ENTRIES should be: + # (4244 - (4 + 4 * 4)) / 8 + 4 = 532 + _fs_options $TEST_DEV | grep "inline_xattr" >/dev/null 2>&1 + if [ $? -eq 0 ]; then + echo 532 + else + echo 507 + fi + fi + ;; + bcachefs) + echo 251 + ;; + *) + echo 0 + ;; + esac +} + +_require_acl_get_max() +{ + if [ $(_acl_get_max) -eq 0 ]; then + _notrun "$FSTYP does not define maximum ACL count" + fi +} + +# pick three unused user/group ids, store them as $acl[1-3] +# +_acl_setup_ids() +{ + eval `(_cat_passwd; _cat_group) | awk -F: ' + { ids[$3]=1 } + END { + j=1 + for(i=1; i<1000000 && j<=3;i++){ + if (! 
(i in ids)) { + printf "acl%d=%d;", j, i; + j++ + } + } + }'` +} + +# filter for the acl ids selected above +# +_acl_filter_id() +{ + sed \ + -e "s/u:$acl1/u:id1/" \ + -e "s/u:$acl2/u:id2/" \ + -e "s/u:$acl3/u:id3/" \ + -e "s/g:$acl1/g:id1/" \ + -e "s/g:$acl2/g:id2/" \ + -e "s/g:$acl3/g:id3/" \ + -e "s/ $acl1 / id1 /" \ + -e "s/ $acl2 / id2 /" \ + -e "s/ $acl3 / id3 /" +} + +_getfacl_filter_id() +{ + sed \ + -e "s/user:$acl1/user:id1/" \ + -e "s/user:$acl2/user:id2/" \ + -e "s/user:$acl3/user:id3/" \ + -e "s/group:$acl1/group:id1/" \ + -e "s/group:$acl2/group:id2/" \ + -e "s/group:$acl3/group:id3/" \ + -e "s/: $acl1/: id1/" \ + -e "s/: $acl2/: id2/" \ + -e "s/: $acl3/: id3/" +} + +# filtered ls +# +_acl_ls() +{ + _ls_l -n $* | awk '{ print $1, $3, $4, $NF }' | _acl_filter_id +} + +# create an ACL with n ACEs in it +# +_create_n_aces() +{ + let n=$1-4 + acl='u::rwx,g::rwx,o::rwx,m::rwx' # 4 ace acl start + while [ $n -ne 0 ]; do + acl="$acl,u:$n:rwx" + let n=$n-1 + done + echo $acl +} + +# filter user ace names to user ids +# +_filter_aces() +{ + tmp_file=`mktemp /tmp/ace.XXXXXX` + + (_cat_passwd; _cat_group) > $tmp_file + + $AWK_PROG -v tmpfile=$tmp_file ' + BEGIN { + FS=":" + while ( getline 0 ) { + idlist[$1] = $3 + } + } + /^user/ { if ($2 in idlist) sub($2, idlist[$2]); print; next} + /^u/ { if ($2 in idlist) sub($2, idlist[$2]); print; next} + /^default:user/ { if ($3 in idlist) sub($3, idlist[$3]); print; next} + {print} + ' + rm -f $tmp_file +} + +_filter_aces_notypes() +{ + tr '\[' '\012' | tr ']' '\012' | tr ',' '\012' | _filter_aces|\ + sed -e 's/u:/user:/' -e 's/g:/group:/' -e 's/o:/other:/' -e 's/m:/mask:/' +} + +_require_acls() +{ + [ -n "$CHACL_PROG" ] || _notrun "chacl command not found" + + # + # Test if chacl is able to list ACLs on the target filesystems. On really + # old kernels the system calls might not be implemented at all, but the + # more common case is that the tested filesystem simply doesn't support + # ACLs. + # + touch $TEST_DIR/syscalltest + chacl -l $TEST_DIR/syscalltest > $TEST_DIR/syscalltest.out 2>&1 + cat $TEST_DIR/syscalltest.out >> $seqres.full + + if grep -q 'Function not implemented' $TEST_DIR/syscalltest.out; then + _notrun "kernel does not support ACLs" + fi + if grep -q 'Operation not supported' $TEST_DIR/syscalltest.out; then + _notrun "ACLs not supported by this filesystem type: $FSTYP" + fi + + rm -f $TEST_DIR/syscalltest.out +} + +_list_acl() +{ + file=$1 + + ls -dD $file | _acl_filter_id +} + +_require_attrs() +{ + local args + local nsp + + if [ $# -eq 0 ]; then + args="user" + else + args="$*" + fi + + [ -n "$ATTR_PROG" ] || _notrun "attr command not found" + [ -n "$GETFATTR_PROG" ] || _notrun "getfattr command not found" + [ -n "$SETFATTR_PROG" ] || _notrun "setfattr command not found" + + for nsp in $args; do + # + # Test if chacl is able to write an attribute on the target + # filesystems. On really old kernels the system calls might + # not be implemented at all, but the more common case is that + # the tested filesystem simply doesn't support attributes. + # Note that we can't simply list attributes as various security + # modules generate synthetic attributes not actually stored on + # disk. 
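+		# The probe below writes a throwaway "$nsp.xfstests" attribute for
+		# each requested namespace (callers may pass several, e.g.
+		# "trusted user"; the default is "user") and _notruns if the kernel
+		# or the filesystem rejects it.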
+ # + touch $TEST_DIR/syscalltest + $SETFATTR_PROG -n "$nsp.xfstests" -v "attr" $TEST_DIR/syscalltest > $TEST_DIR/syscalltest.out 2>&1 + cat $TEST_DIR/syscalltest.out >> $seqres.full + + if grep -q 'Function not implemented' $TEST_DIR/syscalltest.out; then + _notrun "kernel does not support attrs" + fi + if grep -q 'Operation not supported' $TEST_DIR/syscalltest.out; then + _notrun "attr namespace $nsp not supported by this filesystem type: $FSTYP" + fi + + rm -f $TEST_DIR/syscalltest.out + done +} + +_require_attr_v1() +{ + _scratch_mkfs_xfs_supported -i attr=1 >/dev/null 2>&1 \ + || _notrun "attr v1 not supported on $SCRATCH_DEV" +} + +# check if we support the noattr2 mount option +_require_noattr2() +{ + _scratch_mkfs_xfs > /dev/null 2>&1 \ + || _fail "_scratch_mkfs_xfs failed on $SCRATCH_DEV" + _try_scratch_mount -o noattr2 > /dev/null 2>&1 \ + || _notrun "noattr2 mount option not supported on $SCRATCH_DEV" + _scratch_unmount +} + +# getfattr -R returns info in readdir order which varies from fs to fs. +# This sorts the output by filename +_sort_getfattr_output() +{ + awk '{a[FNR]=$0}END{n = asort(a); for(i=1; i <= n; i++) print a[i]"\n"}' RS='' +} + +# Previously, when getfattr dumps values of all extended attributes, it prints +# empty attr as 'user.name', but new getfattr (since attr-2.4.48) prints it as +# 'user.name=""'. Filter out the ending '=""' so that both old and new getfattr +# pints the same output. +# +# Note: This function returns the getfattr command result. +_getfattr() +{ + $GETFATTR_PROG "$@" | sed -e 's/=\"\"//' + return ${PIPESTATUS[0]} +} + +# make sure this script returns success +/bin/true diff --git a/common/btrfs b/common/btrfs new file mode 100644 index 00000000..d27d3384 --- /dev/null +++ b/common/btrfs @@ -0,0 +1,587 @@ +# +# Common btrfs specific functions +# + +. common/module + +_btrfs_get_subvolid() +{ + mnt=$1 + name=$2 + + $BTRFS_UTIL_PROG sub list $mnt | grep -E "\s$name$" | $AWK_PROG '{ print $2 }' +} + +# _require_btrfs_command [|