From: Theodore Ts'o
Date: Fri, 14 Nov 2025 01:02:38 +0000 (-0500)
Subject: generic/773: fix expected output "QA output created by 1226"
X-Git-Tag: v2025.11.18~13
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=28e2a205c8d1694bc71a4247e6fd92bce29d3624;p=xfstests-dev.git

generic/773: fix expected output "QA output created by 1226"

The test generic/773 was apparently submitted as generic/1226, but when
it was renamed to pack the test namespace, the expected output was not
adjusted to reflect the new test name. This caused the test to fail on
systems with devices that support atomic writes.

Fixes: 1499d4ff2365 ("generic: Add atomic write test using fio crc ...")
Signed-off-by: Theodore Ts'o
Reviewed-by: Ojaswin Mujoo
Reviewed-by: Zorro Lang
Signed-off-by: Zorro Lang
--- 28e2a205c8d1694bc71a4247e6fd92bce29d3624
diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..82c57f41 --- /dev/null +++ b/.gitignore @@ -0,0 +1,225 @@ +*.lo +*.o +*.la +.dep +.libs +.ltdep +.* +*.state +tags + +/local.config +/results + +# autogenerated group files +/tests/*/group.list + +# autoconf generated files +/aclocal.m4 +/autom4te.cache +/configure +/config.guess +/config.log +/config.status +/config.sub +/m4/libtool.m4 +/m4/ltoptions.m4 +/m4/ltsugar.m4 +/m4/ltversion.m4 +/m4/lt~obsolete.m4 + +# libtool +/libtool +/install-sh +/ltmain.sh + +# build system +/include/builddefs +/include/config.h +/include/config.h.in + +# quilt +/patches +/.pc + +# custom config files +/configs/*.config + +# ltp/ binaries +/ltp/aio-stress +/ltp/doio +/ltp/fsstress +/ltp/fsx +/ltp/iogen + +# src/ binaries +/src/af_unix +/src/alloc +/src/allocstale +/src/append_reader +/src/append_writer +/src/attr_replace_test +/src/attr-list-by-handle-cursor-test +/src/bstat +/src/btrfs_encoded_read +/src/btrfs_encoded_write +/src/bulkstat_null_ocount +/src/bulkstat_unlink_test +/src/bulkstat_unlink_test_modified +/src/checkpoint_journal +/src/chprojid_fail +/src/cloner +/src/dbtest +/src/deduperace +/src/detached_mounts_propagation +/src/devzero +/src/dio-append-buf-fault +/src/dio-buf-fault +/src/dio-interleaved +/src/dio-invalidate-cache +/src/dio-write-fsync-same-fd +/src/dirhash_collide +/src/dirperf +/src/dirstress +/src/e4compact +/src/ext4_resize +/src/fake-dump-rootino +/src/fault +/src/feature +/src/fiemap-tester +/src/fill +/src/fill2 +/src/fs_perms +/src/fscrypt-crypt-util +/src/fssum +/src/fstest +/src/fsync-err +/src/fsync-tester +/src/ftrunc +/src/genhashnames +/src/getdevicesize +/src/getpagesize +/src/godown +/src/holes +/src/holetest +/src/itrash +/src/listxattr +/src/locktest +/src/loggen +/src/looptest +/src/lstat64 +/src/makeextents +/src/metaperf +/src/mkswap +/src/mmapcat +/src/mmap-rw-fault +/src/mmap-write-concurrent +/src/multi_open_unlink +/src/nametest +/src/nsexec +/src/open_by_handle +/src/permname +/src/preallo_rw_pattern_reader +/src/preallo_rw_pattern_writer +/src/punch-alternating +/src/pwrite_mmap_blocked +/src/randholes +/src/readdir-while-renames +/src/rename +/src/renameat2 +/src/resvtest +/src/rewinddir-test +/src/runas +/src/seek_copy_test +/src/seek_sanity_test +/src/splice2pipe +/src/splice-test +/src/stale_handle +/src/stat_test +/src/swapon +/src/t_access_root +/src/t_attr_corruption +/src/t_create_long_dirs +/src/t_create_short_dirs +/src/t_dir_offset +/src/t_dir_offset2 +/src/t_dir_type +/src/t_encrypted_d_revalidate +/src/t_enospc +/src/t_ext4_dax_inline_corruption +/src/t_ext4_dax_journal_corruption +/src/t_futimens +/src/t_get_file_time
+/src/t_getcwd +/src/t_holes +/src/t_immutable +/src/t_mmap_collision +/src/t_mmap_cow_memory_failure +/src/t_mmap_cow_race +/src/t_mmap_dio +/src/t_mmap_fallocate +/src/t_mmap_stale_pmd +/src/t_mmap_write_ro +/src/t_mmap_writev +/src/t_mmap_writev_overlap +/src/t_mtab +/src/t_ofd_locks +/src/t_open_tmpfiles +/src/t_readdir_1 +/src/t_readdir_2 +/src/t_readdir_3 +/src/t_reflink_read_race +/src/t_rename_overwrite +/src/t_snapshot_deleted_subvolume +/src/t_stripealign +/src/t_truncate_cmtime +/src/test-nextquota +/src/testx +/src/trunc +/src/truncfile +/src/unwritten_mmap +/src/unwritten_sync +/src/uring_read_fault +/src/usemem +/src/uuid_ioctl +/src/writemod +/src/writev_on_pagefault +/src/xfsctl +/src/xfsfind +/src/aio-dio-regress/aio-dio-append-write-fallocate-race +/src/aio-dio-regress/aio-dio-append-write-read-race +/src/aio-dio-regress/aio-dio-cow-race +/src/aio-dio-regress/aio-dio-cycle-write +/src/aio-dio-regress/aio-dio-eof-race +/src/aio-dio-regress/aio-dio-extend-stat +/src/aio-dio-regress/aio-dio-fcntl-race +/src/aio-dio-regress/aio-dio-hole-filling-race +/src/aio-dio-regress/aio-dio-invalidate-failure +/src/aio-dio-regress/aio-dio-invalidate-readahead +/src/aio-dio-regress/aio-dio-subblock-eof-read +/src/aio-dio-regress/aio-dio-write-verify +/src/aio-dio-regress/aio-io-setup-with-nonwritable-context-pointer +/src/aio-dio-regress/aio-last-ref-held-by-io +/src/aio-dio-regress/aiocp +/src/aio-dio-regress/aiodio_sparse2 +/src/rw_hint +/src/vfs/vfstest +/src/vfs/mount-idmapped +/src/log-writes/replay-log +/src/perf/*.pyc +/src/fiemap-fault +/src/min_dio_alignment +/src/dio-writeback-race +/src/unlink-fsync +/src/file_attr + +# Symlinked files +/tests/generic/035.out +/tests/generic/050.out +/tests/xfs/033.out +/tests/xfs/071.out +/tests/xfs/216.out + +# cscope files +cscope.* +ncscope.* diff --git a/LICENSES/GPL-2.0 b/LICENSES/GPL-2.0 new file mode 100644 index 00000000..b8db91d3 --- /dev/null +++ b/LICENSES/GPL-2.0 @@ -0,0 +1,353 @@ +Valid-License-Identifier: GPL-2.0 +Valid-License-Identifier: GPL-2.0+ +SPDX-URL: https://spdx.org/licenses/GPL-2.0.html +Usage-Guide: + To use this license in source code, put one of the following SPDX + tag/value pairs into a comment according to the placement + guidelines in the licensing rules documentation. + For 'GNU General Public License (GPL) version 2 only' use: + SPDX-License-Identifier: GPL-2.0 + For 'GNU General Public License (GPL) version 2 or any later version' use: + SPDX-License-Identifier: GPL-2.0+ +License-Text: + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. 
+ +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 00000000..a0f50f3d --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1,200 @@ +List of reviewers, co-maintainers and how to submit fstests changes +==================================================== + +Please try to follow the guidelines below. This will make things +easier on the maintainers. Not all of these guidelines matter for every +trivial patch so apply some common sense. + +Tips for patch submitters +------------------------- + +1. Always *test* your changes, however small, on at least 4 or + 5 people, preferably many more. + +2. Make sure your changes compile correctly in multiple + configurations. In particular check that changes don't break + fstests basic running. + +3. When you are happy with a change make it generally available for + testing and await feedback. + +4. Make a patch available to fstests@ list directly, that's the only + one mailing list which maintain the whole fstests project. + + PLEASE CC: the relevant reviewers, co-maintainers and mailing lists + that are generated by ``tools/get_maintainer.pl.`` + + PLEASE try to include any credit lines you want added with the + patch. It avoids people being missed off by mistake and makes + it easier to know who wants adding and who doesn't. + + PLEASE document known bugs. If it doesn't work for everything + or does something very odd once a month document it. + +5. Make sure you have the right to send any changes you make. If you + do changes at work you may find your employer owns the patch + not you. + +6. Happy hacking. + +Descriptions of section entries and preferred order +--------------------------------------------------- + + M: *Mail* patches to: FullName + These people might be a co-maintainer (with Supported status) or + maintainer (with Maintained status). + R: Designated *Reviewer*: FullName + These reviewers should be CCed on patches. + L: Besides fstests@ list itself, this *Mailing list* is relevant to + this area, should be CCed. + S: *Status*, one of the following (note: all things are maintained by + fstests@vger.kernel.org): + Supported: Someone is actually paid to look after this. + Maintained: Someone actually looks after it, has the privilege to + merge & push. + Odd Fixes: It has a maintainer but they don't have time to do + much other than throw the odd patch in. See below.. + Orphan: No current maintainer [but maybe you could take the + role as you write your new code]. + Obsolete: Old code. Something tagged obsolete generally means + it has been replaced by a better system and you + should be using that. + W: *Web-page* with status/info + Q: *Patchwork* web based patch tracking system site + B: URI for where to file *bugs*. A web-page with detailed bug + filing info, a direct bug tracker link, or a mailto: URI. + C: URI for *chat* protocol, server and channel where developers + usually hang out, for example irc://server/channel. + P: Subsystem Profile document for more details submitting + patches to the given subsystem. This is either an in-tree file, + or a URI. + T: *SCM* tree type and location. + Type is one of: git, hg, quilt, stgit, topgit + F: *Files* and directories wildcard patterns. + A trailing slash includes all files and subdirectory files. + F: tests/xfs/ all files in and below tests/xfs + F: tests/generic/* all files in tests/generic, but not below + F: */ext4/* all files in "any top level directory"/ext4 + One pattern per line. Multiple F: lines acceptable. 
+ X: *Excluded* files and directories that are NOT maintained, same + rules as F:. Files exclusions are tested before file matches. + Can be useful for excluding a specific subdirectory, for instance: + F: src/ + X: src/vfs + matches all files in and below net excluding net/ipv6/ + N: Files and directories *Regex* patterns. + N: [^a-z]tegra all files whose path contains tegra + (not including files like integrator) + One pattern per line. Multiple N: lines acceptable. + tools/get_maintainer.pl has different behavior for files that + match F: pattern and matches of N: patterns. By default, + get_maintainer will not look at git log history when an F: pattern + match occurs. When an N: match occurs, git log history is used + to also notify the people that have git commit signatures. + K: *Content regex* (perl extended) pattern match in a patch or file. + For instance: + K: of_get_profile + matches patches or files that contain "of_get_profile" + K: \b(printk|pr_(info|err))\b + matches patches or files that contain one or more of the words + printk, pr_info or pr_err + One regex pattern per line. Multiple K: lines acceptable. + +Maintainers List +---------------- + +.. note:: The whole fstests are maintained by fstests@vger.kernel.org, so you + should send patch to fstests@ at least. Other relevant mailing list + or reviewer or co-maintainer can be in cc list. + +BTRFS +M: Anand Jain +R: Filipe Manana +L: linux-btrfs@vger.kernel.org +S: Supported +F: tests/btrfs/ +F: common/btrfs + +CEPH +L: ceph-devel@vger.kernel.org +S: Supported +F: tests/ceph/ +F: common/ceph + +CIFS +L: linux-cifs@vger.kernel.org +S: Supported +F: tests/cifs + +EXT4 +L: linux-ext4@vger.kernel.org +S: Supported +F: tests/ext4/ +F: common/ext4 + +F2FS +L: linux-f2fs-devel@lists.sourceforge.net +S: Supported +F: tests/f2fs/ +F: common/f2fs + +FSVERITY +R: Eric Biggers +L: fsverity@lists.linux.dev +S: Supported +F: common/verity + +FSCRYPT +R: Eric Biggers +L: linux-fscrypt@vger.kernel.org +S: Supported +F: common/encrypt + +NFS +L: linux-nfs@vger.kernel.org +S: Supported +F: tests/nfs/ +F: common/nfs + +OCFS2 +L: ocfs2-devel@lists.linux.dev +S: Supported +F: tests/ocfs2/ + +OVERLAYFS +R: Amir Goldstein +L: linux-unionfs@vger.kernel.org +S: Supported +F: tests/overlay +F: common/overlay + +UDF +R: Jan Kara +S: Supported +F: tests/udf/ + +VFS +R: Christian Brauner +L: linux-fsdevel@vger.kernel.org +S: Supported +F: src/vfs/ + +XFS +R: Darrick J. Wong +L: linux-xfs@vger.kernel.org +S: Supported +F: common/dump +F: common/fuzzy +F: common/inject +F: common/populate +F: common/repair +F: common/xfs +F: tests/xfs/ + +ALL +M: Zorro Lang +L: fstests@vger.kernel.org +S: Maintained +T: git git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git +F: * +F: */ diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..a48d8d62 --- /dev/null +++ b/Makefile @@ -0,0 +1,122 @@ +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (C) 2000-2008, 2011 SGI All Rights Reserved. +# +ifeq ("$(origin V)", "command line") + BUILD_VERBOSE = $(V) +endif +ifndef BUILD_VERBOSE + BUILD_VERBOSE = 0 +endif + +ifeq ($(BUILD_VERBOSE),1) + Q = +else + Q = @ +endif + +CHECK=sparse +CHECK_OPTS=-Wsparse-all -Wbitwise -Wno-transparent-union -Wno-return-void -Wno-undef \ + -Wno-non-pointer-null -D__CHECK_ENDIAN__ -D__linux__ + +ifeq ("$(origin C)", "command line") + CHECK_CMD=$(CHECK) $(CHECK_OPTS) + CHECKSRC=$(C) +else + CHECK_CMD=@true + CHECKSRC=0 +endif + +export CHECK_CMD CHECKSRC + +MAKEOPTS = --no-print-directory Q=$(Q) + +TOPDIR = . 
+HAVE_BUILDDEFS = $(shell test -f $(TOPDIR)/include/builddefs && echo yes || echo no) + +ifeq ($(HAVE_BUILDDEFS), yes) +include $(TOPDIR)/include/builddefs +else +export TESTS_DIR = tests +endif + +SRCTAR = $(PKG_NAME)-$(PKG_VERSION).tar.gz + +CONFIGURE = configure include/config.h include/config.h.in \ + aclocal.m4 config.guess config.sub install-sh ltmain.sh \ + m4/libtool.m4 m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 \ + m4/lt~obsolete.m4 +LSRCFILES = configure configure.ac aclocal.m4 README VERSION +LDIRT = config.log .ltdep .dep config.status config.cache confdefs.h \ + conftest* check.log check.time libtool include/builddefs + +ifeq ($(HAVE_BUILDDEFS), yes) +LDIRT += $(SRCTAR) +endif + +LIB_SUBDIRS = include lib +TOOL_SUBDIRS = ltp src m4 common tools + +SUBDIRS = $(LIB_SUBDIRS) $(TOOL_SUBDIRS) $(TESTS_DIR) + +default: include/builddefs +ifeq ($(HAVE_BUILDDEFS), no) + $(Q)$(MAKE) $(MAKEOPTS) $@ +else + $(Q)$(MAKE) $(MAKEOPTS) $(SUBDIRS) +endif + +# tool/lib dependencies +$(TOOL_SUBDIRS): $(LIB_SUBDIRS) + +ifeq ($(HAVE_BUILDDEFS), yes) +include $(BUILDRULES) +else +clean: # if configure hasn't run, nothing to clean +endif + +configure: configure.ac + libtoolize -cfi + cp include/install-sh . + aclocal -I m4 + autoheader + autoconf + +include/builddefs include/config.h: configure + ./configure \ + --libexecdir=/usr/lib \ + --exec_prefix=/var/lib + +aclocal.m4:: + aclocal --acdir=`pwd`/m4 --output=$@ + +depend: include/builddefs $(addsuffix -depend,$(SUBDIRS)) + +install: default $(addsuffix -install,$(SUBDIRS)) + $(INSTALL) -m 755 -d $(PKG_LIB_DIR) + $(INSTALL) -m 755 check $(PKG_LIB_DIR) + $(INSTALL) -m 644 randomize.awk $(PKG_LIB_DIR) + +# Nothing. +install-dev install-lib: + +%-install: + $(MAKE) $(MAKEOPTS) -C $* install + +realclean distclean: clean + $(Q)rm -f $(LDIRT) $(CONFIGURE) + $(Q)rm -rf autom4te.cache Logs + +dist: include/builddefs include/config.h default +ifeq ($(HAVE_BUILDDEFS), no) + $(Q)$(MAKE) $(MAKEOPTS) -C . $@ +else + $(Q)$(MAKE) $(MAKEOPTS) $(SRCTAR) +endif + +$(SRCTAR) : default + $(Q)git archive --prefix=$(PKG_NAME)-$(PKG_VERSION)/ --format=tar \ + v$(PKG_VERSION) > $(PKG_NAME)-$(PKG_VERSION).tar + $(Q)$(TAR) --transform "s,^,$(PKG_NAME)-$(PKG_VERSION)/," \ + -rf $(PKG_NAME)-$(PKG_VERSION).tar $(CONFIGURE) + $(Q)$(ZIP) $(PKG_NAME)-$(PKG_VERSION).tar + echo Wrote: $@ diff --git a/Makepkgs b/Makepkgs new file mode 100755 index 00000000..04e029a7 --- /dev/null +++ b/Makepkgs @@ -0,0 +1,88 @@ +#! /bin/bash +# +# Make whichever packages have been requested. +# Defaults to RPMs. +# +LOGDIR=Logs + +type=rpm +verbose=false + +MAKE=${MAKE:-make} +test ! -z "$MAKE" && make=$MAKE + +for opt in $* +do + case "$opt" in + clean) + ;; # ignored, kept for backward compatibility + rpm) + type=rpm ;; + debian) + type=debian ;; + verbose) + verbose=true ;; + *) + echo "Usage: Makepkgs [verbose] [debian|rpm]"; exit 1 ;; + esac +done + +# start with a clean manifest +test -f files.rpm && rm -f files.rpm +test -f filesdevel.rpm && rm -f filesdevel.rpm +test -f fileslib.rpm && rm -f fileslib.rpm + +test ! -d $LOGDIR && mkdir $LOGDIR +rm -rf $LOGDIR/* > /dev/null 2>&1 + +# build Debian packages, cleans itself before starting +SUDO=${SUDO:-sudo} +test ! 
-z "$SUDO" && sudo=$SUDO +if [ $type = debian ] ; then + LOGDEB=`pwd` + LOGDEB=../`basename $LOGDEB`.log + echo "== Debian build, log is $LOGDEB"; echo + if $verbose ; then + dpkg-buildpackage -r$SUDO | tee $LOGDEB + else + dpkg-buildpackage -r$SUDO > $LOGDEB || exit 1 + fi + exit 0 +fi + +# build RPM packages - manual clean before starting +echo "== clean, log is $LOGDIR/clean" +if $verbose ; then + $MAKE clean 2>&1 | tee $LOGDIR/clean +else + $MAKE clean > $LOGDIR/clean 2>&1 || exit 1 +fi + +echo +echo "== configure, log is $LOGDIR/configure" +rm -f .census # force configure to run here +if $verbose ; then + $MAKE configure 2>&1 | tee $LOGDIR/configure +else + $MAKE configure > $LOGDIR/configure 2>&1 || exit 1 +fi + +echo +echo "== default, log is $LOGDIR/default" +if $verbose ; then + $MAKE default 2>&1 | tee $LOGDIR/default +else + $MAKE default > $LOGDIR/default 2>&1 || exit 1 +fi + +echo +echo "== dist, log is $LOGDIR/dist" +[ ! -f .census ] && touch .census +if $verbose ; then + $MAKE -C build dist 2>&1 | tee $LOGDIR/dist +else + $MAKE -C build dist > $LOGDIR/dist 2>&1 || exit 1 + grep '^Wrote:' $LOGDIR/dist | sed -e 's/\.\.\/\.\.\///' +fi + +exit 0 diff --git a/README b/README new file mode 100644 index 00000000..196c79a2 --- /dev/null +++ b/README @@ -0,0 +1,612 @@ +_______________________ +SUPPORTED FS LIST +_______________________ + +History +------- + +Firstly, xfstests is the old name of this project, due to it was originally +developed for testing the XFS file system on the SGI's Irix operating system. +When xfs was ported to Linux, so was xfstests, now it only supports Linux. + +As xfstests has many test cases that can be run on some other filesystems, +we call them "generic" (and "shared", but it has been removed) cases, you +can find them in tests/generic/ directory. Then more and more filesystems +started to use xfstests, and contribute patches. Today xfstests is used +as a file system regression test suite for lots of Linux's major file systems. +So it's not "xfs"tests only, we tend to call it "fstests" now. + +Supported fs +------------ + +Firstly, there's not hard restriction about which filesystem can use fstests. +Any filesystem can give fstests a try. + +Although fstests supports many filesystems, they have different support level +by fstests. So mark it with 4 levels as below: + +L1: Fstests can be run on the specified fs basically. +L2: Rare support from the specified fs list to fix some generic test failures. +L3: Normal support from the specified fs list, has some own cases. +L4: Active support from the fs list, has lots of own cases. + +("+" means a slightly higher than the current level, but not reach to the next. +"-" is opposite, means a little bit lower than the current level.) 
+ ++------------+-------+---------------------------------------------------------+ +| Filesystem | Level | Comment | ++------------+-------+---------------------------------------------------------+ +| XFS | L4+ | N/A | ++------------+-------+---------------------------------------------------------+ +| Btrfs | L4 | https://btrfs.readthedocs.io/en/latest/dev/\ | +| | | Development-notes.html#fstests-setup | ++------------+-------+---------------------------------------------------------+ +| Ext4 | L4 | N/A | ++------------+-------+---------------------------------------------------------+ +| Ext2 | L3 | N/A | ++------------+-------+---------------------------------------------------------+ +| Ext3 | L3 | N/A | ++------------+-------+---------------------------------------------------------+ +| overlay | L3 | N/A | ++------------+-------+---------------------------------------------------------+ +| f2fs | L3- | N/A | ++------------+-------+---------------------------------------------------------+ +| tmpfs | L3- | N/A | ++------------+-------+---------------------------------------------------------+ +| NFS | L2+ | https://linux-nfs.org/wiki/index.php/Xfstests | ++------------+-------+---------------------------------------------------------+ +| Ceph | L2 | N/A | ++------------+-------+---------------------------------------------------------+ +| CIFS | L2- | https://wiki.samba.org/index.php/Xfstesting-cifs | ++------------+-------+---------------------------------------------------------+ +| ocfs2 | L2- | N/A | ++------------+-------+---------------------------------------------------------+ +| Bcachefs | L2 | N/A | ++------------+-------+---------------------------------------------------------+ +| Exfat | L1+ | N/A | ++------------+-------+---------------------------------------------------------+ +| AFS | L1 | N/A | ++------------+-------+---------------------------------------------------------+ +| FUSE | L1 | N/A | ++------------+-------+---------------------------------------------------------+ +| GFS2 | L1 | N/A | ++------------+-------+---------------------------------------------------------+ +| Glusterfs | L1 | N/A | ++------------+-------+---------------------------------------------------------+ +| JFS | L1 | N/A | ++------------+-------+---------------------------------------------------------+ +| pvfs2 | L1 | N/A | ++------------+-------+---------------------------------------------------------+ +| Reiser4 | L1 | Reiserfs has been removed, only left reiser4 | ++------------+-------+---------------------------------------------------------+ +| ubifs | L1 | N/A | ++------------+-------+---------------------------------------------------------+ +| udf | L1 | N/A | ++------------+-------+---------------------------------------------------------+ +| Virtiofs | L1 | N/A | ++------------+-------+---------------------------------------------------------+ +| 9p | L1 | N/A | ++------------+-------+---------------------------------------------------------+ + +_______________________ +BUILDING THE FSQA SUITE +_______________________ + +Ubuntu or Debian +---------------- + +1. 
Make sure that package list is up-to-date and install all necessary packages: + + $ sudo apt-get update + $ sudo apt-get install acl attr automake bc dbench dump e2fsprogs fio gawk \ + gcc git indent libacl1-dev libaio-dev libcap-dev libgdbm-dev libtool \ + libtool-bin liburing-dev libuuid1 lvm2 make psmisc python3 quota sed \ + uuid-dev uuid-runtime xfsprogs linux-headers-$(uname -r) sqlite3 \ + libgdbm-compat-dev + +2. Install packages for the filesystem(s) being tested: + + $ sudo apt-get install exfatprogs f2fs-tools ocfs2-tools udftools xfsdump \ + xfslibs-dev + +3. Install packages for optional features: + + systemd coredump capture: + $ sudo apt install systemd-coredump systemd jq + +Fedora +------ + +1. Install all necessary packages from standard repository: + + $ sudo yum install acl attr automake bc dbench dump e2fsprogs fio gawk gcc \ + gdbm-devel git indent kernel-devel libacl-devel libaio-devel \ + libcap-devel libtool liburing-devel libuuid-devel lvm2 make psmisc \ + python3 quota sed sqlite udftools xfsprogs + +2. Install packages for the filesystem(s) being tested: + + $ sudo yum install btrfs-progs exfatprogs f2fs-tools ocfs2-tools xfsdump \ + xfsprogs-devel + +3. Install packages for optional features: + + systemd coredump capture: + $ sudo yum install systemd systemd-udev jq + +RHEL or CentOS +-------------- + +1. Enable EPEL repository: + - see https://docs.fedoraproject.org/en-US/epel/#How_can_I_use_these_extra_packages.3F + +2. Install all necessary packages which are available from standard repository + and EPEL: + + $ sudo yum install acl attr automake bc dbench dump e2fsprogs fio gawk gcc \ + gdbm-devel git indent kernel-devel libacl-devel libaio-devel \ + libcap-devel libtool libuuid-devel lvm2 make psmisc python3 quota sed \ + sqlite udftools xfsprogs + + Or, EPEL packages could be compiled from sources, see: + - https://dbench.samba.org/web/download.html + - https://www.gnu.org/software/indent/ + +3. Build and install 'liburing': + - see https://github.com/axboe/liburing. + +4. Install packages for the filesystem(s) being tested: + + For XFS install: + $ sudo yum install xfsdump xfsprogs-devel + + For exfat install: + $ sudo yum install exfatprogs + + For f2fs build and install: + - see https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs-tools.git/about/ + + For ocfs2 build and install: + - see https://github.com/markfasheh/ocfs2-tools + +5. Install packages for optional features: + + systemd coredump capture: + $ sudo yum install systemd systemd-udev jq + +SUSE Linux Enterprise or openSUSE +--------------------------------- + +1. Install all necessary packages from standard repositories: + + $ sudo zypper install acct automake bc dbench duperemove dump fio gcc git \ + indent libacl-devel libaio-devel libattr-devel libcap libcap-devel \ + libtool liburing-devel libuuid-devel lvm2 make quota sqlite3 xfsprogs + +2. Install packages for the filesystem(s) being tested: + + For btrfs install: + $ sudo zypper install btrfsprogs libbtrfs-devel + + For XFS install: + $ sudo zypper install xfsdump xfsprogs-devel + +3. Install packages for optional features: + + systemd coredump capture: + $ sudo yum install systemd systemd-coredump jq + +Build and install test, libs and utils +-------------------------------------- + +$ git clone git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git +$ cd xfstests-dev +$ make +$ sudo make install + +Setup Environment +----------------- + +1. Compile XFS/EXT4/BTRFS/etc. into your kernel or load as module. 
For example, + for XFS, enable XFS_FS in your kernel configuration, or compile it as a + module and load it with 'sudo modprobe xfs'. Most of the distributions will + have these filesystems already in the kernel/as module. + +2. Create TEST device: + - format as the filesystem type you wish to test. + - should be at least 10GB in size. + - optionally populate with destroyable data. + - device contents may be destroyed. + +3. (optional) Create SCRATCH device. + - many tests depend on the SCRATCH device existing. + - not need to be formatted. + - should be at least 10GB in size. + - must be different to TEST device. + - device contents will be destroyed. + +4. (optional) Create SCRATCH device pool. + - needed for BTRFS testing + - specifies 3 or more independent SCRATCH devices via the SCRATCH_DEV_POOL + variable e.g SCRATCH_DEV_POOL="/dev/sda /dev/sdb /dev/sdc" + - device contents will be destroyed. + - SCRATCH device should be left unset, it will be overridden + by the SCRATCH_DEV_POOL implementation. + +5. Copy local.config.example to local.config and edit as needed. The TEST_DEV + and TEST_DIR are required. + +6. (optional) Create fsgqa test users and groups: + + $ sudo useradd -m fsgqa + $ sudo useradd 123456-fsgqa + $ sudo useradd fsgqa2 + $ sudo groupadd fsgqa + + The "123456-fsgqa" user creation step can be safely skipped if your system + doesn't support names starting with digits, only a handful of tests require + it. + +7. (optional) If you wish to run the udf components of the suite install + mkudffs. Also download and build the Philips UDF Verification Software from + https://www.lscdweb.com/registered/udf_verifier.html, then copy the udf_test + binary to xfstests/src/. + +8. (optional) To do io_uring related testing, please make sure below 3 things: + 1) kernel is built with CONFIG_IO_URING=y + 2) sysctl -w kernel.io_uring_disabled=0 (or set it to 2 to disable io_uring + testing dynamically if kernel supports) + 3) install liburing development package contains liburing.h before building + fstests + +For example, to run the tests with loopback partitions: + + # xfs_io -f -c "falloc 0 10g" test.img + # xfs_io -f -c "falloc 0 10g" scratch.img + # mkfs.xfs test.img + # losetup /dev/loop0 ./test.img + # losetup /dev/loop1 ./scratch.img + # mkdir -p /mnt/test && mount /dev/loop0 /mnt/test + # mkdir -p /mnt/scratch + +The config for the setup above is: + + $ cat local.config + export TEST_DEV=/dev/loop0 + export TEST_DIR=/mnt/test + export SCRATCH_DEV=/dev/loop1 + export SCRATCH_MNT=/mnt/scratch + +From this point you can run some basic tests, see 'USING THE FSQA SUITE' below. + +Additional Setup +---------------- + +Some tests require additional configuration in your local.config. Add these +variables to a local.config and keep that file in your workarea. Or add a case +to the switch in common/config assigning these variables based on the hostname +of your test machine. Or use 'setenv' to set them. + +Extra TEST device specifications: + - Set TEST_LOGDEV to "device for test-fs external log" + - Set TEST_RTDEV to "device for test-fs realtime data" + - If TEST_LOGDEV and/or TEST_RTDEV, these will always be used. 
+ - Set FSTYP to "the filesystem you want to test", the filesystem type is + devised from the TEST_DEV device, but you may want to override it; if + unset, the default is 'xfs' + +Extra SCRATCH device specifications: + - Set SCRATCH_LOGDEV to "device for scratch-fs external log" + - Set SCRATCH_RTDEV to "device for scratch-fs realtime data" + - If SCRATCH_LOGDEV and/or SCRATCH_RTDEV, the USE_EXTERNAL environment + +Tape device specification for xfsdump testing: + - Set TAPE_DEV to "tape device for testing xfsdump". + - Set RMT_TAPE_DEV to "remote tape device for testing xfsdump" + variable set to "yes" will enable their use. + - Note that if testing xfsdump, make sure the tape devices have a tape which + can be overwritten. + +Extra XFS specification: + - Set TEST_XFS_REPAIR_REBUILD=1 to have _check_xfs_filesystem run + xfs_repair -n to check the filesystem; xfs_repair to rebuild metadata + indexes; and xfs_repair -n (a third time) to check the results of the + rebuilding. + - The FORCE_XFS_CHECK_PROG option was removed in July 2024, along with all + support for xfs_check. + - Set TEST_XFS_SCRUB_REBUILD=1 to have _check_xfs_filesystem run xfs_scrub in + "force_repair" mode to rebuild the filesystem; and xfs_repair -n to check + the results of the rebuilding. + - xfs_scrub, if present, will always check the test and scratch + filesystems if they are still online at the end of the test. It is no + longer necessary to set TEST_XFS_SCRUB. + +Tools specification: + - dump: + - Set DUMP_CORRUPT_FS=1 to record metadata dumps of XFS, ext* or + btrfs filesystems if a filesystem check fails. + - Set DUMP_COMPRESSOR to a compression program to compress metadumps of + filesystems. This program must accept '-f' and the name of a file to + compress; and it must accept '-d -f -k' and the name of a file to + decompress. In other words, it must emulate gzip. + - dmesg: + - Set KEEP_DMESG=yes to keep dmesg log after test + - kmemleak: + - Set USE_KMEMLEAK=yes to scan for memory leaks in the kernel after every + test, if the kernel supports kmemleak. + - fsstress: + - Set FSSTRESS_AVOID and/or FSX_AVOID, which contain options added to + the end of fsstresss and fsx invocations, respectively, in case you wish + to exclude certain operational modes from these tests. + - core dumps: + - Set COREDUMP_COMPRESSOR to a compression program to compress crash dumps. + This program must accept '-f' and the name of a file to compress. In + other words, it must emulate gzip. + +Kernel/Modules related configuration: + - Set TEST_FS_MODULE_RELOAD=1 to unload the module and reload it between + test invocations. This assumes that the name of the module is the same + as FSTYP. + - Set MODPROBE_PATIENT_RM_TIMEOUT_SECONDS to specify the amount of time we + should try a patient module remove. The default is 50 seconds. Set this + to "forever" and we'll wait forever until the module is gone. + - Set KCONFIG_PATH to specify your preferred location of kernel config + file. The config is used by tests to check if kernel feature is enabled. + - Set REPORT_GCOV to a directory path to make lcov and genhtml generate + html reports from any gcov code coverage data collected by the kernel. + If REPORT_GCOV is set to 1, the report will be written to $REPORT_DIR/gcov/. + +Test control: + - Set LOAD_FACTOR to a nonzero positive integer to increase the amount of + load applied to the system during a test by the specified multiple. 
+ - Set TIME_FACTOR to a nonzero positive integer to increase the amount of + time that a test runs by the specified multiple. + - For tests that are a member of the "soak" group, setting SOAK_DURATION + allows the test runner to specify exactly how long the test should continue + running. This setting overrides TIME_FACTOR. Floating point numbers are + allowed, and the unit suffixes m(inutes), h(ours), d(ays), and w(eeks) are + supported. + +Misc: + - If you wish to disable UDF verification test set the environment variable + DISABLE_UDF_TEST to 1. + - Set LOGWRITES_DEV to a block device to use for power fail testing. + - Set PERF_CONFIGNAME to a arbitrary string to be used for identifying + the test setup for running perf tests. This should be different for + each type of performance test you wish to run so that relevant results + are compared. For example 'spinningrust' for configurations that use + spinning disks and 'nvme' for tests using nvme drives. + - Set MIN_FSSIZE to specify the minimal size (bytes) of a filesystem we + can create. Setting this parameter will skip the tests creating a + filesystem less than MIN_FSSIZE. + - Set DIFF_LENGTH to "number of diff lines to print from a failed test", + by default 10, set to 0 to print the full diff + - set IDMAPPED_MOUNTS=true to run all tests on top of idmapped mounts. While + this option is supported for all filesystems currently only -overlay is + expected to run without issues. For other filesystems additional patches + and fixes to the test suite might be needed. + - Set REPORT_VARS_FILE to a file containing colon-separated name-value pairs + that will be recorded in the test section report. Names must be unique. + Whitespace surrounding the colon will be removed. + - set CANON_DEVS=yes to canonicalize device symlinks. This will let you + for example use something like TEST_DEV/dev/disk/by-id/nvme-* so the + device remains persistent between reboots. This is disabled by default. + +______________________ +USING THE FSQA SUITE +______________________ + +Running tests: + + - cd xfstests + - By default the tests suite will run all the tests in the auto group. These + are the tests that are expected to function correctly as regression tests, + and it excludes tests that exercise conditions known to cause machine + failures (i.e. the "dangerous" tests). + - ./check '*/001' '*/002' '*/003' + - ./check '*/06?' + - Groups of tests maybe ran by: ./check -g [group(s)] + See the tests/*/group.list files after building xfstests to learn about + each test's group memberships. + - If you want to run all tests regardless of what group they are in + (including dangerous tests), use the "all" group: ./check -g all + - To randomize test order: ./check -r [test(s)] + - You can explicitly specify NFS/AFS/CIFS/OVERLAY, otherwise + the filesystem type will be autodetected from $TEST_DEV: + - for running nfs tests: ./check -nfs [test(s)] + - for running afs tests: ./check -afs [test(s)] + - for running cifs/smb3 tests: ./check -cifs [test(s)] + - for overlay tests: ./check -overlay [test(s)] + The TEST and SCRATCH partitions should be pre-formatted + with another base fs, where the overlay dirs will be created + + + The check script tests the return value of each script, and + compares the output against the expected output. If the output + is not as expected, a diff will be output and an .out.bad file + will be produced for the failing test. 
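As a concrete illustration of the golden-output comparison described above (and of the kind of mismatch this commit fixes), a sketch of the usual failure-inspection flow; the result path assumes the default RESULT_BASE of ./results and a run without config sections:

   $ ./check generic/773
   $ diff tests/generic/773.out results/generic/773.out.bad
   # Before this fix the golden output file still carried the stale id
   # "QA output created by 1226" while the test itself emits
   # "QA output created by 773", so check reported the diff and kept
   # the .out.bad file for inspection.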
+ + Unexpected console messages, crashes and hangs may be considered + to be failures but are not necessarily detected by the QA system. + +__________________________ +ADDING TO THE FSQA SUITE +__________________________ + + +Creating new tests scripts: + + Use the "new" script. + +Test script environment: + + When developing a new test script keep the following things in + mind. All of the environment variables and shell procedures are + available to the script once the "common/preamble" file has been + sourced and the "_begin_fstest" function has been called. + + 1. The tests are run from an arbitrary directory. If you want to + do operations on an XFS filesystem (good idea, eh?), then do + one of the following: + + (a) Create directories and files at will in the directory + $TEST_DIR ... this is within an XFS filesystem and world + writeable. You should cleanup when your test is done, + e.g. use a _cleanup shell procedure in the trap ... see + 001 for an example. If you need to know, the $TEST_DIR + directory is within the filesystem on the block device + $TEST_DEV. + + (b) mkfs a new XFS filesystem on $SCRATCH_DEV, and mount this + on $SCRATCH_MNT. Call the the _require_scratch function + on startup if you require use of the scratch partition. + _require_scratch does some checks on $SCRATCH_DEV & + $SCRATCH_MNT and makes sure they're unmounted. You should + cleanup when your test is done, and in particular unmount + $SCRATCH_MNT. + Tests can make use of $SCRATCH_LOGDEV and $SCRATCH_RTDEV + for testing external log and realtime volumes - however, + these tests need to simply "pass" (e.g. cat $seq.out; exit + - or default to an internal log) in the common case where + these variables are not set. + + 2. You can safely create temporary files that are not part of the + filesystem tests (e.g. to catch output, prepare lists of things + to do, etc.) in files named $tmp.. The standard test + script framework created by "new" will initialize $tmp and + cleanup on exit. + + 3. By default, tests are run as the same uid as the person + executing the control script "check" that runs the test scripts. + + 4. Some other useful shell procedures: + + _get_fqdn - echo the host's fully qualified + domain name + + _get_pids_by_name - one argument is a process name, and + return all of the matching pids on + standard output + + _within_tolerance - fancy numerical "close enough is good + enough" filter for deterministic + output ... see comments in + common/filter for an explanation + + _filter_date - turn ctime(3) format dates into the + string DATE for deterministic + output + + _cat_passwd, - dump the content of the password + _cat_group or group file (both the local file + and the content of the NIS database + if it is likely to be present) + + 5. General recommendations, usage conventions, etc.: + - When the content of the password or group file is + required, get it using the _cat_passwd and _cat_group + functions, to ensure NIS information is included if NIS + is active. + - When calling getfacl in a test, pass the "-n" argument so + that numeric rather than symbolic identifiers are used in + the output. + - When creating a new test, it is possible to enter a custom name + for the file. Filenames are in form NNN-custom-name, where NNN + is automatically added by the ./new script as an unique ID, + and "custom-name" is the optional string entered into a prompt + in the ./new script. It can contain only alphanumeric characters + and dash. Note the "NNN-" part is added automatically. + + 6. 
+ of groups for convenient selection of subsets of tests. Group names
+ must be human readable using only characters in the set [:alnum:_-].
+
+ Test authors associate a test with groups by passing the names of those
+ groups as arguments to the _begin_fstest function. While _begin_fstest
+ is a shell function that must be called at the start of a test to
+ initialise the test environment correctly, the build infrastructure
+ also scans the test files for _begin_fstest invocations. It does this
+ to compile the group lists that are used to determine which tests to run
+ when `check` is executed. In other words, test files must call
+ _begin_fstest with their intended groups or they will not be run.
+
+ However, because the build infrastructure also uses _begin_fstest as
+ a defined keyword, additional restrictions are placed on how it must be
+ formatted:
+
+ (a) It must be a single line with no multi-line continuations.
+
+ (b) Group names should be separated by spaces and not other whitespace.
+
+ (c) A '#' placed anywhere in the list, even in the middle of a group
+ name, will cause everything from the # to the end of the line to be
+ ignored.
+
+ For example, the code:
+
+ _begin_fstest auto quick subvol snapshot # metadata
+
+ associates the current test with the "auto", "quick", "subvol", and
+ "snapshot" groups. Because "metadata" is after the "#" comment
+ delimiter, it is ignored by the build infrastructure and so it will not
+ be associated with that group.
+
+ It is not necessary to specify the "all" group in the list because that
+ group is always computed at run time from the group lists.
+
+
+Verified output:
+
+ Each test script has a name, e.g. 007, and an associated
+ verified output, e.g. 007.out.
+
+ It is important that the verified output is deterministic, and
+ part of the job of the test script is to filter the output to
+ make this so. Examples of the sort of things that need filtering:
+
+ - dates
+ - pids
+ - hostnames
+ - filesystem names
+ - timezones
+ - variable directory contents
+ - imprecise numbers, especially sizes and times
+
+Pass/failure:
+
+ The script "check" may be used to run one or more tests.
+
+ Test number $seq is deemed to "pass" when:
+ (a) no "core" file is created,
+ (b) the file $seq.notrun is not created,
+ (c) the exit status is 0, and
+ (d) the output matches the verified output.
+
+ In the "not run" case (b), the $seq.notrun file should contain a
+ short one-line summary of why the test was not run. The standard
+ output is not checked, so this can be used for a more verbose
+ explanation and to provide feedback when the QA test is run
+ interactively.
+
+
+ To force a non-zero exit status use:
+ status=1
+ exit
+
+ Note that:
+ exit 1
+ won't have the desired effect because of the way the exit trap
+ works.
+
+ The recent pass/fail history is maintained in the file "check.log".
+ The elapsed time for the most recent pass for each test is kept
+ in "check.time".
+
+ The compare-failures script in tools/ may be used to compare failures
+ across multiple runs, given files containing stdout from those runs.
+
+__________________
+SUBMITTING PATCHES
+__________________
+
+Send patches to the fstests mailing list at fstests@vger.kernel.org.
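To tie the test-script conventions above together, here is a minimal sketch of a new test; the test number 9999 and its group list are hypothetical, and the matching 9999.out file would need to contain the test's deterministic output:

    #! /bin/bash
    # SPDX-License-Identifier: GPL-2.0
    #
    # FS QA Test 9999 (hypothetical number)
    #
    # Write a small file on the scratch filesystem and read it back.
    #
    . ./common/preamble
    _begin_fstest auto quick

    # real QA test starts here
    _require_scratch

    _scratch_mkfs > $seqres.full 2>&1
    _scratch_mount

    echo "hello world" > $SCRATCH_MNT/testfile
    cat $SCRATCH_MNT/testfile

    # success, all done
    status=0
    exit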
diff --git a/README.config-sections b/README.config-sections
new file mode 100644
index 00000000..a42d9d7b
--- /dev/null
+++ b/README.config-sections
@@ -0,0 +1,137 @@
+Configuration file with sections
+================================
+
+A configuration file with sections is useful for running xfstests on multiple
+file systems, or multiple file system setups, in a single run without any
+help from external scripts.
+
+
+Syntax
+------
+
+The syntax for defining a section is the following:
+
+ [section_name]
+
+A section name should consist of alphanumeric characters and '_'. Anything
+else is forbidden and the section will not be recognised.
+
+Each section in the configuration file should contain options in the format
+
+ OPTION=value
+
+'OPTION' must not contain any white space characters. 'value' can contain
+any character you want, with one simple limitation: the characters ' and "
+can only appear at the start and end of the 'value', though they are not required.
+
+Note that options are carried between sections, so the same options do not
+have to be specified in each and every section. However, caution should be
+exercised not to leave unwanted options set from previous sections.
+
+
+Results
+-------
+
+For every section, xfstests will run with the specified options and will
+produce separate results in the '$RESULT_BASE/$section_name' directory.
+
+
+Different mount options
+-----------------------
+
+Specifying different mount options in different config sections is allowed.
+When TEST_FS_MOUNT_OPTS differs in the following section, TEST_DEV will be
+remounted with the new TEST_FS_MOUNT_OPTS automatically before running the test.
+
+
+Multiple file systems
+---------------------
+
+Having different file systems in different config sections is allowed. When
+FSTYP differs in the following section, the FSTYP file system will be created
+automatically before running the test.
+
+Note that if TEST_FS_MOUNT_OPTS, MOUNT_OPTIONS, MKFS_OPTIONS, or FSCK_OPTIONS
+are not directly specified in a section, they will be reset to the defaults for
+the given file system.
+
+You can also force the file system recreation by specifying RECREATE_TEST_DEV.
+
+Run specified section only
+--------------------------
+
+Specifying the '-s' argument with a section name will run only the specified
+section. The '-s' argument can be specified multiple times to allow multiple
+sections to be run.
+
+The options are still carried between sections, including the sections
+which are not going to be run. So you can do something like
+
+[ext4]
+TEST_DEV=/dev/sda1
+TEST_DIR=/mnt/test
+SCRATCH_DEV=/dev/sdb1
+SCRATCH_MNT=/mnt/test1
+FSTYP=ext4
+
+[xfs]
+FSTYP=xfs
+
+[btrfs]
+FSTYP=btrfs
+
+
+and run
+
+./check -s xfs -s btrfs
+
+to check xfs and btrfs only. All the devices and mounts are still going to
+be parsed from the section [ext4].
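As a sketch of the option carry-over described above (not literal parser output), the [btrfs] section in that example behaves as if it had been written out in full:

    [btrfs]
    TEST_DEV=/dev/sda1
    TEST_DIR=/mnt/test
    SCRATCH_DEV=/dev/sdb1
    SCRATCH_MNT=/mnt/test1
    FSTYP=btrfs

Only FSTYP is overridden; the devices and mount points are inherited from [ext4], and the results for the section land under $RESULT_BASE/btrfs.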
+
+Example
+-------
+
+Here is an example of a config file with sections:
+
+[ext4_4k_block_size]
+TEST_DEV=/dev/sda
+TEST_DIR=/mnt/test
+SCRATCH_DEV=/dev/sdb
+SCRATCH_MNT=/mnt/test1
+MKFS_OPTIONS="-q -F -b4096"
+FSTYP=ext4
+RESULT_BASE="`pwd`/results/`date +%d%m%y_%H%M%S`"
+
+[ext4_1k_block_size]
+MKFS_OPTIONS="-q -F -b1024"
+
+[ext4_nojournal]
+MKFS_OPTIONS="-q -F -b4096 -O ^has_journal"
+
+[xfs_filesystem]
+MKFS_OPTIONS="-f"
+FSTYP=xfs
+
+[ext3_filesystem]
+FSTYP=ext3
+MOUNT_OPTIONS="-o noatime"
+
+[cephfs]
+TEST_DIR=/mnt/test
+TEST_DEV=192.168.14.1:6789:/
+TEST_FS_MOUNT_OPTS="-o name=admin,secret=AQDuEBtYKEYRINGSECRETriSC8YJGDZsQHcr7g=="
+FSTYP="ceph"
+
+[glusterfs]
+FSTYP=glusterfs
+TEST_DIR=/mnt/gluster/test
+TEST_DEV=192.168.1.1:testvol
+SCRATCH_MNT=/mnt/gluster/scratch
+SCRATCH_DEV=192.168.1.1:scratchvol
+
+[afs]
+FSTYP=afs
+TEST_DEV=%example.com:xfstest.test
+TEST_DIR=/mnt/xfstest.test
+SCRATCH_DEV=%example.com:xfstest.scratch
+SCRATCH_MNT=/mnt/xfstest.scratch
diff --git a/README.device-mapper b/README.device-mapper
new file mode 100644
index 00000000..4ff68121
--- /dev/null
+++ b/README.device-mapper
@@ -0,0 +1,8 @@
+
+To use xfstests on device mapper always use the /dev/mapper/ symlinks,
+not the /dev/dm-* devices or the symlinks created by LVM.
+
+For example:
+
+TEST_DEV=/dev/mapper/test
+SCRATCH_DEV=/dev/mapper/scratch
diff --git a/README.fuse b/README.fuse
new file mode 100644
index 00000000..969dbd5d
--- /dev/null
+++ b/README.fuse
@@ -0,0 +1,26 @@
+Here are instructions for testing fuse using the passthrough_ll example
+filesystem provided in the libfuse source tree:
+
+git clone git://github.com/libfuse/libfuse.git
+cd libfuse
+meson build
+cd build
+ninja
+cat << EOF | sudo tee /sbin/mount.fuse.passthrough_ll
+#!/bin/bash
+ulimit -n 1048576
+exec $(pwd)/example/passthrough_ll -ofsname="\$@"
+EOF
+sudo chmod +x /sbin/mount.fuse.passthrough_ll
+mkdir -p /mnt/test /mnt/scratch /home/test/test /home/test/scratch
+
+Use the following local.config file:
+
+export TEST_DEV=non1
+export TEST_DIR=/mnt/test
+export SCRATCH_DEV=non2
+export SCRATCH_MNT=/mnt/scratch
+export FSTYP=fuse
+export FUSE_SUBTYP=.passthrough_ll
+export MOUNT_OPTIONS="-osource=/home/test/scratch,allow_other,default_permissions"
+export TEST_FS_MOUNT_OPTS="-osource=/home/test/test,allow_other,default_permissions"
diff --git a/README.overlay b/README.overlay
new file mode 100644
index 00000000..3093bf8c
--- /dev/null
+++ b/README.overlay
@@ -0,0 +1,78 @@
+To run xfstests on overlayfs, configure the variables of the TEST and SCRATCH
+partitions to be used as the "base fs" and run './check -overlay'.
+
+For example, the following config file can be used to run tests on
+xfs test/scratch partitions:
+
+ TEST_DEV=/dev/sda5
+ TEST_DIR=/mnt/test
+ SCRATCH_DEV=/dev/sda6
+ SCRATCH_MNT=/mnt/scratch
+ FSTYP=xfs
+
+Using the same config file, but executing './check -overlay' will
+use the same partitions as base fs for overlayfs directories
+and set TEST_DIR/SCRATCH_MNT values to overlay mount points, i.e.:
+/mnt/test/ovl-mnt and /mnt/scratch/ovl-mnt, for the context of
+individual tests.
+
+'./check -overlay' does not support mkfs and fsck on the base fs, so
+the base fs should be pre-formatted before starting the -overlay run.
+An easy way to accomplish this is by running './check ' once,
+before running './check -overlay'.
+
+'./check -overlay' supports checking the overlay test and scratch dirs;
+OVERLAY_FSCK_OPTIONS should be set instead of FSCK_OPTIONS if fsck
+options need to be given directly.
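A rough sketch of the pre-format step described above (the particular test run first does not matter, and the overlay group chosen here is only an example):

    # Format and check the base fs once with the regular config ...
    ./check generic/001

    # ... then run overlay tests on top of the pre-formatted base fs.
    ./check -overlay -g overlay/quick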
+
+Because of the lack of mkfs support, multi-section config files are only
+partly supported with './check -overlay'. Only multi-section files that
+do not change FSTYP and MKFS_OPTIONS can be safely used with -overlay.
+
+For example, the following multi-section config file can be used to
+run overlay tests on the same base fs, but with different mount options, and on
+top of idmapped mounts:
+
+ [xfs]
+ TEST_DEV=/dev/sda5
+ TEST_DIR=/mnt/test
+ SCRATCH_DEV=/dev/sda6
+ SCRATCH_MNT=/mnt/scratch
+ FSTYP=xfs
+
+ [xfs_pquota]
+ MOUNT_OPTIONS="-o pquota"
+ TEST_FS_MOUNT_OPTS="-o noatime"
+ OVERLAY_MOUNT_OPTIONS="-o redirect_dir=off"
+ OVERLAY_FSCK_OPTIONS="-n -o redirect_dir=off"
+
+ [idmapped]
+ IDMAPPED_MOUNTS=true
+
+In the example above, MOUNT_OPTIONS will be used to mount the base scratch fs,
+TEST_FS_MOUNT_OPTS will be used to mount the base test fs,
+OVERLAY_MOUNT_OPTIONS will be used to mount both the test and scratch overlays, and
+OVERLAY_FSCK_OPTIONS will be used to check both the test and scratch overlays.
+
+
+Unionmount Testsuite
+====================
+
+xfstests can be used as a test harness to run unionmount testsuite test cases
+and provide extended test coverage for overlayfs.
+
+To enable running the unionmount testsuite, clone the git repository from:
+ https://github.com/amir73il/unionmount-testsuite.git
+under the xfstests src directory, or set the environment variable
+UNIONMOUNT_TESTSUITE to the local path where the repository was cloned.
+
+Run './check -overlay -g overlay/union' to execute all the unionmount testsuite
+test cases.
+
+
+Overlayfs Tools
+===============
+
+A few tests require additional tools. For fsck.overlay [optional],
+build and install:
+ https://github.com/kmxz/overlayfs-tools
diff --git a/README.selftest b/README.selftest
new file mode 100644
index 00000000..ee77931b
--- /dev/null
+++ b/README.selftest
@@ -0,0 +1,29 @@
+Tests with consistent results are provided in the selftest folder.
+Since many people develop testing infrastructure around xfstests,
+these tests are helpful to confirm the testing setup is working as
+expected.
+
+The provided tests include:
+selftest/001 - pass
+selftest/002 - fail from output mismatch
+selftest/003 - fail via _fail
+selftest/004 - skip
+selftest/005 - crash
+selftest/006 - hang
+
+Two groups are used for these tests: selftest and dangerous_selftest.
+selftest/00[1-4] are in the selftest group and selftest/00[5-6] are
+in the dangerous_selftest group.
+
+The selftests will only be run if explicitly specified. To run the
+selftests, you can specify individual tests, e.g.
+
+# ./check selftest/001
+
+or use the groups under selftest/, e.g.
+
+# ./check -g selftest/selftest
+# ./check -g selftest/dangerous_selftest
+
+Note that you cannot use the group names without including the folder name
+(i.e. "-g selftest").
diff --git a/VERSION b/VERSION
new file mode 100644
index 00000000..afcab53e
--- /dev/null
+++ b/VERSION
@@ -0,0 +1,7 @@
+#
+# This file is used by configure to get version information
+#
+PACKAGE_MAJOR=1
+PACKAGE_MINOR=1
+PACKAGE_REVISION=1
+PACKAGE_BUILD=1
diff --git a/acinclude.m4 b/acinclude.m4
new file mode 100644
index 00000000..7632e5cc
--- /dev/null
+++ b/acinclude.m4
@@ -0,0 +1,34 @@
+dnl Copyright (C) 2016 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+AC_DEFUN([AC_PACKAGE_WANT_LINUX_FIEMAP_H], + [ AC_CHECK_HEADERS([linux/fiemap.h], [ have_fiemap=true ], [ have_fiemap=false ]) + AC_SUBST(have_fiemap) + ]) + +AC_DEFUN([AC_PACKAGE_WANT_LINUX_FS_H], + [ AC_CHECK_HEADER([linux/fs.h]) + ]) + +AC_DEFUN([AC_PACKAGE_WANT_FALLOCATE], + [ AC_MSG_CHECKING([for fallocate]) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ +#define _GNU_SOURCE +#define _FILE_OFFSET_BITS 64 +#include +#include ]], [[ fallocate(0, 0, 0, 0); ]])],[ have_fallocate=true; AC_MSG_RESULT(yes) ],[ have_fallocate=false; AC_MSG_RESULT(no) ]) + AC_SUBST(have_fallocate) + ]) + +AC_DEFUN([AC_PACKAGE_WANT_OPEN_BY_HANDLE_AT], + [ AC_MSG_CHECKING([for open_by_handle_at]) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ +#define _GNU_SOURCE +#include + ]], [[ + struct file_handle fh; + open_by_handle_at(0, &fh, 0); + ]])],[ have_open_by_handle_at=true; AC_MSG_RESULT(yes) ],[ have_open_by_handle_at=false; AC_MSG_RESULT(no) ]) + AC_SUBST(have_open_by_handle_at) + ]) diff --git a/build/Makefile b/build/Makefile new file mode 100644 index 00000000..ec04bed8 --- /dev/null +++ b/build/Makefile @@ -0,0 +1,49 @@ +# +# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. +# + +TOPDIR = .. +include $(TOPDIR)/include/builddefs + +MANIFEST=src-manifest +SRCTAR=$(PKG_NAME)-$(PKG_VERSION).src.tar.gz + +LDIRT = *-manifest *.gz $(TOPDIR)/$(PKG_NAME)-* + +# for clean and clobber +SUBDIRS = tar rpm + +# nothing to build here (it's all packaging) +default install install-dev install-lib: + +include $(BUILDRULES) + +# Symlink in the TOPDIR is used to pack files relative to +# product-version directory. +$(MANIFEST) : $(_FORCE) + @if [ ! -L $(TOPDIR)/$(PKG_NAME)-$(PKG_VERSION) ] ; then \ + $(LN_S) . $(TOPDIR)/$(PKG_NAME)-$(PKG_VERSION) ; \ + fi + @CDIR=`pwd`; cd $(TOPDIR); \ + $(MAKE) --no-print-directory source | \ + sed -e 's/^\./$(PKG_NAME)-$(PKG_VERSION)/' > $$CDIR/$@ ;\ + if [ $$? -ne 0 ] ; then \ + exit 1; \ + else \ + unset TAPE; \ + $(TAR) -T $$CDIR/$@ -cf - | $(ZIP) --best > $$CDIR/$(SRCTAR); \ + echo Wrote: $$CDIR/$(SRCTAR); \ + fi + +dist : default $(MANIFEST) + @DIST_MANIFEST=`pwd`/bin-manifest; DIST_ROOT=/tmp/$$$$; \ + export DIST_MANIFEST DIST_ROOT; \ + rm -f $$DIST_MANIFEST; \ + echo === install === && $(MAKE) -C $(TOPDIR) install || exit $$?; \ + if [ -x $(TAR) ]; then \ + ( echo "=== tar ===" && $(MAKEF) -C tar $@ || exit $$? ); \ + fi; \ + if [ -x $(RPMBUILD) ]; then \ + ( echo "=== rpm ===" && $(MAKEF) -C rpm $@ || exit $$? ); \ + fi; \ + test -z "$$KEEP_DIST_ROOT" || rm -rf $$DIST_ROOT; echo Done diff --git a/build/rpm/Makefile b/build/rpm/Makefile new file mode 100644 index 00000000..8bae7067 --- /dev/null +++ b/build/rpm/Makefile @@ -0,0 +1,62 @@ +# +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. +# + +TOPDIR = ../.. 
+TREEROOT = $(shell cd ${TOPDIR}; pwd) +include $(TOPDIR)/include/builddefs + +SPECF = $(PKG_NAME).spec +LDIRT = *.rpm $(SPECF) rpmmacros rpmfiles* rpm-*.rc + +LSRCFILES = macros.template $(SPECF).in rpm-2.rc.template + +default install install-dev install-lib: + +include $(BUILDRULES) + +# Generate a binary rpm file +dist : default $(SPECF) rpm-$(RPM_VERSION).rc + $(RPMBUILD) -ba --rcfile ./rpm-$(RPM_VERSION).rc $(SPECF) + +# Because rpm prior to v.2.90 does not support macros and old style config +# is not supported by rpm v.3, we have to resort to such ugly hacks +ifneq ($(RPM_VERSION),2) +rpm-$(RPM_VERSION).rc : rpmmacros + @$(SED) -e '/^macrofiles:/s|~/.rpmmacros|rpmmacros|' $@ + +rpmmacros : macros.template + @$(SED) -e 's|%topdir%|$(TREEROOT)|g' < $< > $@ +else +rpm-2.rc: rpm-2.rc.template + @$(SED) -e 's|%topdir%|$(TOPDIR)|g' < $< > $@ +endif + +# Generate the rpm specfile format file list from the install-sh manifest +rpmfiles rpmfiles-dev rpmfiles-lib: + $(SORT) -u $$DIST_MANIFEST | $(AWK) > $@ '\ +$$1 == "d" { printf ("%%%%dir %%%%attr(%s,%s,%s) %s\n", $$2, $$3, $$4, $$5); } \ +$$1 == "f" { if (match ($$6, "$(PKG_MAN_DIR)") || \ + match ($$6, "$(PKG_DOC_DIR)")) \ + printf ("%%%%doc "); \ + if (match ($$6, "$(PKG_MAN_DIR)")) \ + printf ("%%%%attr(%s,%s,%s) %s*\n", $$2, $$3, $$4, $$6); \ + else \ + printf ("%%%%attr(%s,%s,%s) %s\n", $$2, $$3, $$4, $$6); } \ +$$1 == "l" { if (match ($$3, "$(PKG_MAN_DIR)") || \ + match ($$3, "$(PKG_DOC_DIR)")) \ + printf ("%%%%doc "); \ + if (match ($$3, "$(PKG_MAN_DIR)")) \ + printf ("%%%%attr(0777,root,root) %s*\n", $$3); \ + else \ + printf ("%%%%attr(0777,root,root) %s\n", $$3); }' + +.PHONY: $(SPECF) +${SPECF} : ${SPECF}.in + $(SED) -e's|@pkg_name@|$(PKG_NAME)|g' \ + -e's|@pkg_version@|$(PKG_VERSION)|g' \ + -e's|@pkg_release@|$(PKG_RELEASE)|g' \ + -e's|@pkg_distribution@|$(PKG_DISTRIBUTION)|g' \ + -e's|@build_root@|$(DIST_ROOT)|g' \ + -e'/^BuildRoot: *$$/d' \ + -e's|@make@|$(MAKE)|g' < $< > $@ diff --git a/build/rpm/macros.template b/build/rpm/macros.template new file mode 100644 index 00000000..200ba39e --- /dev/null +++ b/build/rpm/macros.template @@ -0,0 +1,30 @@ +# +# rpmrc.template +# +# Template to fudge rpm directory structure inside IRIX-like build +# environment + +# Force 386 build on all platforms +%_target i386-pc-linux +%_target_cpu i386 +%_target_os linux + +# topdir == $(WORKAREA) +%_topdir %topdir% + +# Following directories are specific to the topdir +# This is where build is done. In our case it's the same as $WORKAREA +%_builddir %topdir% + +# This is where foo.1.99.tar.gz is living in the real world. +# Be careful not to run full rpm build as it will override the sources +%_sourcedir %topdir%/build + +# This is where binary RPM and source RPM would end up +%_rpmdir %topdir%/build/rpm +%_srcrpmdir %topdir%/build/rpm +%_specdir %topdir%/build/rpm + +# Leave RPM files in the same directory - we're not building for +# multiple architectures +%_rpmfilename %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm diff --git a/build/rpm/xfstests.spec.in b/build/rpm/xfstests.spec.in new file mode 100644 index 00000000..3dce41ef --- /dev/null +++ b/build/rpm/xfstests.spec.in @@ -0,0 +1,49 @@ +Summary: XFS regression test suite +Name: @pkg_name@ +Version: @pkg_version@ +Release: @pkg_release@ +Distribution: @pkg_distribution@ +Packager: Silicon Graphics, Inc. 
+BuildRoot: @build_root@ +BuildRequires: autoconf, xfsprogs-devel, e2fsprogs-devel +BuildREquires: libacl-devel, libaio-devel +Requires: bash, xfsprogs, xfsdump, perl, acl, attr, bind-utils +Requires: bc, indent, quota +Source: @pkg_name@-@pkg_version@.src.tar.gz +License: GPL2+ +Vendor: Silicon Graphics, Inc. +URL: http://oss.sgi.com/projects/xfs/ +Group: System Environment/Base + +%description +The XFS regression test suite. Also includes some support for +acl, attr, udf, nfs and afs testing. Contains around 200 specific tests +for userspace & kernelspace. + +%prep +if [ -f .census ] ; then + if [ ! -d ${RPM_PACKAGE_NAME}-${RPM_PACKAGE_VERSION} ] ; then + ln -s . ${RPM_PACKAGE_NAME}-${RPM_PACKAGE_VERSION} + fi +else +%setup +INSTALL_USER=root +INSTALL_GROUP=root +export INSTALL_USER INSTALL_GROUP +@make@ configure +fi + +%build +@make@ + +%install +DIST_ROOT="$RPM_BUILD_ROOT" +DIST_INSTALL=`pwd`/install.manifest +export DIST_ROOT DIST_INSTALL +@make@ install DIST_MANIFEST="$DIST_INSTALL" +@make@ -C build/rpm rpmfiles DIST_MANIFEST="$DIST_INSTALL" + +%clean +rm -rf $RPM_BUILD_ROOT + +%files -f build/rpm/rpmfiles diff --git a/build/tar/Makefile b/build/tar/Makefile new file mode 100644 index 00000000..94a3adaa --- /dev/null +++ b/build/tar/Makefile @@ -0,0 +1,22 @@ +# +# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. +# + +TOPDIR = ../.. +include $(TOPDIR)/include/builddefs + +BINTAR=$(PKG_NAME)-$(PKG_VERSION).tar.gz +LDIRT = *.gz + +default install install-dev install-lib: + +include $(BUILDRULES) + +dist : default + @HERE=`pwd`; cd $${DIST_ROOT:-/}; \ + $(SORT) -u $$HERE/../bin-manifest | $(AWK) ' \ + $$1 == "f" { printf (".%s\n", $$6); } \ + $$1 == "d" { next; } \ + $$1 == "l" { printf (".%s\n", $$3); }' \ + | $(TAR) -T - -cf - | $(ZIP) --best > $$HERE/$(BINTAR) + @echo Wrote: `pwd`/$(BINTAR) diff --git a/check b/check new file mode 100755 index 00000000..c897afbb --- /dev/null +++ b/check @@ -0,0 +1,1114 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved. +# +# Control script for QA +# +tmp=/tmp/$$ +status=0 +needwrap=true +needsum=true +try=() +sum_bad=0 +bad=() +notrun=() +interrupt=true +diff="diff -u" +showme=false +have_test_arg=false +randomize=false +exact_order=false +export here=`pwd` +xfile="" +subdir_xfile="" +brief_test_summary=false +do_report=false +DUMP_OUTPUT=false +iterations=1 +istop=false +loop_on_fail=0 +exclude_tests=() + +# This is a global variable used to pass test failure text to reporting gunk +_err_msg="" + +# start the initialisation work now +iam=check + +# mkfs.xfs uses the presence of both of these variables to enable formerly +# supported tiny filesystem configurations that fstests use for fuzz testing +# in a controlled environment +export MSGVERB="text:action" +export QA_CHECK_FS=${QA_CHECK_FS:=true} + +# number of diff lines from a failed test, 0 for whole output +export DIFF_LENGTH=${DIFF_LENGTH:=10} + +# by default don't output timestamps +timestamp=${TIMESTAMP:=false} +. common/exit +. 
common/test_names + +rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.report.* $tmp.arglist + +SRC_GROUPS="generic" +export SRC_DIR="tests" + +usage() +{ + echo "Usage: $0 [options] [testlist]"' + +check options + -nfs test NFS + -afs test AFS + -glusterfs test GlusterFS + -cifs test CIFS + -9p test 9p + -fuse test fuse + -virtiofs test virtiofs + -overlay test overlay + -pvfs2 test PVFS2 + -tmpfs test TMPFS + -ubifs test ubifs + -l line mode diff + -udiff show unified diff (default) + -n show me, do not run tests + -T output timestamps + -r randomize test order + --exact-order run tests in the exact order specified + -i iterate the test list times + -I iterate the test list times, but stops iterating further in case of any test failure + -d dump test output to stdout + -b brief test summary + -R fmt[,fmt] generate report in formats specified. Supported formats: xunit, xunit-quiet + --large-fs optimise scratch device for large filesystems + -s section run only specified section from config file + -S section exclude the specified section from the config file + -L loop tests times following a failure, measuring aggregate pass/fail metrics + +testlist options + -g group[,group...] include tests from these groups + -x group[,group...] exclude tests from these groups + -X exclude_file exclude individual tests + -e testlist exclude a specific list of tests + -E external_file exclude individual tests + [testlist] include tests matching names in testlist + +testlist argument is a list of tests in the form of /. + + is a directory under tests that contains a group file, +with a list of the names of the tests in that directory. + + may be either a specific test file name (e.g. xfs/001) or +a test file name match pattern (e.g. xfs/*). + +group argument is either a name of a tests group to collect from all +the test dirs (e.g. quick) or a name of a tests group to collect from +a specific tests dir in the form of / (e.g. xfs/quick). +If you want to run all the tests in the test suite, use "-g all" to specify all +groups. + +exclude_file argument refers to a name of a file inside each test directory. +for every test dir where this file is found, the listed test names are +excluded from the list of tests to run from that test dir. + +external_file argument is a path to a single file containing a list of tests +to exclude in the form of /. + +examples: + check xfs/001 + check -g quick + check -g xfs/quick + check -x stress xfs/* + check -X .exclude -g auto + check -E ~/.xfstests.exclude +' + _fatal +} + +get_sub_group_list() +{ + local d=$1 + local grp=$2 + + test -s "$SRC_DIR/$d/group.list" || return 1 + + local grpl=$(sed -n < $SRC_DIR/$d/group.list \ + -e 's/#.*//' \ + -e 's/$/ /' \ + -e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p") + echo $grpl +} + +get_group_list() +{ + local grp=$1 + local grpl="" + local sub=$(dirname $grp) + local fsgroup="$FSTYP" + + if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then + # group is given as / (e.g. xfs/quick) + grp=$(basename $grp) + get_sub_group_list $sub $grp + return + fi + + if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then + fsgroup=ext4 + fi + for d in $SRC_GROUPS $fsgroup; do + if ! test -d "$SRC_DIR/$d" ; then + continue + fi + grpl="$grpl $(get_sub_group_list $d $grp)" + done + echo $grpl +} + +# Find all tests, excluding files that are test metadata such as group files. +# It matches test names against $VALID_TEST_NAME defined in common/rc +get_all_tests() +{ + touch $tmp.list + for d in $SRC_GROUPS $FSTYP; do + if ! 
test -d "$SRC_DIR/$d" ; then + continue + fi + ls $SRC_DIR/$d/* | \ + grep -v "\..*" | \ + grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \ + grep -v "group\|Makefile" >> $tmp.list 2>/dev/null + done +} + +# takes the list of tests to run in $tmp.list, and removes the tests passed to +# the function from that list. +trim_test_list() +{ + local test_list="$*" + + rm -f $tmp.grep + local numsed=0 + for t in $test_list + do + if [ $numsed -gt 100 ]; then + grep -v -f $tmp.grep <$tmp.list >$tmp.tmp + mv $tmp.tmp $tmp.list + numsed=0 + rm -f $tmp.grep + fi + echo "^$t\$" >>$tmp.grep + numsed=`expr $numsed + 1` + done + grep -v -f $tmp.grep <$tmp.list >$tmp.tmp + mv $tmp.tmp $tmp.list + rm -f $tmp.grep +} + +_timestamp() +{ + local now=`date "+%T"` + echo -n " [$now]" +} + +_prepare_test_list() +{ + unset list + # Tests specified on the command line + if [ -s $tmp.arglist ]; then + cat $tmp.arglist > $tmp.list + else + touch $tmp.list + fi + + # Specified groups to include + # Note that the CLI processing adds a leading space to the first group + # parameter, so we have to catch that here checking for "all" + if ! $have_test_arg && [ "$GROUP_LIST" == " all" ]; then + # no test numbers, do everything + get_all_tests + else + for group in $GROUP_LIST; do + list=$(get_group_list $group) + if [ -z "$list" ]; then + _fatal "Group \"$group\" is empty or not defined?" + fi + + for t in $list; do + grep -s "^$t\$" $tmp.list >/dev/null || \ + echo "$t" >>$tmp.list + done + done + fi + + # Specified groups to exclude + for xgroup in $XGROUP_LIST; do + list=$(get_group_list $xgroup) + if [ -z "$list" ]; then + echo "Group \"$xgroup\" is empty or not defined?" + continue + fi + + trim_test_list $list + done + + # sort the list of tests into numeric order unless we're running tests + # in the exact order specified + if ! $exact_order; then + if $randomize; then + if type shuf >& /dev/null; then + sorter="shuf" + else + sorter="awk -v seed=$RANDOM -f randomize.awk" + fi + else + sorter="cat" + fi + list=`sort -n $tmp.list | uniq | $sorter` + else + list=`cat $tmp.list` + fi + rm -f $tmp.list +} + +# Process command arguments first. +while [ $# -gt 0 ]; do + case "$1" in + -\? | -h | --help) usage ;; + + -nfs|-afs|-glusterfs|-cifs|-9p|-fuse|-virtiofs|-pvfs2|-tmpfs|-ubifs) + FSTYP="${1:1}" + ;; + -overlay) + [ "$FSTYP" == overlay ] || export OVL_BASE_FSTYP="$FSTYP" + FSTYP=overlay + export OVERLAY=true + ;; + + -g) group=$2 ; shift ; + GROUP_LIST="$GROUP_LIST ${group//,/ }" + ;; + + -x) xgroup=$2 ; shift ; + XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }" + ;; + + -X) subdir_xfile=$2; shift ; + ;; + -e) + xfile=$2; shift ; + readarray -t -O "${#exclude_tests[@]}" exclude_tests < \ + <(echo "$xfile" | tr ', ' '\n\n') + ;; + + -E) xfile=$2; shift ; + if [ -f $xfile ]; then + readarray -t -O ${#exclude_tests[@]} exclude_tests < \ + <(sed "s/#.*$//" $xfile) + fi + ;; + -s) RUN_SECTION="$RUN_SECTION $2"; shift ;; + -S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;; + -l) diff="diff" ;; + -udiff) diff="$diff -u" ;; + + -n) showme=true ;; + -r) + if $exact_order; then + _fatal "Cannot specify -r and --exact-order." + fi + randomize=true + ;; + --exact-order) + if $randomize; then + _fatal "Cannnot specify --exact-order and -r." 
+ fi + exact_order=true + ;; + -i) iterations=$2; shift ;; + -I) iterations=$2; istop=true; shift ;; + -T) timestamp=true ;; + -d) DUMP_OUTPUT=true ;; + -b) brief_test_summary=true;; + -R) report_fmt=$2 ; shift ; + REPORT_LIST="$REPORT_LIST ${report_fmt//,/ }" + do_report=true + ;; + --large-fs) export LARGE_SCRATCH_DEV=yes ;; + --extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${r#*=} ;; + -L) [[ $2 =~ ^[0-9]+$ ]] || usage + loop_on_fail=$2; shift + ;; + + -*) usage ;; + *) # not an argument, we've got tests now. + have_test_arg=true ;; + esac + + # if we've found a test specification, the break out of the processing + # loop before we shift the arguments so that this is the first argument + # that we process in the test arg loop below. + if $have_test_arg; then + break; + fi + + shift +done + +# we need common/rc, that also sources common/config. We need to source it +# after processing args, overlay needs FSTYP set before sourcing common/config +if ! . ./common/rc; then + _fatal "check: failed to source common/rc" +fi + +init_rc + +# If the test config specified a soak test duration, see if there are any +# unit suffixes that need converting to an integer seconds count. +if [ -n "$SOAK_DURATION" ]; then + SOAK_DURATION="$(echo "$SOAK_DURATION" | \ + sed -e 's/^\([.0-9]*\)\([a-z]\)*/\1 \2/g' | \ + $AWK_PROG -f $here/src/soak_duration.awk)" + if [ $? -ne 0 ]; then + _fatal + fi +fi + +# If the test config specified a fuzz rewrite test duration, see if there are +# any unit suffixes that need converting to an integer seconds count. +if [ -n "$FUZZ_REWRITE_DURATION" ]; then + FUZZ_REWRITE_DURATION="$(echo "$FUZZ_REWRITE_DURATION" | \ + sed -e 's/^\([.0-9]*\)\([a-z]\)*/\1 \2/g' | \ + $AWK_PROG -f $here/src/soak_duration.awk)" + if [ $? -ne 0 ]; then + _fatal + fi +fi + +if [ -n "$subdir_xfile" ]; then + for d in $SRC_GROUPS $FSTYP; do + [ -f $SRC_DIR/$d/$subdir_xfile ] || continue + for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do + exclude_tests+=($d/$f) + done + done +fi + +# Process tests from command line now. +if $have_test_arg; then + while [ $# -gt 0 ]; do + case "$1" in + -*) _fatal "Arguments before tests, please!" + ;; + *) # Expand test pattern (e.g. xfs/???, *fs/001) + list=$(cd $SRC_DIR; echo $1) + for t in $list; do + t=${t#$SRC_DIR/} + test_dir=${t%%/*} + test_name=${t##*/} + group_file=$SRC_DIR/$test_dir/group.list + + if grep -Eq "^$test_name" $group_file; then + # in group file ... OK + echo $SRC_DIR/$test_dir/$test_name \ + >>$tmp.arglist + else + # oops + echo "$t - unknown test, ignored" + fi + done + ;; + esac + + shift + done +elif [ -z "$GROUP_LIST" ]; then + # default group list is the auto group. If any other group or test is + # specified, we use that instead. + GROUP_LIST="auto" +fi + +if [ `id -u` -ne 0 ] +then + _fatal "check: QA must be run as root" +fi + +_wipe_counters() +{ + try=() + notrun=() + bad=() +} + +_global_log() { + echo "$1" >> $check.log + if $OPTIONS_HAVE_SECTIONS; then + echo "$1" >> ${REPORT_DIR}/check.log + fi +} + +if [ -n "$REPORT_GCOV" ]; then + . 
./common/gcov + _gcov_check_report_gcov +fi + +_wrapup() +{ + seq="check.$$" + check="$RESULT_BASE/check" + $interrupt && sect_stop=`_wallclock` + + if $showme && $needwrap; then + if $do_report; then + # $showme = all selected tests are notrun (no tries) + _make_section_report "$section" "${#notrun[*]}" "0" \ + "${#notrun[*]}" \ + "$((sect_stop - sect_start))" + fi + needwrap=false + elif $needwrap; then + if [ -f $check.time -a -f $tmp.time ]; then + cat $check.time $tmp.time \ + | $AWK_PROG ' + { t[$1] = $2 } + END { + if (NR > 0) { + for (i in t) print i " " t[i] + } + }' \ + | sort -n >$tmp.out + mv $tmp.out $check.time + if $OPTIONS_HAVE_SECTIONS; then + cp $check.time ${REPORT_DIR}/check.time + fi + fi + + _global_log "" + _global_log "Kernel version: $(uname -r)" + _global_log "$(date)" + + echo "SECTION -- $section" >>$tmp.summary + echo "=========================" >>$tmp.summary + if ((${#try[*]} > 0)); then + if [ $brief_test_summary == "false" ]; then + echo "Ran: ${try[*]}" + echo "Ran: ${try[*]}" >>$tmp.summary + fi + _global_log "Ran: ${try[*]}" + fi + + $interrupt && echo "Interrupted!" | tee -a $check.log + if $OPTIONS_HAVE_SECTIONS; then + $interrupt && echo "Interrupted!" | tee -a \ + ${REPORT_DIR}/check.log + fi + + if ((${#notrun[*]} > 0)); then + if [ $brief_test_summary == "false" ]; then + echo "Not run: ${notrun[*]}" + echo "Not run: ${notrun[*]}" >>$tmp.summary + fi + _global_log "Not run: ${notrun[*]}" + fi + + if ((${#bad[*]} > 0)); then + echo "Failures: ${bad[*]}" + echo "Failed ${#bad[*]} of ${#try[*]} tests" + _global_log "Failures: ${bad[*]}" + _global_log "Failed ${#bad[*]} of ${#try[*]} tests" + echo "Failures: ${bad[*]}" >>$tmp.summary + echo "Failed ${#bad[*]} of ${#try[*]} tests" >>$tmp.summary + else + echo "Passed all ${#try[*]} tests" + _global_log "Passed all ${#try[*]} tests" + echo "Passed all ${#try[*]} tests" >>$tmp.summary + fi + echo "" >>$tmp.summary + if $do_report; then + _make_section_report "$section" "${#try[*]}" \ + "${#bad[*]}" "${#notrun[*]}" \ + "$((sect_stop - sect_start))" + fi + + # Generate code coverage report + if [ -n "$REPORT_GCOV" ]; then + # don't trigger multiple times if caller hits ^C + local gcov_report_dir="$REPORT_GCOV" + test "$gcov_report_dir" = "1" && \ + gcov_report_dir="$REPORT_DIR/gcov" + unset REPORT_GCOV + + _gcov_generate_report "$gcov_report_dir" + fi + + needwrap=false + fi + + sum_bad=`expr $sum_bad + ${#bad[*]}` + _wipe_counters + if ! $OPTIONS_HAVE_SECTIONS; then + rm -f $tmp.* + fi +} + +_summary() +{ + _wrapup + if $showme; then + : + elif $needsum; then + count=`wc -L $tmp.summary | cut -f1 -d" "` + cat $tmp.summary + needsum=false + fi + rm -f $tmp.* +} + +_check_filesystems() +{ + local ret=0 + + if [ -f ${RESULT_DIR}/require_test ]; then + if ! 
_check_test_fs ; then + ret=1 + echo "Trying to repair broken TEST_DEV file system" + _repair_test_fs + _test_mount + fi + rm -f ${RESULT_DIR}/require_test* + else + _test_unmount 2> /dev/null + fi + if [ -f ${RESULT_DIR}/require_scratch ]; then + _check_scratch_fs || ret=1 + rm -f ${RESULT_DIR}/require_scratch* + fi + _scratch_unmount 2> /dev/null + return $ret +} + +_expunge_test() +{ + local TEST_ID="$1" + + for f in "${exclude_tests[@]}"; do + # $f may contain traling spaces and comments + local id_regex="^${TEST_ID}\b" + if [[ "$f" =~ ${id_regex} ]]; then + echo " [expunged]" + return 0 + fi + done + return 1 +} + +# retain files which would be overwritten in subsequent reruns of the same test +_stash_fail_loop_files() { + local seq_prefix="${REPORT_DIR}/${1}" + local cp_suffix="$2" + + for i in ".full" ".dmesg" ".out.bad" ".notrun" ".core" ".hints"; do + rm -f "${seq_prefix}${i}${cp_suffix}" + if [ -f "${seq_prefix}${i}" ]; then + cp "${seq_prefix}${i}" "${seq_prefix}${i}${cp_suffix}" + fi + done +} + +# Retain in @bad / @notrun the result of the just-run @test_seq. @try array +# entries are added prior to execution. +_stash_test_status() { + local test_seq="$1" + local test_status="$2" + + if $do_report && [[ $test_status != "expunge" ]]; then + _make_testcase_report "$section" "$test_seq" \ + "$test_status" "$((stop - start))" + fi + + if ((${#loop_status[*]} > 0)); then + # continuing or completing rerun-on-failure loop + _stash_fail_loop_files "$test_seq" ".rerun${#loop_status[*]}" + loop_status+=("$test_status") + if ((${#loop_status[*]} > loop_on_fail)); then + printf "%s aggregate results across %d runs: " \ + "$test_seq" "${#loop_status[*]}" + awk "BEGIN { + n=split(\"${loop_status[*]}\", arr);"' + for (i = 1; i <= n; i++) + stats[arr[i]]++; + for (x in stats) + printf("%s=%d (%.1f%%)", + (i-- > n ? x : ", " x), + stats[x], 100 * stats[x] / n); + }' + echo + loop_status=() + fi + return # only stash @bad result for initial failure in loop + fi + + case "$test_status" in + fail) + if ((loop_on_fail > 0)); then + # initial failure, start rerun-on-failure loop + _stash_fail_loop_files "$test_seq" ".rerun0" + loop_status+=("$test_status") + fi + bad+=("$test_seq") + ;; + list|notrun) + notrun+=("$test_seq") + ;; + pass|expunge) + ;; + *) + echo "Unexpected test $test_seq status: $test_status" + ;; + esac +} + +# Figure out the maximum test name length, e.g. "generic/1212" => 12 +max_test_namelen=$(ls "$SRC_DIR"/*/* | \ + awk 'BEGIN {x = 0} /\/[0-9]*$/ {l = length($0) - 6; if (l > x) x = l;} END {print x}') + +# Can we run systemd scopes? +HAVE_SYSTEMD_SCOPES= +systemctl reset-failed "fstests-check" &>/dev/null +systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null +test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes + +# Make the check script unattractive to the OOM killer... +OOM_SCORE_ADJ="/proc/self/oom_score_adj" +function _adjust_oom_score() { + test -w "${OOM_SCORE_ADJ}" && echo "$1" > "${OOM_SCORE_ADJ}" +} +_adjust_oom_score -500 + +# ...and make the tests themselves somewhat more attractive to it, so that if +# the system runs out of memory it'll be the test that gets killed and not the +# test framework. The test is run in a separate process without any of our +# functions, so we open-code adjusting the OOM score. +# +# If systemd is available, run the entire test script in a scope so that we can +# kill all subprocesses of the test if it fails to clean up after itself. This +# is essential for ensuring that the post-test unmount succeeds. 
Note that +# systemd doesn't automatically remove transient scopes that fail to terminate +# when systemd tells them to terminate (e.g. programs stuck in D state when +# systemd sends SIGKILL), so we use reset-failed to tear down the scope. +_run_seq() { + local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq") + local res + + if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then + local unit="$(systemd-escape "fs$seq").scope" + systemctl reset-failed "${unit}" &> /dev/null + systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}" + res=$? + systemctl stop "${unit}" &> /dev/null + return "${res}" + else + "${cmd[@]}" + fi +} + +_detect_kmemleak +_prepare_test_list +fstests_start_time="$(date +"%F %T")" + +# We are not using _exit in the trap handler so that it is obvious to the reader +# that we are using the last set value of "status" before we finally exit +# from the check script. +if $OPTIONS_HAVE_SECTIONS; then + trap "_summary; exit \$status" 0 1 2 3 15 +else + trap "_wrapup; exit \$status" 0 1 2 3 15 +fi + +function run_section() +{ + local section=$1 skip + + OLD_FSTYP=$FSTYP + OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS + + # Do we need to run only some sections ? + if [ ! -z "$RUN_SECTION" ]; then + skip=true + for s in $RUN_SECTION; do + if [ $section == $s ]; then + skip=false + break; + fi + done + if $skip; then + return + fi + fi + + # Did this section get excluded? + if [ ! -z "$EXCLUDE_SECTION" ]; then + skip=false + for s in $EXCLUDE_SECTION; do + if [ $section == $s ]; then + skip=true + break; + fi + done + if $skip; then + return + fi + fi + + get_next_config $section + _canonicalize_devices + + mkdir -p $RESULT_BASE + if [ ! -d $RESULT_BASE ]; then + _fatal "failed to create results directory $RESULT_BASE" + fi + + if $OPTIONS_HAVE_SECTIONS; then + echo "SECTION -- $section" + fi + + sect_start=`_wallclock` + if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then + echo "RECREATING -- $FSTYP on $TEST_DEV" + _test_unmount 2> /dev/null + if ! _test_mkfs >$tmp.err 2>&1 + then + echo "our local _test_mkfs routine ..." + cat $tmp.err + _fatal "check: failed to mkfs \$TEST_DEV using specified options" + fi + # Previous FSTYP derived from TEST_DEV could be changed, source + # common/rc again with correct FSTYP to get FSTYP specific configs, + # e.g. common/xfs + . common/rc + _prepare_test_list + elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then + # Unmount TEST_DEV to apply the updated mount options. + # It will be mounted again by init_rc(), called shortly after. + _test_unmount 2> /dev/null + fi + + init_rc + + seq="check.$$" + check="$RESULT_BASE/check" + seqres="$check" + + # don't leave old full output behind on a clean run + rm -f $check.full + + [ -f $check.time ] || touch $check.time + + # print out our test configuration + echo "FSTYP -- `_full_fstyp_details`" + echo "PLATFORM -- `_full_platform_details`" + if [ ! -z "$SCRATCH_DEV" ]; then + echo "MKFS_OPTIONS -- `_scratch_mkfs_options`" + echo "MOUNT_OPTIONS -- `_scratch_mount_options`" + fi + echo + test -n "$REPORT_GCOV" && _gcov_reset + needwrap=true + + if [ ! -z "$SCRATCH_DEV" ]; then + _scratch_unmount 2> /dev/null + # call the overridden mkfs - make sure the FS is built + # the same as we'll create it later. + + if ! _scratch_mkfs >$tmp.err 2>&1 + then + echo "our local _scratch_mkfs routine ..." 
+ cat $tmp.err + _fatal "check: failed to mkfs \$SCRATCH_DEV using specified options" + fi + + # call the overridden mount - make sure the FS mounts with + # the same options that we'll mount with later. + if ! _try_scratch_mount >$tmp.err 2>&1 + then + echo "our local mount routine ..." + cat $tmp.err + _fatal "check: failed to mount \$SCRATCH_DEV using specified options" + else + _scratch_unmount + fi + fi + + _check_test_fs + + loop_status=() # track rerun-on-failure state + local tc_status ix + local -a _list=( $list ) + for ((ix = 0; ix < ${#_list[*]}; !${#loop_status[*]} && ix++)); do + seq="${_list[$ix]}" + + if [ ! -f $seq ]; then + # Try to get full name in case the user supplied only + # seq id and the test has a name. A bit of hassle to + # find really the test and not its sample output or + # helping files. + bname=$(basename $seq) + full_seq=$(find $(dirname $seq) -name $bname* -executable | + awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\ + END { print shortest }') + if [ -f $full_seq ] && \ + [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then + seq=$full_seq + fi + fi + + # the filename for the test and the name output are different. + # we don't include the tests/ directory in the name output. + export seqnum=${seq#$SRC_DIR/} + group=${seqnum%%/*} + if $OPTIONS_HAVE_SECTIONS; then + REPORT_DIR="$RESULT_BASE/$section" + else + REPORT_DIR="$RESULT_BASE" + fi + export RESULT_DIR="$REPORT_DIR/$group" + seqres="$REPORT_DIR/$seqnum" + + # Generate the entire section report with whatever test results + # we have so far. Leave the $sect_time parameter empty so that + # it's a little more obvious that this test run is incomplete. + if $do_report; then + _make_section_report "$section" "${#try[*]}" \ + "${#bad[*]}" "${#notrun[*]}" \ + "" &> /dev/null + fi + + # Print test name and leave the cursor at a consistent column + # number for later reporting of test outcome. + printf "%-*s" "$max_test_namelen" "$seqnum" + + if $showme; then + if _expunge_test $seqnum; then + tc_status="expunge" + else + echo + start=0 + stop=0 + tc_status="list" + fi + _stash_test_status "$seqnum" "$tc_status" + continue + fi + + tc_status="pass" + if [ ! -f $seq ]; then + echo " - no such test?" + _stash_test_status "$seqnum" "$tc_status" + continue + fi + + # really going to try and run this one + mkdir -p $RESULT_DIR + rm -f ${RESULT_DIR}/require_scratch* + rm -f ${RESULT_DIR}/require_test* + rm -f $seqres.out.bad $seqres.hints + + # check if we really should run it + if _expunge_test $seqnum; then + tc_status="expunge" + _stash_test_status "$seqnum" "$tc_status" + continue + fi + + # record that we really tried to run this test. + if ((!${#loop_status[*]})); then + try+=("$seqnum") + fi + + awk 'BEGIN {lasttime=" "} \ + $1 == "'$seqnum'" {lasttime=" " $2 "s ... "; exit} \ + END {printf "%s", lasttime}' "$check.time" + rm -f core $seqres.notrun + _start_coredumpctl_collection + + start=`_wallclock` + $timestamp && _timestamp + [ ! 
-x $seq ] && chmod u+x $seq # ensure we can run it + $LOGGER_PROG "run xfstest $seqnum" + if [ -w /dev/kmsg ]; then + export date_time=`date +"%F %T"` + echo "run fstests $seqnum at $date_time" > /dev/kmsg + # _check_dmesg depends on this log in dmesg + touch ${RESULT_DIR}/check_dmesg + rm -f ${RESULT_DIR}/dmesg_filter + fi + _try_wipe_scratch_devs > /dev/null 2>&1 + + # clear the WARN_ONCE state to allow a potential problem + # to be reported for each test + (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1 + + test_start_time="$(date +"%F %T")" + if [ "$DUMP_OUTPUT" = true ]; then + _run_seq 2>&1 | tee $tmp.out + # Because $? would get tee's return code + sts=${PIPESTATUS[0]} + else + _run_seq >$tmp.out 2>&1 + sts=$? + fi + + # If someone sets kernel.core_pattern or kernel.core_uses_pid, + # coredumps generated by fstests might have a longer name than + # just "core". Use globbing to find the most common patterns, + # assuming there are no other coredump capture packages set up. + local cores=0 + _finish_coredumpctl_collection + for i in core core.*; do + test -f "$i" || continue + if ((cores++ == 0)); then + _dump_err_cont "[dumped core]" + fi + (_adjust_oom_score 250; _save_coredump "$i") + tc_status="fail" + done + + if [ -f $seqres.notrun ]; then + $timestamp && _timestamp + stop=`_wallclock` + $timestamp || echo -n "[not run] " + $timestamp && echo " [not run]" && \ + echo -n " $seqnum -- " + cat $seqres.notrun + tc_status="notrun" + _stash_test_status "$seqnum" "$tc_status" + + # Unmount the scratch fs so that we can wipe the scratch + # dev state prior to the next test run. + _scratch_unmount 2> /dev/null + continue; + fi + + if [ $sts -ne 0 ]; then + _dump_err_cont "[failed, exit status $sts]" + _test_unmount 2> /dev/null + _scratch_unmount 2> /dev/null + rm -f ${RESULT_DIR}/require_scratch* + + # Make sure the test filesystem is ready to go since + # we don't call _check_filesystems for failed tests + (_adjust_oom_score 250; _check_filesystems) || tc_status="fail" + + rm -f ${RESULT_DIR}/require_test* + # Even though we failed, there may be something interesting in + # dmesg which can help debugging. + _check_dmesg + tc_status="fail" + else + # The test apparently passed, so check for corruption + # and log messages that shouldn't be there. Run the + # checking tools from a subshell with adjusted OOM + # score so that the OOM killer will target them instead + # of the check script itself. + (_adjust_oom_score 250; _check_filesystems) || tc_status="fail" + _check_dmesg || tc_status="fail" + + # Save any coredumps from the post-test fs checks + for i in core core.*; do + test -f "$i" || continue + if ((cores++ == 0)); then + _dump_err_cont "[dumped core]" + fi + (_adjust_oom_score 250; _save_coredump "$i") + tc_status="fail" + done + fi + + # Reload the module after each test to check for leaks or + # other problems. + if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then + _test_unmount 2> /dev/null + _scratch_unmount 2> /dev/null + modprobe -r fs-$FSTYP + modprobe fs-$FSTYP + fi + + # Scan for memory leaks after every test so that associating + # a leak to a particular test will be as accurate as possible. + _check_kmemleak || tc_status="fail" + + # test ends after all checks are done. + $timestamp && _timestamp + stop=`_wallclock` + + if [ ! -f $seq.out ]; then + _dump_err "no qualified output" + tc_status="fail" + _stash_test_status "$seqnum" "$tc_status" + continue; + fi + + # coreutils 8.16+ changed quote formats in error messages + # from `foo' to 'foo'. 
Filter old versions to match the new + # version. + sed -i "s/\`/\'/g" $tmp.out + if diff $seq.out $tmp.out >/dev/null 2>&1 ; then + if [ "$tc_status" != "fail" ]; then + echo "$seqnum `expr $stop - $start`" >>$tmp.time + echo -n " `expr $stop - $start`s" + fi + echo "" + else + _dump_err "- output mismatch (see $seqres.out.bad)" + mv $tmp.out $seqres.out.bad + $diff $seq.out $seqres.out.bad | { + if test "$DIFF_LENGTH" -le 0; then + cat + else + head -n "$DIFF_LENGTH" + echo "..." + echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \ + " to see the entire diff)" + fi; } | sed -e 's/^\(.\)/ \1/' + tc_status="fail" + fi + if [ -f $seqres.hints ]; then + if [ "$tc_status" == "fail" ]; then + echo + cat $seqres.hints + else + rm -f $seqres.hints + fi + fi + _stash_test_status "$seqnum" "$tc_status" + done + + # Reset these three variables so that unmount output doesn't get + # written to $seqres.full of the last test to run. + seq="check.$$" + check="$RESULT_BASE/check" + seqres="$check" + + sect_stop=`_wallclock` + interrupt=false + _wrapup + interrupt=true + echo + + _test_unmount 2> /dev/null + _scratch_unmount 2> /dev/null +} + +for ((iters = 0; iters < $iterations; iters++)) do + for section in $HOST_OPTIONS_SECTIONS; do + run_section $section + if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then + interrupt=false + _exit `expr $sum_bad != 0` + fi + done +done + +interrupt=false +_exit `expr $sum_bad != 0` diff --git a/check-parallel b/check-parallel new file mode 100755 index 00000000..c8543725 --- /dev/null +++ b/check-parallel @@ -0,0 +1,205 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2024 Red Hat, Inc. All Rights Reserved. +# +# Run all tests in parallel +# +# This is a massive resource bomb script. For every test, it creates a +# pair of sparse loop devices for test and scratch devices, then mount points +# for them and runs the test in the background. When it completes, it tears down +# the loop devices. + +export SRC_DIR="tests" +basedir=$1 +shift +check_args="$*" +runners=64 +runner_list=() +runtimes=() + + +# tests in auto group +test_list=$(awk '/^[0-9].*auto/ { print "generic/" $1 }' tests/generic/group.list) +test_list+=$(awk '/^[0-9].*auto/ { print "xfs/" $1 }' tests/xfs/group.list) + +# grab all previously run tests and order them from highest runtime to lowest +# We are going to try to run the longer tests first, hopefully so we can avoid +# massive thundering herds trying to run lots of really short tests in parallel +# right off the bat. This will also tend to vary the order of tests from run to +# run somewhat. +# +# If we have tests in the test list that don't have runtimes recorded, then +# append them to be run last. + +build_runner_list() +{ + local runtimes + local run_list=() + local prev_results=`ls -tr $basedir/runner-0/ | grep results | tail -1` + + runtimes=$(cat $basedir/*/$prev_results/check.time | sort -k 2 -nr | cut -d " " -f 1) + + # Iterate the timed list first. For every timed list entry that + # is found in the test_list, add it to the local runner list. + local -a _list=( $runtimes ) + local -a _tlist=( $test_list ) + local rx=0 + local ix + local jx + #set -x + for ((ix = 0; ix < ${#_list[*]}; ix++)); do + echo $test_list | grep -q ${_list[$ix]} + if [ $? == 0 ]; then + # add the test to the new run list and remove + # it from the remaining test list. 
+ run_list[rx++]=${_list[$ix]} + _tlist=( ${_tlist[*]/${_list[$ix]}/} ) + fi + + done + + # The final test list is all the time ordered tests followed by + # all the tests we didn't find time records for. + test_list="${run_list[*]} ${_tlist[*]}" +} + +if [ -f $basedir/runner-0/results/check.time ]; then + build_runner_list +fi + +# split the list amongst N runners + +split_runner_list() +{ + local ix + local rx + local -a _list=( $test_list ) + for ((ix = 0; ix < ${#_list[*]}; ix++)); do + seq="${_list[$ix]}" + rx=$((ix % $runners)) + runner_list[$rx]+="${_list[$ix]} " + #echo $seq + done +} + +_create_loop_device() +{ + local file=$1 dev + + dev=`losetup -f --show $file` || _fail "Cannot assign $file to a loop device" + + # Using buffered IO for the loop devices seems to run quite a bit + # faster. There are a lot of tests that hit the same regions of the + # filesystems, so avoiding read IO seems to really help. Results can + # vary, though, because many tests drop all caches unconditionally. + # Uncomment to use AIO+DIO loop devices instead. + #test -b "$dev" && losetup --direct-io=on $dev 2> /dev/null + + echo $dev +} + +_destroy_loop_device() +{ + local dev=$1 + blockdev --flushbufs $dev + umount $dev > /dev/null 2>&1 + losetup -d $dev || _fail "Cannot destroy loop device $dev" +} + +runner_go() +{ + local id=$1 + local me=$basedir/runner-$id + local _test=$me/test.img + local _scratch=$me/scratch.img + local _results=$me/results-$2 + + mkdir -p $me + + xfs_io -f -c 'truncate 2g' $_test + xfs_io -f -c 'truncate 8g' $_scratch + + mkfs.xfs -f $_test > /dev/null 2>&1 + + export TEST_DEV=$(_create_loop_device $_test) + export TEST_DIR=$me/test + export SCRATCH_DEV=$(_create_loop_device $_scratch) + export SCRATCH_MNT=$me/scratch + export FSTYP=xfs + export RESULT_BASE=$_results + + mkdir -p $TEST_DIR + mkdir -p $SCRATCH_MNT + mkdir -p $RESULT_BASE + rm -f $RESULT_BASE/check.* + +# export DUMP_CORRUPT_FS=1 + + # Run the tests in it's own mount namespace, as per the comment below + # that precedes making the basedir a private mount. + ./src/nsexec -m ./check $check_args -x unreliable_in_parallel --exact-order ${runner_list[$id]} > $me/log 2>&1 + + wait + sleep 1 + umount -R $TEST_DIR 2> /dev/null + umount -R $SCRATCH_MNT 2> /dev/null + _destroy_loop_device $TEST_DEV + _destroy_loop_device $SCRATCH_DEV + + grep -q Failures: $me/log + if [ $? -eq 0 ]; then + echo -n "Runner $id Failures: " + grep Failures: $me/log | uniq | sed -e "s/^.*Failures://" + fi + +} + +cleanup() +{ + killall -INT -q check + wait + umount -R $basedir/*/test 2> /dev/null + umount -R $basedir/*/scratch 2> /dev/null + losetup --detach-all +} + +trap "cleanup; exit" HUP INT QUIT TERM + + +# Each parallel test runner needs to only see it's own mount points. If we +# leave the basedir as shared, then all tests see all mounts and then we get +# mount propagation issues cropping up. For example, cloning a new mount +# namespace will take a reference to all visible shared mounts and hold them +# while the mount names space is active. This can cause unmount in the test that +# controls the mount to succeed without actually unmounting the filesytsem +# because a mount namespace still holds a reference to it. This causes other +# operations on the block device to fail as it is still busy (e.g. fsck, mkfs, +# etc). Hence we make the basedir private here and then run each check instance +# in it's own mount namespace so that they cannot see mounts that other tests +# are performing. 
+mount --make-private $basedir +split_runner_list +now=`date +%Y-%m-%d-%H:%M:%S` +for ((i = 0; i < $runners; i++)); do + + runner_go $i $now & + +done; +wait + +echo -n "Tests run: " +grep Ran /mnt/xfs/*/log | sed -e 's,^.*:,,' -e 's, ,\n,g' | sort | uniq | wc -l + +echo -n "Failure count: " +grep Failures: $basedir/*/log | uniq | sed -e "s/^.*Failures://" -e "s,\([0-9]\) \([gx]\),\1\n \2,g" |wc -l +echo + +echo Ten slowest tests - runtime in seconds: +cat $basedir/*/results/check.time | sort -k 2 -nr | head -10 + +echo +echo Cleanup on Aisle 5? +echo +losetup --list +ls -l /dev/mapper +df -h |grep xfs diff --git a/common/Makefile b/common/Makefile new file mode 100644 index 00000000..5f91e8c3 --- /dev/null +++ b/common/Makefile @@ -0,0 +1,16 @@ +# +# Copyright (c) 2003-2006 Silicon Graphics, Inc. All Rights Reserved. +# + +TOPDIR = .. +include $(TOPDIR)/include/builddefs + +COMMON_DIR = common + +include $(BUILDRULES) + +install: + $(INSTALL) -m 755 -d $(PKG_LIB_DIR)/$(COMMON_DIR) + $(INSTALL) -m 644 * $(PKG_LIB_DIR)/$(COMMON_DIR) + +install-dev install-lib: diff --git a/common/atomicwrites b/common/atomicwrites new file mode 100644 index 00000000..bbcc4e7c --- /dev/null +++ b/common/atomicwrites @@ -0,0 +1,157 @@ +##/bin/bash +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2025 Oracle. All Rights Reserved. +# +# Routines for testing atomic writes. + +export STATX_WRITE_ATOMIC=0x10000 + +_get_atomic_write_unit_min() +{ + $XFS_IO_PROG -c "statx -r -m $STATX_WRITE_ATOMIC" $1 | \ + grep atomic_write_unit_min | grep -o '[0-9]\+' +} + +_get_atomic_write_unit_max() +{ + $XFS_IO_PROG -c "statx -r -m $STATX_WRITE_ATOMIC" $1 | \ + grep -w atomic_write_unit_max | grep -o '[0-9]\+' +} + +_get_atomic_write_unit_max_opt() +{ + $XFS_IO_PROG -c "statx -r -m $STATX_WRITE_ATOMIC" $1 | \ + grep -w atomic_write_unit_max_opt | grep -o '[0-9]\+' +} + +_get_atomic_write_segments_max() +{ + $XFS_IO_PROG -c "statx -r -m $STATX_WRITE_ATOMIC" $1 | \ + grep -w atomic_write_segments_max | grep -o '[0-9]\+' +} + +_require_scratch_write_atomic_multi_fsblock() +{ + _require_scratch + + _scratch_mkfs > /dev/null 2>&1 || \ + _notrun "cannot format scratch device for atomic write checks" + _try_scratch_mount || \ + _notrun "cannot mount scratch device for atomic write checks" + + local testfile=$SCRATCH_MNT/testfile + touch $testfile + + local bsize=$(_get_file_block_size $SCRATCH_MNT) + local awu_max_fs=$(_get_atomic_write_unit_max $testfile) + + _scratch_unmount + + if [ -z "$awu_max_fs" -o $awu_max_fs -lt $((bsize * 2)) ];then + _notrun "multi-block atomic writes not supported by this filesystem" + fi +} + +_require_scratch_write_atomic() +{ + _require_scratch + + local awu_min_bdev=$(_get_atomic_write_unit_min $SCRATCH_DEV) + local awu_max_bdev=$(_get_atomic_write_unit_max $SCRATCH_DEV) + + if [ -z "$awu_min_bdev" -o -z "$awu_max_bdev" ] || \ + [ $awu_min_bdev -eq 0 -a $awu_max_bdev -eq 0 ];then + _notrun "write atomic not supported by this block device" + fi + + _scratch_mkfs > /dev/null 2>&1 || \ + _notrun "cannot format scratch device for atomic write checks" + _try_scratch_mount || \ + _notrun "cannot mount scratch device for atomic write checks" + + local testfile=$SCRATCH_MNT/testfile + touch $testfile + + local awu_min_fs=$(_get_atomic_write_unit_min $testfile) + local awu_max_fs=$(_get_atomic_write_unit_max $testfile) + + _scratch_unmount + + if [ -z "$awu_min_fs" -o -z "$awu_max_fs" ] || \ + [ $awu_min_fs -eq 0 -a $awu_max_fs -eq 0 ];then + _notrun "write atomic not supported by this filesystem" + fi +} 
+ +# Check for xfs_io commands required to run _test_atomic_file_writes +_require_atomic_write_test_commands() +{ + _require_xfs_io_command "falloc" + _require_xfs_io_command "fpunch" + _require_xfs_io_command pwrite -A +} + +_test_atomic_file_writes() +{ + local bsize="$1" + local testfile="$2" + local bytes_written + local testfile_cp="$testfile.copy" + + # Check that we can perform an atomic write of len = FS block size + bytes_written=$($XFS_IO_PROG -dc "pwrite -A -D -V1 -b $bsize 0 $bsize" $testfile | \ + grep wrote | awk -F'[/ ]' '{print $2}') + test $bytes_written -eq $bsize || echo "atomic write len=$bsize failed" + + # Check that we can perform an atomic single-block cow write + if cp --reflink=always $testfile $testfile_cp 2>> $seqres.full; then + bytes_written=$($XFS_IO_PROG -dc "pwrite -A -D -V1 -b $bsize 0 $bsize" $testfile_cp | \ + grep wrote | awk -F'[/ ]' '{print $2}') + test $bytes_written -eq $bsize || echo "atomic write on reflinked file failed" + fi + + # Check that we can perform an atomic write on an unwritten block + $XFS_IO_PROG -c "falloc $bsize $bsize" $testfile + bytes_written=$($XFS_IO_PROG -dc "pwrite -A -D -V1 -b $bsize $bsize $bsize" $testfile | \ + grep wrote | awk -F'[/ ]' '{print $2}') + test $bytes_written -eq $bsize || echo "atomic write to unwritten block failed" + + # Check that we can perform an atomic write on a sparse hole + $XFS_IO_PROG -c "fpunch 0 $bsize" $testfile + bytes_written=$($XFS_IO_PROG -dc "pwrite -A -D -V1 -b $bsize 0 $bsize" $testfile | \ + grep wrote | awk -F'[/ ]' '{print $2}') + test $bytes_written -eq $bsize || echo "atomic write to sparse hole failed" + + # Check that we can perform an atomic write on a fully mapped block + bytes_written=$($XFS_IO_PROG -dc "pwrite -A -D -V1 -b $bsize 0 $bsize" $testfile | \ + grep wrote | awk -F'[/ ]' '{print $2}') + test $bytes_written -eq $bsize || echo "atomic write to mapped block failed" + + # Reject atomic write if len is out of bounds + $XFS_IO_PROG -dc "pwrite -A -D -V1 -b $bsize 0 $((bsize - 1))" $testfile 2>> $seqres.full && \ + echo "atomic write len=$((bsize - 1)) should fail" + $XFS_IO_PROG -dc "pwrite -A -D -V1 -b $bsize 0 $((bsize + 1))" $testfile 2>> $seqres.full && \ + echo "atomic write len=$((bsize + 1)) should fail" + + # Reject atomic write when iovecs > 1 + $XFS_IO_PROG -dc "pwrite -A -D -V2 -b $bsize 0 $bsize" $testfile 2>> $seqres.full && \ + echo "atomic write only supports iovec count of 1" + + # Reject atomic write when not using direct I/O + $XFS_IO_PROG -c "pwrite -A -V1 -b $bsize 0 $bsize" $testfile 2>> $seqres.full && \ + echo "atomic write requires direct I/O" + + # Reject atomic write when offset % bsize != 0 + $XFS_IO_PROG -dc "pwrite -A -D -V1 -b $bsize 1 $bsize" $testfile 2>> $seqres.full && \ + echo "atomic write requires offset to be aligned to bsize" +} + +_simple_atomic_write() { + local pos=$1 + local count=$2 + local file=$3 + local directio=$4 + + echo "testing pos=$pos count=$count file=$file directio=$directio" >> $seqres.full + $XFS_IO_PROG $directio -c "pwrite -b $count -V 1 -A -D $pos $count" $file >> $seqres.full +} diff --git a/common/attr b/common/attr new file mode 100644 index 00000000..eb5c8625 --- /dev/null +++ b/common/attr @@ -0,0 +1,271 @@ +##/bin/bash +# SPDX-License-Identifier: GPL-2.0+ +# Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 
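As a usage note for the atomic write helpers added in common/atomicwrites above: a test would typically drive _test_atomic_file_writes against a freshly made scratch filesystem, roughly as sketched here. This is an illustrative sketch only, not part of the patch, and it assumes the standard xfstests test template:

	# Exercise the single-block atomic write checks on the scratch fs.
	_require_scratch_write_atomic
	_require_atomic_write_test_commands

	_scratch_mkfs >> $seqres.full 2>&1
	_scratch_mount

	testfile=$SCRATCH_MNT/testfile		# placeholder path
	touch $testfile
	bsize=$(_get_file_block_size $SCRATCH_MNT)
	_test_atomic_file_writes $bsize $testfile

	# The helper only emits diagnostics on failure, so a clean run adds
	# nothing beyond the golden output.
	echo "Silence is golden"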
+#
+# common extended attribute and ACL support

+# filesystems that want to test maximum supported acl counts need to
+# add support in here
+_acl_get_max()
+{
+	case $FSTYP in
+	xfs)
+		# CRC format filesystems have much larger ACL counts. The actual
+		# number is into the thousands, but testing that many takes too
+		# long, so just test well past the old limit of 25.
+		$XFS_INFO_PROG $TEST_DIR | _filter_mkfs > /dev/null 2> $tmp.info
+		. $tmp.info
+		rm $tmp.info
+		if [ $_fs_has_crcs -eq 0 ]; then
+			echo 25
+		else
+			echo 5461
+		fi
+		;;
+	jfs)
+		echo 8191
+		;;
+	f2fs)
+		# If noinline_xattr is enabled, max xattr size should be:
+		# (4096 - 24) - (24 + 4) = 4044
+		# then ACL_MAX_ENTRIES should be:
+		# (4044 - (4 + 4 * 4)) / 8 + 4 = 507
+		_fs_options $TEST_DEV | grep "noinline_xattr" >/dev/null 2>&1
+		if [ $? -eq 0 ]; then
+			echo 507
+		else
+			# If inline_xattr is enabled, max xattr size should be:
+			# (4096 - 24 + 200) - (24 + 4) = 4244
+			# then ACL_MAX_ENTRIES should be:
+			# (4244 - (4 + 4 * 4)) / 8 + 4 = 532
+			_fs_options $TEST_DEV | grep "inline_xattr" >/dev/null 2>&1
+			if [ $? -eq 0 ]; then
+				echo 532
+			else
+				echo 507
+			fi
+		fi
+		;;
+	bcachefs)
+		echo 251
+		;;
+	*)
+		echo 0
+		;;
+	esac
+}
+
+_require_acl_get_max()
+{
+	if [ $(_acl_get_max) -eq 0 ]; then
+		_notrun "$FSTYP does not define maximum ACL count"
+	fi
+}
+
+# pick three unused user/group ids, store them as $acl[1-3]
+#
+_acl_setup_ids()
+{
+	eval `(_cat_passwd; _cat_group) | awk -F: '
+	{ ids[$3]=1 }
+	END {
+		j=1
+		for(i=1; i<1000000 && j<=3;i++){
+			if (! (i in ids)) {
+				printf "acl%d=%d;", j, i;
+				j++
+			}
+		}
+	}'`
+}
+
+# filter for the acl ids selected above
+#
+_acl_filter_id()
+{
+	sed \
+		-e "s/u:$acl1/u:id1/" \
+		-e "s/u:$acl2/u:id2/" \
+		-e "s/u:$acl3/u:id3/" \
+		-e "s/g:$acl1/g:id1/" \
+		-e "s/g:$acl2/g:id2/" \
+		-e "s/g:$acl3/g:id3/" \
+		-e "s/ $acl1 / id1 /" \
+		-e "s/ $acl2 / id2 /" \
+		-e "s/ $acl3 / id3 /"
+}
+
+_getfacl_filter_id()
+{
+	sed \
+		-e "s/user:$acl1/user:id1/" \
+		-e "s/user:$acl2/user:id2/" \
+		-e "s/user:$acl3/user:id3/" \
+		-e "s/group:$acl1/group:id1/" \
+		-e "s/group:$acl2/group:id2/" \
+		-e "s/group:$acl3/group:id3/" \
+		-e "s/: $acl1/: id1/" \
+		-e "s/: $acl2/: id2/" \
+		-e "s/: $acl3/: id3/"
+}
+
+# filtered ls
+#
+_acl_ls()
+{
+	_ls_l -n $* | awk '{ print $1, $3, $4, $NF }' | _acl_filter_id
+}
+
+# create an ACL with n ACEs in it
+#
+_create_n_aces()
+{
+	let n=$1-4
+	acl='u::rwx,g::rwx,o::rwx,m::rwx'	# 4 ace acl start
+	while [ $n -ne 0 ]; do
+		acl="$acl,u:$n:rwx"
+		let n=$n-1
+	done
+	echo $acl
+}
+
+# filter user ace names to user ids
+#
+_filter_aces()
+{
+	tmp_file=`mktemp /tmp/ace.XXXXXX`
+
+	(_cat_passwd; _cat_group) > $tmp_file
+
+	$AWK_PROG -v tmpfile=$tmp_file '
+	BEGIN {
+		FS=":"
+		while ( getline <tmpfile > 0 ) {
+			idlist[$1] = $3
+		}
+	}
+	/^user/ { if ($2 in idlist) sub($2, idlist[$2]); print; next}
+	/^u/ { if ($2 in idlist) sub($2, idlist[$2]); print; next}
+	/^default:user/ { if ($3 in idlist) sub($3, idlist[$3]); print; next}
+	{print}
+	'
+	rm -f $tmp_file
+}
+
+_filter_aces_notypes()
+{
+	tr '\[' '\012' | tr ']' '\012' | tr ',' '\012' | _filter_aces|\
+	sed -e 's/u:/user:/' -e 's/g:/group:/' -e 's/o:/other:/' -e 's/m:/mask:/'
+}
+
+_require_acls()
+{
+	[ -n "$CHACL_PROG" ] || _notrun "chacl command not found"
+
+	#
+	# Test if chacl is able to set an ACL on a file. On really old kernels
+	# the system calls might not be implemented at all, but the more common
+	# case is that the tested filesystem simply doesn't support ACLs.
+	#
+	touch $TEST_DIR/syscalltest
+	chacl 'u::rw-,g::---,o::---' $TEST_DIR/syscalltest > $TEST_DIR/syscalltest.out 2>&1
+	cat $TEST_DIR/syscalltest.out >> $seqres.full
+
+	if grep -q 'Function not implemented' $TEST_DIR/syscalltest.out; then
+		_notrun "kernel does not support ACLs"
+	fi
+	if grep -q 'Operation not supported' $TEST_DIR/syscalltest.out; then
+		_notrun "ACLs not supported by this filesystem type: $FSTYP"
+	fi
+
+	rm -f $TEST_DIR/syscalltest.out
+}
+
+_list_acl()
+{
+	file=$1
+
+	ls -dD $file | _acl_filter_id
+}
+
+_require_attrs()
+{
+	local args
+	local nsp
+
+	if [ $# -eq 0 ]; then
+		args="user"
+	else
+		args="$*"
+	fi
+
+	[ -n "$ATTR_PROG" ] || _notrun "attr command not found"
+	[ -n "$GETFATTR_PROG" ] || _notrun "getfattr command not found"
+	[ -n "$SETFATTR_PROG" ] || _notrun "setfattr command not found"
+
+	for nsp in $args; do
+		#
+		# Test if setfattr is able to write an attribute on the target
+		# filesystems. On really old kernels the system calls might
+		# not be implemented at all, but the more common case is that
+		# the tested filesystem simply doesn't support attributes.
+		# Note that we can't simply list attributes as various security
+		# modules generate synthetic attributes not actually stored on
+		# disk.
+		#
+		touch $TEST_DIR/syscalltest
+		$SETFATTR_PROG -n "$nsp.xfstests" -v "attr" $TEST_DIR/syscalltest > $TEST_DIR/syscalltest.out 2>&1
+		cat $TEST_DIR/syscalltest.out >> $seqres.full
+
+		if grep -q 'Function not implemented' $TEST_DIR/syscalltest.out; then
+			_notrun "kernel does not support attrs"
+		fi
+		if grep -q 'Operation not supported' $TEST_DIR/syscalltest.out; then
+			_notrun "attr namespace $nsp not supported by this filesystem type: $FSTYP"
+		fi
+
+		rm -f $TEST_DIR/syscalltest.out
+	done
+}
+
+_require_attr_v1()
+{
+	_scratch_mkfs_xfs_supported -i attr=1 >/dev/null 2>&1 \
+		|| _notrun "attr v1 not supported on $SCRATCH_DEV"
+}
+
+# check if we support the noattr2 mount option
+_require_noattr2()
+{
+	_try_scratch_mkfs_xfs > /dev/null 2>&1 \
+		|| _fail "_try_scratch_mkfs_xfs failed on $SCRATCH_DEV"
+	_try_scratch_mount -o noattr2 > /dev/null 2>&1 \
+		|| _notrun "noattr2 mount option not supported on $SCRATCH_DEV"
+	grep -qw noattr2 <(findmnt -rncv -M "$SCRATCH_MNT" -o OPTIONS)
+	local res=$?
+	_scratch_unmount
+	test $res -eq 0 \
+		|| _notrun "noattr2 mount option no longer functional"
+}
+
+# getfattr -R returns info in readdir order which varies from fs to fs.
+# This sorts the output by filename
+_sort_getfattr_output()
+{
+	awk '{a[FNR]=$0}END{n = asort(a); for(i=1; i <= n; i++) print a[i]"\n"}' RS=''
+}
+
+# Previously, when getfattr dumped the values of all extended attributes, it
+# printed an empty attr as 'user.name', but new getfattr (since attr-2.4.48)
+# prints it as 'user.name=""'. Filter out the trailing '=""' so that both old
+# and new getfattr print the same output.
+#
+# Note: This function returns the getfattr command result.
+_getfattr()
+{
+	$GETFATTR_PROG "$@" | sed -e 's/=\"\"//'
+	return ${PIPESTATUS[0]}
+}
+
+# make sure this script returns success
+/bin/true
diff --git a/common/btrfs b/common/btrfs
new file mode 100644
index 00000000..6a1095ff
--- /dev/null
+++ b/common/btrfs
@@ -0,0 +1,1050 @@
+#
+# Common btrfs specific functions
+#
+
+. common/module
+
+# The recommended way to execute a simple "btrfs" command.
+_btrfs()
+{
+	run_check $BTRFS_UTIL_PROG $*
+}
+
+_btrfs_get_subvolid()
+{
+	local mnt=$1
+	local name=$2
+
+	$BTRFS_UTIL_PROG subvolume list $mnt | grep -E "\s$name$" | $AWK_PROG '{ print $2 }'
+}
+
+# _require_btrfs_command [|