These functional tests can be run with both clients, ceph-fuse and the kernel client.
Fixes: https://tracker.ceph.com/issues/23718
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
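
The new mount/ symlink added below lets teuthology pair each task fragment in this suite with either client's mount fragment. As a minimal sketch (assuming the stock fragment layout under qa/cephfs/mount/; exact file names may differ), the two variants reduce to:

# fuse variant: mount the clients with ceph-fuse
tasks:
- ceph-fuse:

# kernel variant: mount the clients with the kernel driver
tasks:
- kclient: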
+++ /dev/null
-tasks:
- - cephfs_test_runner:
- fail_on_skip: false
- modules:
- - tasks.cephfs.test_acls
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - force file system read-only
- - bad backtrace
- - MDS in read-only mode
- - \(MDS_READ_ONLY\)
-
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_auto_repair
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_backtrace
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - responding to mclientcaps\(revoke\)
- - not advance its oldest_client_tid
- - failing to advance its oldest client/flush tid
- - Too many inodes in cache
- - failing to respond to cache pressure
- - slow requests are blocked
- - failing to respond to capability release
- - MDS cache is too large
- - \(MDS_CLIENT_OLDEST_TID\)
- - \(MDS_CACHE_OVERSIZED\)
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_client_limits
+++ /dev/null
-
-# The task interferes with the network, so we need
-# to permit OSDs to complain about that.
-overrides:
- ceph:
- log-ignorelist:
- - evicting unresponsive client
- - but it is still running
- - slow request
- - MDS_CLIENT_LATE_RELEASE
- - t responding to mclientcaps
-
-tasks:
- - cephfs_test_runner:
- fail_on_skip: false
- modules:
- - tasks.cephfs.test_client_recovery
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - bad backtrace
- - object missing on disk
- - error reading table object
- - error reading sessionmap
- - Error loading MDS rank
- - missing journal object
- - Error recovering journal
- - error decoding table object
- - failed to read JournalPointer
- - Corrupt directory entry
- - Corrupt fnode header
- - corrupt sessionmap header
- - Corrupt dentry
- - Scrub error on inode
- - Metadata damage detected
- - MDS_READ_ONLY
- - force file system read-only
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_damage
-
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - bad backtrace
- - object missing on disk
- - error reading table object
- - error reading sessionmap
- - unmatched fragstat
- - unmatched rstat
- - was unreadable, recreating it now
- - Scrub error on inode
- - Metadata damage detected
- - inconsistent rstat on inode
- - Error recovering journal
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_data_scan
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - inode wrongly marked free
- - bad backtrace on inode
- - inode table repaired for inode
- - Scrub error on inode
- - Metadata damage detected
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_forward_scrub
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - bad backtrace on directory inode
- - error reading table object
- - Metadata damage detected
- - slow requests are blocked
- - Behind on trimming
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_journal_repair
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_flush
+++ /dev/null
-
-overrides:
- ceph:
- cephfs_ec_profile:
- - disabled
- log-ignorelist:
- - OSD full dropping all updates
- - OSD near full
- - pausewr flag
- - failsafe engaged, dropping updates
- - failsafe disengaged, no longer dropping
- - is full \(reached quota
- - POOL_FULL
- - POOL_BACKFILLFULL
- conf:
- mon:
- mon osd nearfull ratio: 0.6
- mon osd backfillfull ratio: 0.6
- mon osd full ratio: 0.7
- osd:
- osd mon report interval: 5
- osd objectstore: memstore
- osd failsafe full ratio: 1.0
- memstore device bytes: 200000000
- client.0:
- debug client: 20
- debug objecter: 20
- debug objectcacher: 20
- client.1:
- debug client: 20
- debug objecter: 20
- debug objectcacher: 20
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_full
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_pool_perm
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - client session with non-allowable root
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_sessionmap
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_strays
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/begin.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/clusters/1-mds-4-client-coloc.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/conf
\ No newline at end of file
--- /dev/null
+.qa/cephfs/mount/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/objectstore/bluestore-bitmap.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/objectstore-ec/bluestore-ec-root.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/frag_enable.yaml
\ No newline at end of file
--- /dev/null
+.qa/overrides/no_client_pidfile.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/whitelist_health.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/supported-random-distro$
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+tasks:
+ - cephfs_test_runner:
+ fail_on_skip: false
+ modules:
+ - tasks.cephfs.test_acls
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - force file system read-only
+ - bad backtrace
+ - MDS in read-only mode
+ - \(MDS_READ_ONLY\)
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_auto_repair
--- /dev/null
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_backtrace
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - responding to mclientcaps\(revoke\)
+ - not advance its oldest_client_tid
+ - failing to advance its oldest client/flush tid
+ - Too many inodes in cache
+ - failing to respond to cache pressure
+ - slow requests are blocked
+ - failing to respond to capability release
+ - MDS cache is too large
+ - \(MDS_CLIENT_OLDEST_TID\)
+ - \(MDS_CACHE_OVERSIZED\)
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_client_limits
--- /dev/null
+# The task interferes with the network, so we need
+# to permit OSDs to complain about that.
+overrides:
+ ceph:
+ log-ignorelist:
+ - evicting unresponsive client
+ - but it is still running
+ - slow request
+ - MDS_CLIENT_LATE_RELEASE
+ - t responding to mclientcaps
+tasks:
+ - cephfs_test_runner:
+ fail_on_skip: false
+ modules:
+ - tasks.cephfs.test_client_recovery
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - bad backtrace
+ - object missing on disk
+ - error reading table object
+ - error reading sessionmap
+ - Error loading MDS rank
+ - missing journal object
+ - Error recovering journal
+ - error decoding table object
+ - failed to read JournalPointer
+ - Corrupt directory entry
+ - Corrupt fnode header
+ - corrupt sessionmap header
+ - Corrupt dentry
+ - Scrub error on inode
+ - Metadata damage detected
+ - MDS_READ_ONLY
+ - force file system read-only
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_damage
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - bad backtrace
+ - object missing on disk
+ - error reading table object
+ - error reading sessionmap
+ - unmatched fragstat
+ - unmatched rstat
+ - was unreadable, recreating it now
+ - Scrub error on inode
+ - Metadata damage detected
+ - inconsistent rstat on inode
+ - Error recovering journal
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_data_scan
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - inode wrongly marked free
+ - bad backtrace on inode
+ - inode table repaired for inode
+ - Scrub error on inode
+ - Metadata damage detected
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_forward_scrub
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - bad backtrace on directory inode
+ - error reading table object
+ - Metadata damage detected
+ - slow requests are blocked
+ - Behind on trimming
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_journal_repair
--- /dev/null
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_flush
--- /dev/null
+overrides:
+ ceph:
+ cephfs_ec_profile:
+ - disabled
+ log-ignorelist:
+ - OSD full dropping all updates
+ - OSD near full
+ - pausewr flag
+ - failsafe engaged, dropping updates
+ - failsafe disengaged, no longer dropping
+ - is full \(reached quota
+ - POOL_FULL
+ - POOL_BACKFILLFULL
+ conf:
+ mon:
+ mon osd nearfull ratio: 0.6
+ mon osd backfillfull ratio: 0.6
+ mon osd full ratio: 0.7
+ osd:
+ osd mon report interval: 5
+ osd objectstore: memstore
+ osd failsafe full ratio: 1.0
+ memstore device bytes: 200000000
+ client.0:
+ debug client: 20
+ debug objecter: 20
+ debug objectcacher: 20
+ client.1:
+ debug client: 20
+ debug objecter: 20
+ debug objectcacher: 20
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_full
--- /dev/null
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_pool_perm
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - client session with non-allowable root
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_sessionmap
--- /dev/null
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_strays
+++ /dev/null
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_acls
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - force file system read-only
- - bad backtrace
- - MDS in read-only mode
- - \(MDS_READ_ONLY\)
-
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_auto_repair
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_backtrace
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - responding to mclientcaps\(revoke\)
- - not advance its oldest_client_tid
- - failing to advance its oldest client/flush tid
- - Too many inodes in cache
- - failing to respond to cache pressure
- - slow requests are blocked
- - failing to respond to capability release
- - MDS cache is too large
- - \(MDS_CLIENT_OLDEST_TID\)
- - \(MDS_CACHE_OVERSIZED\)
-
-tasks:
- - cephfs_test_runner:
- fail_on_skip: false
- modules:
- - tasks.cephfs.test_client_limits
+++ /dev/null
-
-# The task interferes with the network, so we need
-# to permit OSDs to complain about that.
-overrides:
- ceph:
- log-ignorelist:
- - but it is still running
- - slow request
- - evicting unresponsive client
-
-tasks:
- - cephfs_test_runner:
- fail_on_skip: false
- modules:
- - tasks.cephfs.test_client_recovery
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - bad backtrace
- - object missing on disk
- - error reading table object
- - error reading sessionmap
- - Error loading MDS rank
- - missing journal object
- - Error recovering journal
- - error decoding table object
- - failed to read JournalPointer
- - Corrupt directory entry
- - Corrupt fnode header
- - corrupt sessionmap header
- - Corrupt dentry
- - Scrub error on inode
- - Metadata damage detected
- - MDS_READ_ONLY
- - force file system read-only
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_damage
-
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - bad backtrace
- - object missing on disk
- - error reading table object
- - error reading sessionmap
- - unmatched fragstat
- - was unreadable, recreating it now
- - Scrub error on inode
- - Metadata damage detected
- - inconsistent rstat on inode
- - Error recovering journal
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_data_scan
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - inode wrongly marked free
- - bad backtrace on inode
- - inode table repaired for inode
- - Scrub error on inode
- - Metadata damage detected
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_forward_scrub
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - bad backtrace on directory inode
- - error reading table object
- - Metadata damage detected
- - slow requests are blocked
- - Behind on trimming
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_journal_repair
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_flush
+++ /dev/null
-
-overrides:
- ceph:
- cephfs_ec_profile:
- - disabled
- log-ignorelist:
- - OSD full dropping all updates
- - OSD near full
- - pausewr flag
- - failsafe engaged, dropping updates
- - failsafe disengaged, no longer dropping
- - is full \(reached quota
- - POOL_FULL
- - POOL_BACKFILLFULL
- conf:
- mon:
- mon osd nearfull ratio: 0.6
- mon osd backfillfull ratio: 0.6
- mon osd full ratio: 0.7
- osd:
- osd mon report interval: 5
- osd objectstore: memstore
- osd failsafe full ratio: 1.0
- memstore device bytes: 200000000
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_full
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_pool_perm
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - client session with non-allowable root
-
-tasks:
- - cephfs_test_runner:
- fail_on_skip: false
- modules:
- - tasks.cephfs.test_sessionmap
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_strays