+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/cephfs/begin.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/cephfs/clusters/1-mds-4-client-coloc.yaml
\ No newline at end of file
+++ /dev/null
-.qa/cephfs/conf
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/cephfs/mount/fuse.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/objectstore/bluestore-bitmap.yaml
\ No newline at end of file
+++ /dev/null
-.qa/cephfs/objectstore-ec/bluestore-ec-root.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/cephfs/overrides/frag_enable.yaml
\ No newline at end of file
+++ /dev/null
-.qa/overrides/no_client_pidfile.yaml
\ No newline at end of file
+++ /dev/null
-.qa/cephfs/overrides/whitelist_health.yaml
\ No newline at end of file
+++ /dev/null
-.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
+++ /dev/null
-.qa/distros/supported-random-distro$
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-
-overrides:
- ceph:
- conf:
- global:
- lockdep: true
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_admin
+++ /dev/null
-
-overrides:
- ceph:
- log-ignorelist:
- - bad backtrace
- - object missing on disk
- - error reading table object
- - error reading sessionmap
- - unmatched fragstat
- - unmatched rstat
- - was unreadable, recreating it now
- - Scrub error on inode
- - Metadata damage detected
- - MDS_FAILED
- - MDS_DAMAGE
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_recovery_pool
+++ /dev/null
-tasks:
-- cephfs_test_runner:
- modules:
- - tasks.cephfs.test_dump_tree
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - Replacing daemon mds.a
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_cap_flush
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - Replacing daemon mds
- - Scrub error on inode
- - Behind on trimming
- - Metadata damage detected
- - bad backtrace on inode
- - overall HEALTH_
- - \(MDS_TRIM\)
- conf:
- mds:
- mds log max segments: 1
- mds cache max size: 1000
-tasks:
-- cephfs_test_runner:
- modules:
- - tasks.cephfs.test_scrub_checks
- - tasks.cephfs.test_scrub
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - fs/quota
+++ /dev/null
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_readahead
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_fragment
+++ /dev/null
-overrides:
- ceph-fuse:
- disabled: true
- kclient:
- disabled: true
-tasks:
-- workunit:
- clients:
- client.0:
- - fs/test_python.sh
+++ /dev/null
-tasks:
--mds_creation_failure:
-- workunit:
- clients:
- all: [fs/misc/trivial_sync.sh]
-
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_openfiletable
+++ /dev/null
-
-tasks:
- - cephfs_test_runner:
- modules:
- - tasks.cephfs.test_quota
+++ /dev/null
-
-tasks:
-- cephfs_test_runner:
- modules:
- - tasks.cephfs.test_journal_migration
+++ /dev/null
-.qa/cephfs/tasks/libcephfs_interface_tests.yaml
\ No newline at end of file
--- /dev/null
+
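+# Exercise the CephFS administrative commands; lockdep is enabled globally so
+# that any lock-ordering violation aborts the run rather than passing silently.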
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_admin
--- /dev/null
+
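+# test_recovery_pool rebuilds metadata into an alternate pool; the damage it
+# deliberately inflicts shows up in the cluster log, so those messages are ignored.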
+overrides:
+ ceph:
+ log-ignorelist:
+ - bad backtrace
+ - object missing on disk
+ - error reading table object
+ - error reading sessionmap
+ - unmatched fragstat
+ - unmatched rstat
+ - was unreadable, recreating it now
+ - Scrub error on inode
+ - Metadata damage detected
+ - MDS_FAILED
+ - MDS_DAMAGE
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_recovery_pool
--- /dev/null
+tasks:
+- cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_dump_tree
--- /dev/null
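+# "Replacing daemon mds.a" is expected in the cluster log here: the cap flush
+# tests restart the MDS.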
+overrides:
+ ceph:
+ log-ignorelist:
+ - Replacing daemon mds.a
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_cap_flush
--- /dev/null
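+# fail_on_skip: false lets the job pass when the test skips itself, e.g. on
+# clients that do not expose readahead statistics.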
+tasks:
+ - cephfs_test_runner:
+ fail_on_skip: false
+ modules:
+ - tasks.cephfs.test_readahead
--- /dev/null
+
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_fragment
--- /dev/null
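+# Inject failures while the MDS/filesystem is being created, then check that a
+# trivial client workload still runs afterwards.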
+tasks:
+- mds_creation_failure:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
+
--- /dev/null
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_openfiletable
--- /dev/null
+tasks:
+ - cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_quota
--- /dev/null
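+# Run the scrub tests with an artificially tiny journal (1 segment) and cache
+# so that trimming happens constantly; the resulting health warnings such as
+# MDS_TRIM and "Behind on trimming" are expected and ignored.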
+overrides:
+ ceph:
+ log-ignorelist:
+ - Replacing daemon mds
+ - Scrub error on inode
+ - Behind on trimming
+ - Metadata damage detected
+ - bad backtrace on inode
+ - overall HEALTH_
+ - \(MDS_TRIM\)
+ conf:
+ mds:
+ mds log max segments: 1
+ mds cache max size: 1000
+tasks:
+- cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_scrub_checks
+ - tasks.cephfs.test_scrub
--- /dev/null
+tasks:
+- cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_journal_migration
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - fs/quota
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/begin.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/clusters/1-mds-1-client-coloc.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/conf
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/objectstore/bluestore-bitmap.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/frag_enable.yaml
\ No newline at end of file
--- /dev/null
+.qa/overrides/no_client_pidfile.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/whitelist_health.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/supported-random-distro$
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
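+# Run the libcephfs test suite with verbose client/MDS logging; check-counter
+# asserts that directory splits (mds.dir_split) actually occurred during the run.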
+overrides:
+ ceph:
+ conf:
+ client:
+ debug ms: 1
+ debug client: 20
+ mds:
+ debug ms: 1
+ debug mds: 20
+tasks:
+- check-counter:
+ counters:
+ mds:
+ - "mds.dir_split"
+- workunit:
+ clients:
+ client.0:
+ - libcephfs/test.sh
--- /dev/null
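+# fs/test_python.sh exercises the CephFS python bindings directly, so the
+# default ceph-fuse/kclient mounts are disabled.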
+overrides:
+ ceph-fuse:
+ disabled: true
+ kclient:
+ disabled: true
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - fs/test_python.sh
import os
import time
from textwrap import dedent
-from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
class TestCapFlush(CephFSTestCase):
creates inode hasn't been replayed.
"""
- if not isinstance(self.mount_a, FuseMount):
- self.skipTest("Require FUSE client to inject client release failure")
-
dir_path = os.path.join(self.mount_a.mountpoint, "testdir")
py_script = dedent("""
import os