From cd084c7d6d2fcb4eeaa90a19f57cc38b095cc88a Mon Sep 17 00:00:00 2001 From: John Spray Date: Wed, 10 Aug 2016 17:03:36 +0100 Subject: [PATCH] suites/kcephfs: add python tests Let's see which of these is happy running with a kernel client! Signed-off-by: John Spray --- suites/kcephfs/recovery/% | 0 .../recovery/clusters/4-remote-clients.yaml | 6 +++++ suites/kcephfs/recovery/debug/mds_client.yaml | 12 +++++++++ .../kcephfs/recovery/dirfrag/frag_enable.yaml | 11 ++++++++ suites/kcephfs/recovery/mounts/kmounts.yaml | 4 +++ .../kcephfs/recovery/tasks/auto-repair.yaml | 11 ++++++++ suites/kcephfs/recovery/tasks/backtrace.yaml | 5 ++++ suites/kcephfs/recovery/tasks/cap-flush.yaml | 5 ++++ .../kcephfs/recovery/tasks/client-limits.yaml | 11 ++++++++ .../recovery/tasks/client-recovery.yaml | 13 ++++++++++ .../recovery/tasks/config-commands.yaml | 11 ++++++++ suites/kcephfs/recovery/tasks/damage.yaml | 24 ++++++++++++++++++ suites/kcephfs/recovery/tasks/data-scan.yaml | 15 +++++++++++ .../kcephfs/recovery/tasks/forward-scrub.yaml | 13 ++++++++++ .../recovery/tasks/journal-repair.yaml | 11 ++++++++ suites/kcephfs/recovery/tasks/mds-flush.yaml | 5 ++++ suites/kcephfs/recovery/tasks/mds-full.yaml | 25 +++++++++++++++++++ suites/kcephfs/recovery/tasks/pool-perm.yaml | 5 ++++ suites/kcephfs/recovery/tasks/sessionmap.yaml | 13 ++++++++++ suites/kcephfs/recovery/tasks/strays.yaml | 5 ++++ .../kcephfs/recovery/tasks/volume-client.yaml | 11 ++++++++ suites/kcephfs/recovery/xfs.yaml | 1 + 22 files changed, 217 insertions(+) create mode 100644 suites/kcephfs/recovery/% create mode 100644 suites/kcephfs/recovery/clusters/4-remote-clients.yaml create mode 100644 suites/kcephfs/recovery/debug/mds_client.yaml create mode 100644 suites/kcephfs/recovery/dirfrag/frag_enable.yaml create mode 100644 suites/kcephfs/recovery/mounts/kmounts.yaml create mode 100644 suites/kcephfs/recovery/tasks/auto-repair.yaml create mode 100644 suites/kcephfs/recovery/tasks/backtrace.yaml create mode 100644 
suites/kcephfs/recovery/tasks/cap-flush.yaml create mode 100644 suites/kcephfs/recovery/tasks/client-limits.yaml create mode 100644 suites/kcephfs/recovery/tasks/client-recovery.yaml create mode 100644 suites/kcephfs/recovery/tasks/config-commands.yaml create mode 100644 suites/kcephfs/recovery/tasks/damage.yaml create mode 100644 suites/kcephfs/recovery/tasks/data-scan.yaml create mode 100644 suites/kcephfs/recovery/tasks/forward-scrub.yaml create mode 100644 suites/kcephfs/recovery/tasks/journal-repair.yaml create mode 100644 suites/kcephfs/recovery/tasks/mds-flush.yaml create mode 100644 suites/kcephfs/recovery/tasks/mds-full.yaml create mode 100644 suites/kcephfs/recovery/tasks/pool-perm.yaml create mode 100644 suites/kcephfs/recovery/tasks/sessionmap.yaml create mode 100644 suites/kcephfs/recovery/tasks/strays.yaml create mode 100644 suites/kcephfs/recovery/tasks/volume-client.yaml create mode 120000 suites/kcephfs/recovery/xfs.yaml diff --git a/suites/kcephfs/recovery/% b/suites/kcephfs/recovery/% new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/suites/kcephfs/recovery/clusters/4-remote-clients.yaml b/suites/kcephfs/recovery/clusters/4-remote-clients.yaml new file mode 100644 index 0000000000000..702bb161e75b5 --- /dev/null +++ b/suites/kcephfs/recovery/clusters/4-remote-clients.yaml @@ -0,0 +1,6 @@ +roles: +- [mon.a, osd.0, mds.a, mds.b, client.1, client.2, client.3] +- [client.0, osd.1, osd.2] +log-rotate: + ceph-mds: 10G + ceph-osd: 10G diff --git a/suites/kcephfs/recovery/debug/mds_client.yaml b/suites/kcephfs/recovery/debug/mds_client.yaml new file mode 100644 index 0000000000000..76cc4d86848fe --- /dev/null +++ b/suites/kcephfs/recovery/debug/mds_client.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + conf: + mds: + debug ms: 1 + debug mds: 20 + client.0: + debug ms: 1 + debug client: 20 + client.1: + debug ms: 1 + debug client: 20 diff --git a/suites/kcephfs/recovery/dirfrag/frag_enable.yaml 
b/suites/kcephfs/recovery/dirfrag/frag_enable.yaml new file mode 100644 index 0000000000000..9913fa1dfbb6b --- /dev/null +++ b/suites/kcephfs/recovery/dirfrag/frag_enable.yaml @@ -0,0 +1,11 @@ + +overrides: + ceph: + conf: + mds: + mds bal frag: true + mds bal fragment size max: 10000 + mds bal split size: 100 + mds bal merge size: 5 + mds bal split bits: 3 + diff --git a/suites/kcephfs/recovery/mounts/kmounts.yaml b/suites/kcephfs/recovery/mounts/kmounts.yaml new file mode 100644 index 0000000000000..c18db8f5ea61d --- /dev/null +++ b/suites/kcephfs/recovery/mounts/kmounts.yaml @@ -0,0 +1,4 @@ +tasks: +- install: +- ceph: +- kclient: diff --git a/suites/kcephfs/recovery/tasks/auto-repair.yaml b/suites/kcephfs/recovery/tasks/auto-repair.yaml new file mode 100644 index 0000000000000..e331cdd65b487 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/auto-repair.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - force file system read-only + - bad backtrace + + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_auto_repair diff --git a/suites/kcephfs/recovery/tasks/backtrace.yaml b/suites/kcephfs/recovery/tasks/backtrace.yaml new file mode 100644 index 0000000000000..d740a5f6f7549 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/backtrace.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_backtrace diff --git a/suites/kcephfs/recovery/tasks/cap-flush.yaml b/suites/kcephfs/recovery/tasks/cap-flush.yaml new file mode 100644 index 0000000000000..0d26dc9e6b56f --- /dev/null +++ b/suites/kcephfs/recovery/tasks/cap-flush.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_cap_flush diff --git a/suites/kcephfs/recovery/tasks/client-limits.yaml b/suites/kcephfs/recovery/tasks/client-limits.yaml new file mode 100644 index 0000000000000..288866c636fde --- /dev/null +++ b/suites/kcephfs/recovery/tasks/client-limits.yaml @@ -0,0 +1,11 @@ + +overrides: + ceph: + log-whitelist: + - 
responding to mclientcaps\(revoke\) + - not advance its oldest_client_tid + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_client_limits diff --git a/suites/kcephfs/recovery/tasks/client-recovery.yaml b/suites/kcephfs/recovery/tasks/client-recovery.yaml new file mode 100644 index 0000000000000..1433ee1d819f6 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/client-recovery.yaml @@ -0,0 +1,13 @@ + +# The task interferes with the network, so we need +# to permit OSDs to complain about that. +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - slow request + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_client_recovery diff --git a/suites/kcephfs/recovery/tasks/config-commands.yaml b/suites/kcephfs/recovery/tasks/config-commands.yaml new file mode 100644 index 0000000000000..2f51801d6cb14 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/config-commands.yaml @@ -0,0 +1,11 @@ + +overrides: + ceph: + conf: + global: + lockdep: true + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_config_commands diff --git a/suites/kcephfs/recovery/tasks/damage.yaml b/suites/kcephfs/recovery/tasks/damage.yaml new file mode 100644 index 0000000000000..4b84977712e3b --- /dev/null +++ b/suites/kcephfs/recovery/tasks/damage.yaml @@ -0,0 +1,24 @@ + +overrides: + ceph: + log-whitelist: + - bad backtrace + - object missing on disk + - error reading table object + - error reading sessionmap + - Error loading MDS rank + - missing journal object + - Error recovering journal + - error decoding table object + - failed to read JournalPointer + - Corrupt directory entry + - Corrupt fnode header + - corrupt sessionmap header + - Corrupt dentry + - Scrub error on inode + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_damage + diff --git a/suites/kcephfs/recovery/tasks/data-scan.yaml b/suites/kcephfs/recovery/tasks/data-scan.yaml new file mode 100644 index 0000000000000..dd0a85f68c2e7 --- /dev/null +++ 
b/suites/kcephfs/recovery/tasks/data-scan.yaml @@ -0,0 +1,15 @@ + +overrides: + ceph: + log-whitelist: + - bad backtrace + - object missing on disk + - error reading table object + - error reading sessionmap + - unmatched fragstat + - was unreadable, recreating it now + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_data_scan diff --git a/suites/kcephfs/recovery/tasks/forward-scrub.yaml b/suites/kcephfs/recovery/tasks/forward-scrub.yaml new file mode 100644 index 0000000000000..dbbed4bf718b2 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/forward-scrub.yaml @@ -0,0 +1,13 @@ + +overrides: + ceph: + log-whitelist: + - inode wrongly marked free + - bad backtrace on inode + - inode table repaired for inode + - Scrub error on inode + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_forward_scrub diff --git a/suites/kcephfs/recovery/tasks/journal-repair.yaml b/suites/kcephfs/recovery/tasks/journal-repair.yaml new file mode 100644 index 0000000000000..c85f46cb82440 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/journal-repair.yaml @@ -0,0 +1,11 @@ + +overrides: + ceph: + log-whitelist: + - bad backtrace on dir ino + - error reading table object + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_journal_repair diff --git a/suites/kcephfs/recovery/tasks/mds-flush.yaml b/suites/kcephfs/recovery/tasks/mds-flush.yaml new file mode 100644 index 0000000000000..d59a8ad5fc941 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/mds-flush.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_flush diff --git a/suites/kcephfs/recovery/tasks/mds-full.yaml b/suites/kcephfs/recovery/tasks/mds-full.yaml new file mode 100644 index 0000000000000..64ece034099c8 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/mds-full.yaml @@ -0,0 +1,25 @@ + +overrides: + ceph: + log-whitelist: + - OSD full dropping all updates + - OSD near full + - is full \(reached quota + conf: + osd: + osd mon report interval max: 5 
+ osd objectstore: memstore + memstore device bytes: 100000000 + client.0: + debug client: 20 + debug objecter: 20 + debug objectcacher: 20 + client.1: + debug client: 20 + debug objecter: 20 + debug objectcacher: 20 + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_full diff --git a/suites/kcephfs/recovery/tasks/pool-perm.yaml b/suites/kcephfs/recovery/tasks/pool-perm.yaml new file mode 100644 index 0000000000000..f220626df62fe --- /dev/null +++ b/suites/kcephfs/recovery/tasks/pool-perm.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_pool_perm diff --git a/suites/kcephfs/recovery/tasks/sessionmap.yaml b/suites/kcephfs/recovery/tasks/sessionmap.yaml new file mode 100644 index 0000000000000..054fdb7079234 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/sessionmap.yaml @@ -0,0 +1,13 @@ + +overrides: + ceph: + conf: + global: + ms type: simple + log-whitelist: + - client session with invalid root + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_sessionmap diff --git a/suites/kcephfs/recovery/tasks/strays.yaml b/suites/kcephfs/recovery/tasks/strays.yaml new file mode 100644 index 0000000000000..2809fc141bfb2 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/strays.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_strays diff --git a/suites/kcephfs/recovery/tasks/volume-client.yaml b/suites/kcephfs/recovery/tasks/volume-client.yaml new file mode 100644 index 0000000000000..e8c850a05cdd6 --- /dev/null +++ b/suites/kcephfs/recovery/tasks/volume-client.yaml @@ -0,0 +1,11 @@ + +overrides: + ceph: + conf: + global: + ms type: simple + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_volume_client diff --git a/suites/kcephfs/recovery/xfs.yaml b/suites/kcephfs/recovery/xfs.yaml new file mode 120000 index 0000000000000..0350079c97619 --- /dev/null +++ b/suites/kcephfs/recovery/xfs.yaml @@ -0,0 +1 @@ +../../../fs/xfs.yaml \ No newline at end of file 
-- 2.39.5