From: Sage Weil
Date: Wed, 14 Dec 2016 17:29:55 +0000 (-0600)
Subject: move ceph-qa-suite dirs into qa/
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=449ffc6521483cbd81f1dcf10ccfb11f0ce9345c;p=ceph.git

move ceph-qa-suite dirs into qa/
---

diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index c4a1a681160..00000000000
--- a/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*~
-.*.sw[nmop]
-*.pyc
-.tox
-__pycache__
diff --git a/README b/README
deleted file mode 100644
index 0e32ce9f638..00000000000
--- a/README
+++ /dev/null
@@ -1,52 +0,0 @@
-ceph-qa-suite
--------------
-
-clusters/ - some predefined cluster layouts
-suites/ - set suite
-
-The suites directory has a hierarchical collection of tests. This can be
-freeform, but generally follows the convention of
-
-  suites/<test suite name>/<test group>/...
-
-A test is described by a yaml fragment.
-
-A test can exist as a single .yaml file in the directory tree. For example:
-
-  suites/foo/one.yaml
-  suites/foo/two.yaml
-
-is a simple group of two tests.
-
-A directory with a magic '+' file represents a test that combines all
-other items in the directory into a single yaml fragment. For example:
-
-  suites/foo/bar/+
-  suites/foo/bar/a.yaml
-  suites/foo/bar/b.yaml
-  suites/foo/bar/c.yaml
-
-is a single test consisting of a + b + c.
-
-A directory with a magic '%' file represents a test matrix formed from
-all other items in the directory. For example,
-
-  suites/baz/%
-  suites/baz/a.yaml
-  suites/baz/b/b1.yaml
-  suites/baz/b/b2.yaml
-  suites/baz/c.yaml
-  suites/baz/d/d1.yaml
-  suites/baz/d/d2.yaml
-
-is a 4-dimensional test matrix. Two dimensions (a, c) are trivial (1
-item), so this is really 2x2 = 4 tests, which are
-
-  a + b1 + c + d1
-  a + b1 + c + d2
-  a + b2 + c + d1
-  a + b2 + c + d2
-
-Symlinks are okay.
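
[Editor's note: the sketch below is not part of the commit. It only illustrates how a directory
containing a magic '%' file, such as suites/baz above, could be expanded into its 2x2 test matrix,
assuming one dimension per directory entry and a cartesian product across dimensions. The function
name is hypothetical; teuthology's actual matrix builder is more involved.]

    # Hypothetical sketch only -- not teuthology's real implementation.
    import itertools
    import os

    def expand_percent_dir(path):
        """Yield one list of yaml fragments per cell of the '%' test matrix."""
        dims = []
        for entry in sorted(os.listdir(path)):
            if entry == '%':
                continue  # the magic marker itself contributes nothing
            full = os.path.join(path, entry)
            if os.path.isdir(full):
                # a subdirectory is one dimension, with one choice per file inside it
                dims.append([os.path.join(full, f) for f in sorted(os.listdir(full))])
            else:
                # a plain .yaml file is a trivial dimension (exactly one choice)
                dims.append([full])
        # cartesian product over the dimensions: 1 x 2 x 1 x 2 = 4 for suites/baz
        for combo in itertools.product(*dims):
            yield list(combo)

    # expand_percent_dir('suites/baz') would yield four fragment lists:
    #   a.yaml + b/b1.yaml + c.yaml + d/d1.yaml
    #   a.yaml + b/b1.yaml + c.yaml + d/d2.yaml
    #   a.yaml + b/b2.yaml + c.yaml + d/d1.yaml
    #   a.yaml + b/b2.yaml + c.yaml + d/d2.yaml

[End of editorial note; the deleted README continues below.]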
- -The teuthology code can be found in https://github.com/ceph/teuthology.git diff --git a/clusters/extra-client.yaml b/clusters/extra-client.yaml deleted file mode 100644 index 70ccbd028b2..00000000000 --- a/clusters/extra-client.yaml +++ /dev/null @@ -1,5 +0,0 @@ -roles: -- [mon.a, mon.c, osd.0, osd.1, osd.2] -- [mon.b, mds.a, osd.3, osd.4, osd.5] -- [client.0] -- [client.1] diff --git a/clusters/fixed-1.yaml b/clusters/fixed-1.yaml deleted file mode 100644 index 8ec132330db..00000000000 --- a/clusters/fixed-1.yaml +++ /dev/null @@ -1,2 +0,0 @@ -roles: -- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, client.0] diff --git a/clusters/fixed-2.yaml b/clusters/fixed-2.yaml deleted file mode 100644 index 79ed8cb5e21..00000000000 --- a/clusters/fixed-2.yaml +++ /dev/null @@ -1,3 +0,0 @@ -roles: -- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0] -- [mon.b, mds.a, osd.3, osd.4, osd.5, client.1] diff --git a/clusters/fixed-3.yaml b/clusters/fixed-3.yaml deleted file mode 100644 index 0038432afa7..00000000000 --- a/clusters/fixed-3.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.c, osd.0, osd.1, osd.2] -- [mon.b, mds.a, osd.3, osd.4, osd.5] -- [client.0] diff --git a/debug/mds_client.yaml b/debug/mds_client.yaml deleted file mode 100644 index c6fec3fc6f9..00000000000 --- a/debug/mds_client.yaml +++ /dev/null @@ -1,9 +0,0 @@ -overrides: - ceph: - conf: - mds: - debug ms: 1 - debug mds: 20 - client: - debug ms: 1 - debug client: 20 \ No newline at end of file diff --git a/distros/all/centos_6.3.yaml b/distros/all/centos_6.3.yaml deleted file mode 100644 index 32187d6daf0..00000000000 --- a/distros/all/centos_6.3.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: centos -os_version: "6.3" diff --git a/distros/all/centos_6.4.yaml b/distros/all/centos_6.4.yaml deleted file mode 100644 index 02383cd5f8c..00000000000 --- a/distros/all/centos_6.4.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: centos -os_version: "6.4" diff --git a/distros/all/centos_6.5.yaml b/distros/all/centos_6.5.yaml deleted file mode 100644 index 77c9e41f73e..00000000000 --- a/distros/all/centos_6.5.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: centos -os_version: "6.5" diff --git a/distros/all/debian_6.0.yaml b/distros/all/debian_6.0.yaml deleted file mode 100644 index 6820fa3c702..00000000000 --- a/distros/all/debian_6.0.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: debian -os_version: "6.0" diff --git a/distros/all/debian_7.0.yaml b/distros/all/debian_7.0.yaml deleted file mode 100644 index 8100dc41e3d..00000000000 --- a/distros/all/debian_7.0.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: debian -os_version: "7.0" diff --git a/distros/all/fedora_17.yaml b/distros/all/fedora_17.yaml deleted file mode 100644 index 801053af0ae..00000000000 --- a/distros/all/fedora_17.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: fedora -os_version: "17" diff --git a/distros/all/fedora_18.yaml b/distros/all/fedora_18.yaml deleted file mode 100644 index 07872aa7edf..00000000000 --- a/distros/all/fedora_18.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: fedora -os_version: "18" diff --git a/distros/all/fedora_19.yaml b/distros/all/fedora_19.yaml deleted file mode 100644 index 5bac8aceea2..00000000000 --- a/distros/all/fedora_19.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: fedora -os_version: "19" diff --git a/distros/all/opensuse_12.2.yaml b/distros/all/opensuse_12.2.yaml deleted file mode 100644 index ee9f877a26b..00000000000 --- a/distros/all/opensuse_12.2.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: opensuse -os_version: "12.2" diff --git 
a/distros/all/rhel_6.3.yaml b/distros/all/rhel_6.3.yaml deleted file mode 100644 index 6a8edcd5626..00000000000 --- a/distros/all/rhel_6.3.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: rhel -os_version: "6.3" diff --git a/distros/all/rhel_6.4.yaml b/distros/all/rhel_6.4.yaml deleted file mode 100644 index 5225495834a..00000000000 --- a/distros/all/rhel_6.4.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: rhel -os_version: "6.4" diff --git a/distros/all/rhel_6.5.yaml b/distros/all/rhel_6.5.yaml deleted file mode 100644 index 7db54bea1bd..00000000000 --- a/distros/all/rhel_6.5.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: rhel -os_version: "6.5" diff --git a/distros/all/sles_11-sp2.yaml b/distros/all/sles_11-sp2.yaml deleted file mode 100644 index df9c3ca01c8..00000000000 --- a/distros/all/sles_11-sp2.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: sles -os_version: "11-sp2" diff --git a/distros/all/ubuntu_12.04.yaml b/distros/all/ubuntu_12.04.yaml deleted file mode 100644 index dbc3a8d9c58..00000000000 --- a/distros/all/ubuntu_12.04.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: ubuntu -os_version: "12.04" diff --git a/distros/all/ubuntu_12.10.yaml b/distros/all/ubuntu_12.10.yaml deleted file mode 100644 index ab655676e4c..00000000000 --- a/distros/all/ubuntu_12.10.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: ubuntu -os_version: "12.10" diff --git a/distros/all/ubuntu_14.04.yaml b/distros/all/ubuntu_14.04.yaml deleted file mode 100644 index 309e989feeb..00000000000 --- a/distros/all/ubuntu_14.04.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: ubuntu -os_version: "14.04" diff --git a/distros/supported/centos_6.5.yaml b/distros/supported/centos_6.5.yaml deleted file mode 120000 index 57e60fcf930..00000000000 --- a/distros/supported/centos_6.5.yaml +++ /dev/null @@ -1 +0,0 @@ -../all/centos_6.5.yaml \ No newline at end of file diff --git a/distros/supported/debian_7.0.yaml b/distros/supported/debian_7.0.yaml deleted file mode 120000 index 2a14a987db3..00000000000 --- a/distros/supported/debian_7.0.yaml +++ /dev/null @@ -1 +0,0 @@ -../all/debian_7.0.yaml \ No newline at end of file diff --git a/distros/supported/ubuntu_12.04.yaml b/distros/supported/ubuntu_12.04.yaml deleted file mode 120000 index 56160b8aa22..00000000000 --- a/distros/supported/ubuntu_12.04.yaml +++ /dev/null @@ -1 +0,0 @@ -../all/ubuntu_12.04.yaml \ No newline at end of file diff --git a/distros/supported/ubuntu_14.04.yaml b/distros/supported/ubuntu_14.04.yaml deleted file mode 120000 index cf7fff7a866..00000000000 --- a/distros/supported/ubuntu_14.04.yaml +++ /dev/null @@ -1 +0,0 @@ -../all/ubuntu_14.04.yaml \ No newline at end of file diff --git a/fs/btrfs.yaml b/fs/btrfs.yaml deleted file mode 100644 index 0b3f6fac7a5..00000000000 --- a/fs/btrfs.yaml +++ /dev/null @@ -1,7 +0,0 @@ -overrides: - ceph: - fs: btrfs - conf: - osd: - osd sloppy crc: true - osd op thread timeout: 60 diff --git a/fs/ext4.yaml b/fs/ext4.yaml deleted file mode 100644 index fde6751751d..00000000000 --- a/fs/ext4.yaml +++ /dev/null @@ -1,3 +0,0 @@ -overrides: - ceph: - fs: ext4 diff --git a/fs/xfs.yaml b/fs/xfs.yaml deleted file mode 100644 index 0d88e107df4..00000000000 --- a/fs/xfs.yaml +++ /dev/null @@ -1,6 +0,0 @@ -overrides: - ceph: - fs: xfs - conf: - osd: - osd sloppy crc: true \ No newline at end of file diff --git a/overrides/whitelist_wrongly_marked_down.yaml b/overrides/whitelist_wrongly_marked_down.yaml deleted file mode 100644 index 5cf329fa0e1..00000000000 --- a/overrides/whitelist_wrongly_marked_down.yaml +++ /dev/null @@ -1,10 +0,0 @@ -overrides: - 
ceph: - log-whitelist: - - wrongly marked me down - conf: - mds: - debug mds: 20 - debug ms: 1 - client: - debug client: 10 \ No newline at end of file diff --git a/qa/.gitignore b/qa/.gitignore new file mode 100644 index 00000000000..c4a1a681160 --- /dev/null +++ b/qa/.gitignore @@ -0,0 +1,5 @@ +*~ +.*.sw[nmop] +*.pyc +.tox +__pycache__ diff --git a/qa/README b/qa/README new file mode 100644 index 00000000000..0e32ce9f638 --- /dev/null +++ b/qa/README @@ -0,0 +1,52 @@ +ceph-qa-suite +------------- + +clusters/ - some predefined cluster layouts +suites/ - set suite + +The suites directory has a hierarchical collection of tests. This can be +freeform, but generally follows the convention of + + suites///... + +A test is described by a yaml fragment. + +A test can exist as a single .yaml file in the directory tree. For example: + + suites/foo/one.yaml + suites/foo/two.yaml + +is a simple group of two tests. + +A directory with a magic '+' file represents a test that combines all +other items in the directory into a single yaml fragment. For example: + + suites/foo/bar/+ + suites/foo/bar/a.yaml + suites/foo/bar/b.yaml + suites/foo/bar/c.yaml + +is a single test consisting of a + b + c. + +A directory with a magic '%' file represents a test matrix formed from +all other items in the directory. For example, + + suites/baz/% + suites/baz/a.yaml + suites/baz/b/b1.yaml + suites/baz/b/b2.yaml + suites/baz/c.yaml + suites/baz/d/d1.yaml + suites/baz/d/d2.yaml + +is a 4-dimensional test matrix. Two dimensions (a, c) are trivial (1 +item), so this is really 2x2 = 4 tests, which are + + a + b1 + c + d1 + a + b1 + c + d2 + a + b2 + c + d1 + a + b2 + c + d2 + +Symlinks are okay. + +The teuthology code can be found in https://github.com/ceph/teuthology.git diff --git a/qa/clusters/extra-client.yaml b/qa/clusters/extra-client.yaml new file mode 100644 index 00000000000..70ccbd028b2 --- /dev/null +++ b/qa/clusters/extra-client.yaml @@ -0,0 +1,5 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mds.a, osd.3, osd.4, osd.5] +- [client.0] +- [client.1] diff --git a/qa/clusters/fixed-1.yaml b/qa/clusters/fixed-1.yaml new file mode 100644 index 00000000000..8ec132330db --- /dev/null +++ b/qa/clusters/fixed-1.yaml @@ -0,0 +1,2 @@ +roles: +- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, client.0] diff --git a/qa/clusters/fixed-2.yaml b/qa/clusters/fixed-2.yaml new file mode 100644 index 00000000000..79ed8cb5e21 --- /dev/null +++ b/qa/clusters/fixed-2.yaml @@ -0,0 +1,3 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0] +- [mon.b, mds.a, osd.3, osd.4, osd.5, client.1] diff --git a/qa/clusters/fixed-3.yaml b/qa/clusters/fixed-3.yaml new file mode 100644 index 00000000000..0038432afa7 --- /dev/null +++ b/qa/clusters/fixed-3.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mds.a, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/debug/mds_client.yaml b/qa/debug/mds_client.yaml new file mode 100644 index 00000000000..c6fec3fc6f9 --- /dev/null +++ b/qa/debug/mds_client.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + conf: + mds: + debug ms: 1 + debug mds: 20 + client: + debug ms: 1 + debug client: 20 \ No newline at end of file diff --git a/qa/distros/all/centos_6.3.yaml b/qa/distros/all/centos_6.3.yaml new file mode 100644 index 00000000000..32187d6daf0 --- /dev/null +++ b/qa/distros/all/centos_6.3.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "6.3" diff --git a/qa/distros/all/centos_6.4.yaml b/qa/distros/all/centos_6.4.yaml new file mode 100644 index 
00000000000..02383cd5f8c --- /dev/null +++ b/qa/distros/all/centos_6.4.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "6.4" diff --git a/qa/distros/all/centos_6.5.yaml b/qa/distros/all/centos_6.5.yaml new file mode 100644 index 00000000000..77c9e41f73e --- /dev/null +++ b/qa/distros/all/centos_6.5.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "6.5" diff --git a/qa/distros/all/debian_6.0.yaml b/qa/distros/all/debian_6.0.yaml new file mode 100644 index 00000000000..6820fa3c702 --- /dev/null +++ b/qa/distros/all/debian_6.0.yaml @@ -0,0 +1,2 @@ +os_type: debian +os_version: "6.0" diff --git a/qa/distros/all/debian_7.0.yaml b/qa/distros/all/debian_7.0.yaml new file mode 100644 index 00000000000..8100dc41e3d --- /dev/null +++ b/qa/distros/all/debian_7.0.yaml @@ -0,0 +1,2 @@ +os_type: debian +os_version: "7.0" diff --git a/qa/distros/all/fedora_17.yaml b/qa/distros/all/fedora_17.yaml new file mode 100644 index 00000000000..801053af0ae --- /dev/null +++ b/qa/distros/all/fedora_17.yaml @@ -0,0 +1,2 @@ +os_type: fedora +os_version: "17" diff --git a/qa/distros/all/fedora_18.yaml b/qa/distros/all/fedora_18.yaml new file mode 100644 index 00000000000..07872aa7edf --- /dev/null +++ b/qa/distros/all/fedora_18.yaml @@ -0,0 +1,2 @@ +os_type: fedora +os_version: "18" diff --git a/qa/distros/all/fedora_19.yaml b/qa/distros/all/fedora_19.yaml new file mode 100644 index 00000000000..5bac8aceea2 --- /dev/null +++ b/qa/distros/all/fedora_19.yaml @@ -0,0 +1,2 @@ +os_type: fedora +os_version: "19" diff --git a/qa/distros/all/opensuse_12.2.yaml b/qa/distros/all/opensuse_12.2.yaml new file mode 100644 index 00000000000..ee9f877a26b --- /dev/null +++ b/qa/distros/all/opensuse_12.2.yaml @@ -0,0 +1,2 @@ +os_type: opensuse +os_version: "12.2" diff --git a/qa/distros/all/rhel_6.3.yaml b/qa/distros/all/rhel_6.3.yaml new file mode 100644 index 00000000000..6a8edcd5626 --- /dev/null +++ b/qa/distros/all/rhel_6.3.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "6.3" diff --git a/qa/distros/all/rhel_6.4.yaml b/qa/distros/all/rhel_6.4.yaml new file mode 100644 index 00000000000..5225495834a --- /dev/null +++ b/qa/distros/all/rhel_6.4.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "6.4" diff --git a/qa/distros/all/rhel_6.5.yaml b/qa/distros/all/rhel_6.5.yaml new file mode 100644 index 00000000000..7db54bea1bd --- /dev/null +++ b/qa/distros/all/rhel_6.5.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "6.5" diff --git a/qa/distros/all/sles_11-sp2.yaml b/qa/distros/all/sles_11-sp2.yaml new file mode 100644 index 00000000000..df9c3ca01c8 --- /dev/null +++ b/qa/distros/all/sles_11-sp2.yaml @@ -0,0 +1,2 @@ +os_type: sles +os_version: "11-sp2" diff --git a/qa/distros/all/ubuntu_12.04.yaml b/qa/distros/all/ubuntu_12.04.yaml new file mode 100644 index 00000000000..dbc3a8d9c58 --- /dev/null +++ b/qa/distros/all/ubuntu_12.04.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "12.04" diff --git a/qa/distros/all/ubuntu_12.10.yaml b/qa/distros/all/ubuntu_12.10.yaml new file mode 100644 index 00000000000..ab655676e4c --- /dev/null +++ b/qa/distros/all/ubuntu_12.10.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "12.10" diff --git a/qa/distros/all/ubuntu_14.04.yaml b/qa/distros/all/ubuntu_14.04.yaml new file mode 100644 index 00000000000..309e989feeb --- /dev/null +++ b/qa/distros/all/ubuntu_14.04.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "14.04" diff --git a/qa/distros/supported/centos_6.5.yaml b/qa/distros/supported/centos_6.5.yaml new file mode 120000 index 00000000000..57e60fcf930 --- /dev/null +++ 
b/qa/distros/supported/centos_6.5.yaml @@ -0,0 +1 @@ +../all/centos_6.5.yaml \ No newline at end of file diff --git a/qa/distros/supported/debian_7.0.yaml b/qa/distros/supported/debian_7.0.yaml new file mode 120000 index 00000000000..2a14a987db3 --- /dev/null +++ b/qa/distros/supported/debian_7.0.yaml @@ -0,0 +1 @@ +../all/debian_7.0.yaml \ No newline at end of file diff --git a/qa/distros/supported/ubuntu_12.04.yaml b/qa/distros/supported/ubuntu_12.04.yaml new file mode 120000 index 00000000000..56160b8aa22 --- /dev/null +++ b/qa/distros/supported/ubuntu_12.04.yaml @@ -0,0 +1 @@ +../all/ubuntu_12.04.yaml \ No newline at end of file diff --git a/qa/distros/supported/ubuntu_14.04.yaml b/qa/distros/supported/ubuntu_14.04.yaml new file mode 120000 index 00000000000..cf7fff7a866 --- /dev/null +++ b/qa/distros/supported/ubuntu_14.04.yaml @@ -0,0 +1 @@ +../all/ubuntu_14.04.yaml \ No newline at end of file diff --git a/qa/fs/btrfs.yaml b/qa/fs/btrfs.yaml new file mode 100644 index 00000000000..0b3f6fac7a5 --- /dev/null +++ b/qa/fs/btrfs.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + fs: btrfs + conf: + osd: + osd sloppy crc: true + osd op thread timeout: 60 diff --git a/qa/fs/ext4.yaml b/qa/fs/ext4.yaml new file mode 100644 index 00000000000..fde6751751d --- /dev/null +++ b/qa/fs/ext4.yaml @@ -0,0 +1,3 @@ +overrides: + ceph: + fs: ext4 diff --git a/qa/fs/xfs.yaml b/qa/fs/xfs.yaml new file mode 100644 index 00000000000..0d88e107df4 --- /dev/null +++ b/qa/fs/xfs.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + fs: xfs + conf: + osd: + osd sloppy crc: true \ No newline at end of file diff --git a/qa/overrides/whitelist_wrongly_marked_down.yaml b/qa/overrides/whitelist_wrongly_marked_down.yaml new file mode 100644 index 00000000000..5cf329fa0e1 --- /dev/null +++ b/qa/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + conf: + mds: + debug mds: 20 + debug ms: 1 + client: + debug client: 10 \ No newline at end of file diff --git a/qa/rgw_pool_type/erasure-coded.yaml b/qa/rgw_pool_type/erasure-coded.yaml new file mode 100644 index 00000000000..7c99b7f85c8 --- /dev/null +++ b/qa/rgw_pool_type/erasure-coded.yaml @@ -0,0 +1,5 @@ +overrides: + rgw: + ec-data-pool: true + s3tests: + slow_backend: true diff --git a/qa/rgw_pool_type/replicated.yaml b/qa/rgw_pool_type/replicated.yaml new file mode 100644 index 00000000000..c91709eaae7 --- /dev/null +++ b/qa/rgw_pool_type/replicated.yaml @@ -0,0 +1,3 @@ +overrides: + rgw: + ec-data-pool: false diff --git a/qa/suites/% b/qa/suites/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/big/rados-thrash/% b/qa/suites/big/rados-thrash/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/big/rados-thrash/ceph/ceph.yaml b/qa/suites/big/rados-thrash/ceph/ceph.yaml new file mode 100644 index 00000000000..2030acb9083 --- /dev/null +++ b/qa/suites/big/rados-thrash/ceph/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/big/rados-thrash/clusters/big.yaml b/qa/suites/big/rados-thrash/clusters/big.yaml new file mode 100644 index 00000000000..18197ad8571 --- /dev/null +++ b/qa/suites/big/rados-thrash/clusters/big.yaml @@ -0,0 +1,68 @@ +roles: +- [osd.0, osd.1, osd.2, client.0, mon.a] +- [osd.3, osd.4, osd.5, client.1, mon.b] +- [osd.6, osd.7, osd.8, client.2, mon.c] +- [osd.9, osd.10, osd.11, client.3, mon.d] +- [osd.12, osd.13, osd.14, client.4, mon.e] +- [osd.15, osd.16, osd.17, client.5] +- [osd.18, osd.19, osd.20, client.6] +- [osd.21, 
osd.22, osd.23, client.7] +- [osd.24, osd.25, osd.26, client.8] +- [osd.27, osd.28, osd.29, client.9] +- [osd.30, osd.31, osd.32, client.10] +- [osd.33, osd.34, osd.35, client.11] +- [osd.36, osd.37, osd.38, client.12] +- [osd.39, osd.40, osd.41, client.13] +- [osd.42, osd.43, osd.44, client.14] +- [osd.45, osd.46, osd.47, client.15] +- [osd.48, osd.49, osd.50, client.16] +- [osd.51, osd.52, osd.53, client.17] +- [osd.54, osd.55, osd.56, client.18] +- [osd.57, osd.58, osd.59, client.19] +- [osd.60, osd.61, osd.62, client.20] +- [osd.63, osd.64, osd.65, client.21] +- [osd.66, osd.67, osd.68, client.22] +- [osd.69, osd.70, osd.71, client.23] +- [osd.72, osd.73, osd.74, client.24] +- [osd.75, osd.76, osd.77, client.25] +- [osd.78, osd.79, osd.80, client.26] +- [osd.81, osd.82, osd.83, client.27] +- [osd.84, osd.85, osd.86, client.28] +- [osd.87, osd.88, osd.89, client.29] +- [osd.90, osd.91, osd.92, client.30] +- [osd.93, osd.94, osd.95, client.31] +- [osd.96, osd.97, osd.98, client.32] +- [osd.99, osd.100, osd.101, client.33] +- [osd.102, osd.103, osd.104, client.34] +- [osd.105, osd.106, osd.107, client.35] +- [osd.108, osd.109, osd.110, client.36] +- [osd.111, osd.112, osd.113, client.37] +- [osd.114, osd.115, osd.116, client.38] +- [osd.117, osd.118, osd.119, client.39] +- [osd.120, osd.121, osd.122, client.40] +- [osd.123, osd.124, osd.125, client.41] +- [osd.126, osd.127, osd.128, client.42] +- [osd.129, osd.130, osd.131, client.43] +- [osd.132, osd.133, osd.134, client.44] +- [osd.135, osd.136, osd.137, client.45] +- [osd.138, osd.139, osd.140, client.46] +- [osd.141, osd.142, osd.143, client.47] +- [osd.144, osd.145, osd.146, client.48] +- [osd.147, osd.148, osd.149, client.49] +- [osd.150, osd.151, osd.152, client.50] +#- [osd.153, osd.154, osd.155, client.51] +#- [osd.156, osd.157, osd.158, client.52] +#- [osd.159, osd.160, osd.161, client.53] +#- [osd.162, osd.163, osd.164, client.54] +#- [osd.165, osd.166, osd.167, client.55] +#- [osd.168, osd.169, osd.170, client.56] +#- [osd.171, osd.172, osd.173, client.57] +#- [osd.174, osd.175, osd.176, client.58] +#- [osd.177, osd.178, osd.179, client.59] +#- [osd.180, osd.181, osd.182, client.60] +#- [osd.183, osd.184, osd.185, client.61] +#- [osd.186, osd.187, osd.188, client.62] +#- [osd.189, osd.190, osd.191, client.63] +#- [osd.192, osd.193, osd.194, client.64] +#- [osd.195, osd.196, osd.197, client.65] +#- [osd.198, osd.199, osd.200, client.66] diff --git a/qa/suites/big/rados-thrash/clusters/medium.yaml b/qa/suites/big/rados-thrash/clusters/medium.yaml new file mode 100644 index 00000000000..48b66dd5ca3 --- /dev/null +++ b/qa/suites/big/rados-thrash/clusters/medium.yaml @@ -0,0 +1,22 @@ +roles: +- [osd.0, osd.1, osd.2, client.0, mon.a] +- [osd.3, osd.4, osd.5, client.1, mon.b] +- [osd.6, osd.7, osd.8, client.2, mon.c] +- [osd.9, osd.10, osd.11, client.3, mon.d] +- [osd.12, osd.13, osd.14, client.4, mon.e] +- [osd.15, osd.16, osd.17, client.5] +- [osd.18, osd.19, osd.20, client.6] +- [osd.21, osd.22, osd.23, client.7] +- [osd.24, osd.25, osd.26, client.8] +- [osd.27, osd.28, osd.29, client.9] +- [osd.30, osd.31, osd.32, client.10] +- [osd.33, osd.34, osd.35, client.11] +- [osd.36, osd.37, osd.38, client.12] +- [osd.39, osd.40, osd.41, client.13] +- [osd.42, osd.43, osd.44, client.14] +- [osd.45, osd.46, osd.47, client.15] +- [osd.48, osd.49, osd.50, client.16] +- [osd.51, osd.52, osd.53, client.17] +- [osd.54, osd.55, osd.56, client.18] +- [osd.57, osd.58, osd.59, client.19] +- [osd.60, osd.61, osd.62, client.20] diff --git 
a/qa/suites/big/rados-thrash/clusters/small.yaml b/qa/suites/big/rados-thrash/clusters/small.yaml new file mode 100644 index 00000000000..b5a79906c69 --- /dev/null +++ b/qa/suites/big/rados-thrash/clusters/small.yaml @@ -0,0 +1,6 @@ +roles: +- [osd.0, osd.1, osd.2, client.0, mon.a] +- [osd.3, osd.4, osd.5, client.1, mon.b] +- [osd.6, osd.7, osd.8, client.2, mon.c] +- [osd.9, osd.10, osd.11, client.3, mon.d] +- [osd.12, osd.13, osd.14, client.4, mon.e] diff --git a/qa/suites/big/rados-thrash/fs/btrfs.yaml b/qa/suites/big/rados-thrash/fs/btrfs.yaml new file mode 100644 index 00000000000..0b3f6fac7a5 --- /dev/null +++ b/qa/suites/big/rados-thrash/fs/btrfs.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + fs: btrfs + conf: + osd: + osd sloppy crc: true + osd op thread timeout: 60 diff --git a/qa/suites/big/rados-thrash/fs/xfs.yaml b/qa/suites/big/rados-thrash/fs/xfs.yaml new file mode 100644 index 00000000000..b4a82911a2f --- /dev/null +++ b/qa/suites/big/rados-thrash/fs/xfs.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + fs: xfs + conf: + osd: + osd sloppy crc: true diff --git a/qa/suites/big/rados-thrash/thrashers/default.yaml b/qa/suites/big/rados-thrash/thrashers/default.yaml new file mode 100644 index 00000000000..d67ff20a693 --- /dev/null +++ b/qa/suites/big/rados-thrash/thrashers/default.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 diff --git a/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml b/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml new file mode 100644 index 00000000000..b73bb6781dc --- /dev/null +++ b/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + ops: 4000 + max_seconds: 3600 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/ceph-deploy/fs/% b/qa/suites/ceph-deploy/fs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/ceph-deploy/fs/distros b/qa/suites/ceph-deploy/fs/distros new file mode 120000 index 00000000000..c5d59352cb5 --- /dev/null +++ b/qa/suites/ceph-deploy/fs/distros @@ -0,0 +1 @@ +../../../distros/supported \ No newline at end of file diff --git a/qa/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_blogbench.yaml new file mode 100644 index 00000000000..ee35e1a56c8 --- /dev/null +++ b/qa/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_blogbench.yaml @@ -0,0 +1,35 @@ +overrides: + ceph-deploy: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - osd.3 + - osd.4 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- ceph-fuse: +- workunit: + clients: + client.0: + - suites/blogbench.sh +exclude_arch: armv7l diff --git a/qa/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_dbench.yaml new file mode 100644 index 00000000000..58f7a5456ae --- /dev/null +++ b/qa/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_dbench.yaml @@ -0,0 +1,35 @@ +overrides: + ceph-deploy: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - 
mon.b + - osd.4 + - osd.3 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- ceph-fuse: +- workunit: + clients: + client.0: + - suites/dbench.sh +exclude_arch: armv7l diff --git a/qa/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..b912ffedf7b --- /dev/null +++ b/qa/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,35 @@ +overrides: + ceph-deploy: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - osd.3 + - osd.4 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- ceph-fuse: +- workunit: + clients: + client.0: + - suites/fsstress.sh +exclude_arch: armv7l diff --git a/qa/suites/ceph-deploy/rados/% b/qa/suites/ceph-deploy/rados/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/ceph-deploy/rados/distros b/qa/suites/ceph-deploy/rados/distros new file mode 120000 index 00000000000..c5d59352cb5 --- /dev/null +++ b/qa/suites/ceph-deploy/rados/distros @@ -0,0 +1 @@ +../../../distros/supported \ No newline at end of file diff --git a/qa/suites/ceph-deploy/rados/tasks/rados_api_tests.yaml b/qa/suites/ceph-deploy/rados/tasks/rados_api_tests.yaml new file mode 100644 index 00000000000..32baa730f73 --- /dev/null +++ b/qa/suites/ceph-deploy/rados/tasks/rados_api_tests.yaml @@ -0,0 +1,33 @@ +overrides: + ceph-deploy: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - osd.3 + - osd.4 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- workunit: + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/ceph-deploy/rados/tasks/rados_python.yaml b/qa/suites/ceph-deploy/rados/tasks/rados_python.yaml new file mode 100644 index 00000000000..634ece27633 --- /dev/null +++ b/qa/suites/ceph-deploy/rados/tasks/rados_python.yaml @@ -0,0 +1,34 @@ +overrides: + ceph-deploy: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - osd.3 + - osd.4 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- workunit: + clients: + client.0: + - rados/test_python.sh + diff --git a/qa/suites/ceph-deploy/rados/tasks/rados_workunit_loadgen_big.yaml b/qa/suites/ceph-deploy/rados/tasks/rados_workunit_loadgen_big.yaml new file mode 100644 index 00000000000..9f3140393b8 --- /dev/null +++ b/qa/suites/ceph-deploy/rados/tasks/rados_workunit_loadgen_big.yaml @@ -0,0 +1,34 @@ +overrides: + ceph-deploy: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - osd.3 + - osd.4 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- workunit: + clients: + all: + - rados/load-gen-big.sh + diff --git a/qa/suites/ceph-deploy/rbd/% b/qa/suites/ceph-deploy/rbd/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/ceph-deploy/rbd/distros b/qa/suites/ceph-deploy/rbd/distros new file 
mode 120000 index 00000000000..c5d59352cb5 --- /dev/null +++ b/qa/suites/ceph-deploy/rbd/distros @@ -0,0 +1 @@ +../../../distros/supported \ No newline at end of file diff --git a/qa/suites/ceph-deploy/rbd/tasks/rbd_api_tests_old_format.yaml b/qa/suites/ceph-deploy/rbd/tasks/rbd_api_tests_old_format.yaml new file mode 100644 index 00000000000..1333358a8ef --- /dev/null +++ b/qa/suites/ceph-deploy/rbd/tasks/rbd_api_tests_old_format.yaml @@ -0,0 +1,33 @@ +overrides: + ceph-deploy: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - osd.3 + - osd.4 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh diff --git a/qa/suites/ceph-deploy/rbd/tasks/rbd_cli_tests.yaml b/qa/suites/ceph-deploy/rbd/tasks/rbd_cli_tests.yaml new file mode 100644 index 00000000000..27eb5299123 --- /dev/null +++ b/qa/suites/ceph-deploy/rbd/tasks/rbd_cli_tests.yaml @@ -0,0 +1,33 @@ +overrides: + ceph-deploy: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - osd.3 + - osd.4 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- workunit: + clients: + client.0: + - rbd/run_cli_tests.sh diff --git a/qa/suites/ceph-deploy/rbd/tasks/rbd_cls_test.yaml b/qa/suites/ceph-deploy/rbd/tasks/rbd_cls_test.yaml new file mode 100644 index 00000000000..1abca13698c --- /dev/null +++ b/qa/suites/ceph-deploy/rbd/tasks/rbd_cls_test.yaml @@ -0,0 +1,33 @@ +overrides: + ceph-deploy: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - osd.3 + - osd.4 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- workunit: + clients: + client.0: + - cls/test_cls_rbd.sh diff --git a/qa/suites/ceph-deploy/rbd/tasks/rbd_python_api_tests.yaml b/qa/suites/ceph-deploy/rbd/tasks/rbd_python_api_tests.yaml new file mode 100644 index 00000000000..9c663f53864 --- /dev/null +++ b/qa/suites/ceph-deploy/rbd/tasks/rbd_python_api_tests.yaml @@ -0,0 +1,33 @@ +overrides: + ceph-deploy: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - osd.3 + - osd.4 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh diff --git a/qa/suites/ceph-deploy/singleton/% b/qa/suites/ceph-deploy/singleton/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/ceph-deploy/singleton/all/basic-test.yaml b/qa/suites/ceph-deploy/singleton/all/basic-test.yaml new file mode 100644 index 00000000000..14711d05856 --- /dev/null +++ b/qa/suites/ceph-deploy/singleton/all/basic-test.yaml @@ -0,0 +1,35 @@ +overrides: + ceph: + conf: + global: + debug ms: 1 + osd: + debug osd: 10 + mon: + debug mon: 10 +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - osd.3 + - osd.4 + - osd.5 +- - mon.c + - osd.6 + - osd.7 + - osd.8 +- - client.0 + +tasks: +- install: + extras: yes +- ssh_keys: +- ceph-deploy: +- workunit: + clients: + client.0: + - 
suites/blogbench.sh + diff --git a/qa/suites/ceph-deploy/singleton/distros b/qa/suites/ceph-deploy/singleton/distros new file mode 120000 index 00000000000..c5d59352cb5 --- /dev/null +++ b/qa/suites/ceph-deploy/singleton/distros @@ -0,0 +1 @@ +../../../distros/supported \ No newline at end of file diff --git a/qa/suites/clusters/samba-basic.yaml b/qa/suites/clusters/samba-basic.yaml new file mode 100644 index 00000000000..caced4a26d1 --- /dev/null +++ b/qa/suites/clusters/samba-basic.yaml @@ -0,0 +1,3 @@ +roles: +- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1] +- [samba.0, client.0, client.1] diff --git a/qa/suites/debug/mds_client.yaml b/qa/suites/debug/mds_client.yaml new file mode 120000 index 00000000000..2550b024ded --- /dev/null +++ b/qa/suites/debug/mds_client.yaml @@ -0,0 +1 @@ +../../../debug/mds_client.yaml \ No newline at end of file diff --git a/qa/suites/dummy/% b/qa/suites/dummy/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/dummy/all/nop.yaml b/qa/suites/dummy/all/nop.yaml new file mode 100644 index 00000000000..e027e553395 --- /dev/null +++ b/qa/suites/dummy/all/nop.yaml @@ -0,0 +1,6 @@ +roles: + - [mon.a, mds.a, osd.0, osd.1, client.0] + +tasks: + - nop: + diff --git a/qa/suites/experimental/multimds/% b/qa/suites/experimental/multimds/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/experimental/multimds/clusters/7-multimds.yaml b/qa/suites/experimental/multimds/clusters/7-multimds.yaml new file mode 100644 index 00000000000..17cfd7b3d79 --- /dev/null +++ b/qa/suites/experimental/multimds/clusters/7-multimds.yaml @@ -0,0 +1,8 @@ +roles: +- [mon.a, mds.a, mds.a-s] +- [mon.b, mds.b, mds.b-s] +- [mon.c, mds.c, mds.c-s] +- [osd.0] +- [osd.1] +- [osd.2] +- [client.0] diff --git a/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml b/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml new file mode 100644 index 00000000000..bee01a83586 --- /dev/null +++ b/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml @@ -0,0 +1,15 @@ +tasks: +- install: +- ceph: + conf: + mds: + mds thrash exports: 1 + mds debug subtrees: 1 + mds debug scatterstat: 1 + mds verify scatter: 1 +- ceph-fuse: +- workunit: + clients: + client.0: + - suites/fsstress.sh + diff --git a/qa/suites/fs/basic/% b/qa/suites/fs/basic/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/fs/basic/clusters/fixed-3.yaml b/qa/suites/fs/basic/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/fs/basic/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic/debug/mds_client.yaml b/qa/suites/fs/basic/debug/mds_client.yaml new file mode 120000 index 00000000000..335c1cafed7 --- /dev/null +++ b/qa/suites/fs/basic/debug/mds_client.yaml @@ -0,0 +1 @@ +../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic/fs/btrfs.yaml b/qa/suites/fs/basic/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/fs/basic/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic/inline/no.yaml b/qa/suites/fs/basic/inline/no.yaml new file mode 100644 index 00000000000..2030acb9083 --- /dev/null +++ b/qa/suites/fs/basic/inline/no.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/fs/basic/inline/yes.yaml b/qa/suites/fs/basic/inline/yes.yaml new 
file mode 100644 index 00000000000..72a285c590f --- /dev/null +++ b/qa/suites/fs/basic/inline/yes.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: +- exec: + client.0: + - ceph mds set inline_data true --yes-i-really-mean-it diff --git a/qa/suites/fs/basic/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/basic/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000000..08f746bf894 --- /dev/null +++ b/qa/suites/fs/basic/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_kernel_untar_build.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_kernel_untar_build.yaml new file mode 100644 index 00000000000..3e99204debb --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_kernel_untar_build.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_misc.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_misc.yaml new file mode 100644 index 00000000000..683d3f592c2 --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_misc.yaml @@ -0,0 +1,7 @@ +tasks: +- ceph-fuse: +- workunit: + timeout: 6h + clients: + all: + - fs/misc diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_misc_test_o_trunc.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_misc_test_o_trunc.yaml new file mode 100644 index 00000000000..c9720a2fd48 --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_misc_test_o_trunc.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - fs/test_o_trunc.sh diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_blogbench.yaml new file mode 100644 index 00000000000..09898e16bda --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_blogbench.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_dbench.yaml new file mode 100644 index 00000000000..ad96b4c5e7f --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_dbench.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_ffsb.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_ffsb.yaml new file mode 100644 index 00000000000..86008160034 --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_ffsb.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + conf: + osd: + filestore flush min: 0 +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..5908d951b2d --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsx.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsx.yaml new file mode 100644 index 00000000000..3c11ed74fc7 --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsx.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsx.sh diff --git 
a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsync.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsync.yaml new file mode 100644 index 00000000000..c6043e209bd --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_fsync.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iogen.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iogen.yaml new file mode 100644 index 00000000000..6989990e22a --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iogen.yaml @@ -0,0 +1,7 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/iogen.sh + diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iozone.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iozone.yaml new file mode 100644 index 00000000000..1e23f670e28 --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_iozone.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: [client.0] +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000000..65bcd0d0333 --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + conf: + client: + debug ms: 1 + debug client: 20 + mds: + debug ms: 1 + debug mds: 20 +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_suites_truncate_delay.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_truncate_delay.yaml new file mode 100644 index 00000000000..911026e13bb --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_suites_truncate_delay.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + conf: + client: + ms_inject_delay_probability: 1 + ms_inject_delay_type: osd + ms_inject_delay_max: 5 + client_oc_max_dirty_age: 1 +tasks: +- ceph-fuse: +- exec: + client.0: + - cd $TESTDIR/mnt.* && dd if=/dev/zero of=./foo count=100 + - sleep 2 + - cd $TESTDIR/mnt.* && truncate --size 0 ./foo diff --git a/qa/suites/fs/basic/tasks/cfuse_workunit_trivial_sync.yaml b/qa/suites/fs/basic/tasks/cfuse_workunit_trivial_sync.yaml new file mode 100644 index 00000000000..9509650c76c --- /dev/null +++ b/qa/suites/fs/basic/tasks/cfuse_workunit_trivial_sync.yaml @@ -0,0 +1,5 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: [fs/misc/trivial_sync.sh] diff --git a/qa/suites/fs/basic/tasks/libcephfs_interface_tests.yaml b/qa/suites/fs/basic/tasks/libcephfs_interface_tests.yaml new file mode 100644 index 00000000000..0b1d41fea5c --- /dev/null +++ b/qa/suites/fs/basic/tasks/libcephfs_interface_tests.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + client.0: + - libcephfs/test.sh diff --git a/qa/suites/fs/basic/tasks/libcephfs_java.yaml b/qa/suites/fs/basic/tasks/libcephfs_java.yaml new file mode 100644 index 00000000000..4330d50965e --- /dev/null +++ b/qa/suites/fs/basic/tasks/libcephfs_java.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + client.0: + - libcephfs-java/test.sh diff --git a/qa/suites/fs/basic/tasks/mds_creation_retry.yaml b/qa/suites/fs/basic/tasks/mds_creation_retry.yaml new file mode 100644 index 00000000000..76ceeafa8e7 --- /dev/null +++ b/qa/suites/fs/basic/tasks/mds_creation_retry.yaml @@ -0,0 +1,7 @@ +tasks: +-mds_creation_failure: +-ceph-fuse: +- workunit: + clients: + all: [fs/misc/trivial_sync.sh] + diff --git 
a/qa/suites/fs/multiclient/% b/qa/suites/fs/multiclient/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/fs/multiclient/clusters/three_clients.yaml b/qa/suites/fs/multiclient/clusters/three_clients.yaml new file mode 100644 index 00000000000..fd2535fd4a0 --- /dev/null +++ b/qa/suites/fs/multiclient/clusters/three_clients.yaml @@ -0,0 +1,5 @@ +roles: +- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2] +- [client.2] +- [client.1] +- [client.0] diff --git a/qa/suites/fs/multiclient/clusters/two_clients.yaml b/qa/suites/fs/multiclient/clusters/two_clients.yaml new file mode 100644 index 00000000000..2258befd8bf --- /dev/null +++ b/qa/suites/fs/multiclient/clusters/two_clients.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2] +- [client.1] +- [client.0] diff --git a/qa/suites/fs/multiclient/debug/mds_client.yaml b/qa/suites/fs/multiclient/debug/mds_client.yaml new file mode 120000 index 00000000000..335c1cafed7 --- /dev/null +++ b/qa/suites/fs/multiclient/debug/mds_client.yaml @@ -0,0 +1 @@ +../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/qa/suites/fs/multiclient/fs/btrfs.yaml b/qa/suites/fs/multiclient/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/fs/multiclient/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/multiclient/mount/ceph-fuse.yaml b/qa/suites/fs/multiclient/mount/ceph-fuse.yaml new file mode 100644 index 00000000000..37ac5b69e61 --- /dev/null +++ b/qa/suites/fs/multiclient/mount/ceph-fuse.yaml @@ -0,0 +1,4 @@ +tasks: +- install: +- ceph: +- ceph-fuse: diff --git a/qa/suites/fs/multiclient/mount/kclient.yaml.disabled b/qa/suites/fs/multiclient/mount/kclient.yaml.disabled new file mode 100644 index 00000000000..04adb48b63f --- /dev/null +++ b/qa/suites/fs/multiclient/mount/kclient.yaml.disabled @@ -0,0 +1,9 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: +- ceph: +- kclient: diff --git a/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled b/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled new file mode 100644 index 00000000000..e486c44c51e --- /dev/null +++ b/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled @@ -0,0 +1,20 @@ +# make sure we get the same MPI version on all hosts +os_type: ubuntu +os_version: "14.04" + +tasks: +- pexec: + clients: + - cd $TESTDIR + - wget http://ceph.com/qa/fsx-mpi.c + - mpicc fsx-mpi.c -o fsx-mpi + - rm fsx-mpi.c + - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt +- ssh_keys: +- mpi: + exec: $TESTDIR/fsx-mpi 1MB -N 50000 -p 10000 -l 1048576 + workdir: $TESTDIR/gmnt +- pexec: + all: + - rm $TESTDIR/gmnt + - rm $TESTDIR/fsx-mpi diff --git a/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml b/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml new file mode 100644 index 00000000000..dcf24247a92 --- /dev/null +++ b/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml @@ -0,0 +1,26 @@ +# make sure we get the same MPI version on all hosts +os_type: ubuntu +os_version: "14.04" + +tasks: +- pexec: + clients: + - cd $TESTDIR + - wget http://ceph.com/qa/ior.tbz2 + - tar xvfj ior.tbz2 + - cd ior + - ./configure + - make + - make install DESTDIR=$TESTDIR/binary/ + - cd $TESTDIR/ + - rm ior.tbz2 + - rm -r ior + - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt +- ssh_keys: +- mpi: + exec: $TESTDIR/binary/usr/local/bin/ior -e -w -r -W -b 10m -a POSIX -o $TESTDIR/gmnt/ior.testfile +- pexec: + all: + - rm -f $TESTDIR/gmnt/ior.testfile + - rm 
-f $TESTDIR/gmnt + - rm -rf $TESTDIR/binary diff --git a/qa/suites/fs/multiclient/tasks/mdtest.yaml b/qa/suites/fs/multiclient/tasks/mdtest.yaml new file mode 100644 index 00000000000..1dd95d954fb --- /dev/null +++ b/qa/suites/fs/multiclient/tasks/mdtest.yaml @@ -0,0 +1,23 @@ +# make sure we get the same MPI version on all hosts +os_type: ubuntu +os_version: "14.04" + +tasks: +- pexec: + clients: + - cd $TESTDIR + - wget http://ceph.com/qa/mdtest-1.9.3.tgz + - mkdir mdtest-1.9.3 + - cd mdtest-1.9.3 + - tar xvfz $TESTDIR/mdtest-1.9.3.tgz + - rm $TESTDIR/mdtest-1.9.3.tgz + - MPI_CC=mpicc make + - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt +- ssh_keys: +- mpi: + exec: $TESTDIR/mdtest-1.9.3/mdtest -d $TESTDIR/gmnt -I 20 -z 5 -b 2 -R +- pexec: + all: + - rm -f $TESTDIR/gmnt + - rm -rf $TESTDIR/mdtest-1.9.3 + - rm -rf $TESTDIR/._mdtest-1.9.3 \ No newline at end of file diff --git a/qa/suites/fs/snaps/% b/qa/suites/fs/snaps/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/fs/snaps/clusters/fixed-3.yaml b/qa/suites/fs/snaps/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/fs/snaps/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/fs/snaps/fs/btrfs.yaml b/qa/suites/fs/snaps/fs/btrfs.yaml new file mode 100644 index 00000000000..4c7af311538 --- /dev/null +++ b/qa/suites/fs/snaps/fs/btrfs.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + fs: btrfs + conf: + osd: + osd op thread timeout: 60 diff --git a/qa/suites/fs/snaps/mount/ceph-fuse.yaml b/qa/suites/fs/snaps/mount/ceph-fuse.yaml new file mode 100644 index 00000000000..37ac5b69e61 --- /dev/null +++ b/qa/suites/fs/snaps/mount/ceph-fuse.yaml @@ -0,0 +1,4 @@ +tasks: +- install: +- ceph: +- ceph-fuse: diff --git a/qa/suites/fs/snaps/tasks/snaptests.yaml b/qa/suites/fs/snaps/tasks/snaptests.yaml new file mode 100644 index 00000000000..7f7b0f21569 --- /dev/null +++ b/qa/suites/fs/snaps/tasks/snaptests.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + all: + - snaps/snaptest-0.sh + - snaps/snaptest-1.sh + - snaps/snaptest-2.sh diff --git a/qa/suites/fs/thrash/% b/qa/suites/fs/thrash/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/fs/thrash/ceph-thrash/default.yaml b/qa/suites/fs/thrash/ceph-thrash/default.yaml new file mode 100644 index 00000000000..aefdf826ce7 --- /dev/null +++ b/qa/suites/fs/thrash/ceph-thrash/default.yaml @@ -0,0 +1,2 @@ +tasks: +- mds_thrash: diff --git a/qa/suites/fs/thrash/ceph/base.yaml b/qa/suites/fs/thrash/ceph/base.yaml new file mode 100644 index 00000000000..2030acb9083 --- /dev/null +++ b/qa/suites/fs/thrash/ceph/base.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml b/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml new file mode 100644 index 00000000000..7e951b95889 --- /dev/null +++ b/qa/suites/fs/thrash/clusters/mds-1active-1standby.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mds.a, osd.3, osd.4, osd.5] +- [client.0, mds.b-s-a] diff --git a/qa/suites/fs/thrash/debug/mds_client.yaml b/qa/suites/fs/thrash/debug/mds_client.yaml new file mode 120000 index 00000000000..335c1cafed7 --- /dev/null +++ b/qa/suites/fs/thrash/debug/mds_client.yaml @@ -0,0 +1 @@ +../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/fs/btrfs.yaml b/qa/suites/fs/thrash/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 
--- /dev/null +++ b/qa/suites/fs/thrash/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/msgr-failures/none.yaml b/qa/suites/fs/thrash/msgr-failures/none.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml b/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml new file mode 100644 index 00000000000..adcebc0baac --- /dev/null +++ b/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 2500 + mds inject delay type: osd mds + ms inject delay probability: .005 + ms inject delay max: 1 diff --git a/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000000..08f746bf894 --- /dev/null +++ b/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..5908d951b2d --- /dev/null +++ b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000000..930bf4a671d --- /dev/null +++ b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml new file mode 100644 index 00000000000..9509650c76c --- /dev/null +++ b/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml @@ -0,0 +1,5 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: [fs/misc/trivial_sync.sh] diff --git a/qa/suites/fs/traceless/% b/qa/suites/fs/traceless/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/fs/traceless/clusters/fixed-3.yaml b/qa/suites/fs/traceless/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/fs/traceless/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/debug/mds_client.yaml b/qa/suites/fs/traceless/debug/mds_client.yaml new file mode 120000 index 00000000000..335c1cafed7 --- /dev/null +++ b/qa/suites/fs/traceless/debug/mds_client.yaml @@ -0,0 +1 @@ +../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/fs/btrfs.yaml b/qa/suites/fs/traceless/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/fs/traceless/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000000..08f746bf894 --- /dev/null +++ b/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git 
a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml new file mode 100644 index 00000000000..ed9d92d5bda --- /dev/null +++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml new file mode 100644 index 00000000000..e678ed47cc6 --- /dev/null +++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml new file mode 100644 index 00000000000..652a3a62f59 --- /dev/null +++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml @@ -0,0 +1,11 @@ +tasks: +- install: +- ceph: + conf: + osd: + filestore flush min: 0 +- ceph-fuse: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..b58487c0785 --- /dev/null +++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/fs/traceless/traceless/50pc.yaml b/qa/suites/fs/traceless/traceless/50pc.yaml new file mode 100644 index 00000000000..e0418bcb2be --- /dev/null +++ b/qa/suites/fs/traceless/traceless/50pc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + mds: + mds inject traceless reply probability: .5 diff --git a/qa/suites/fs/verify/% b/qa/suites/fs/verify/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/fs/verify/clusters/fixed-3.yaml b/qa/suites/fs/verify/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/fs/verify/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/debug/mds_client.yaml b/qa/suites/fs/verify/debug/mds_client.yaml new file mode 120000 index 00000000000..335c1cafed7 --- /dev/null +++ b/qa/suites/fs/verify/debug/mds_client.yaml @@ -0,0 +1 @@ +../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/fs/btrfs.yaml b/qa/suites/fs/verify/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/fs/verify/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000000..08f746bf894 --- /dev/null +++ b/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml new file mode 100644 index 00000000000..73319776f03 --- /dev/null +++ b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml @@ -0,0 +1,12 @@ +tasks: +- install: +- ceph: + conf: 
+ client: + debug client: 1/20 + debug ms: 0/10 +- ceph-fuse: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..b58487c0785 --- /dev/null +++ b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/fs/verify/tasks/libcephfs_interface_tests.yaml b/qa/suites/fs/verify/tasks/libcephfs_interface_tests.yaml new file mode 100644 index 00000000000..22d1f142161 --- /dev/null +++ b/qa/suites/fs/verify/tasks/libcephfs_interface_tests.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - libcephfs/test.sh diff --git a/qa/suites/fs/verify/validater/lockdep.yaml b/qa/suites/fs/verify/validater/lockdep.yaml new file mode 100644 index 00000000000..25f84355c0b --- /dev/null +++ b/qa/suites/fs/verify/validater/lockdep.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + lockdep: true diff --git a/qa/suites/fs/verify/validater/valgrind.yaml b/qa/suites/fs/verify/validater/valgrind.yaml new file mode 100644 index 00000000000..c3d3aed4892 --- /dev/null +++ b/qa/suites/fs/verify/validater/valgrind.yaml @@ -0,0 +1,12 @@ +overrides: + install: + ceph: + flavor: notcmalloc + ceph: + valgrind: + mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] + osd: [--tool=memcheck] + mds: [--tool=memcheck] + ceph-fuse: + client.0: + valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes] diff --git a/qa/suites/hadoop/basic/% b/qa/suites/hadoop/basic/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/hadoop/basic/clusters/fixed-3.yaml b/qa/suites/hadoop/basic/clusters/fixed-3.yaml new file mode 100644 index 00000000000..708d751178c --- /dev/null +++ b/qa/suites/hadoop/basic/clusters/fixed-3.yaml @@ -0,0 +1,5 @@ +roles: +- [mon.0, mds.0, osd.0, hadoop.master.0] +- [mon.1, osd.1, hadoop.slave.0] +- [mon.2, hadoop.slave.1, client.0] + diff --git a/qa/suites/hadoop/basic/tasks/hadoop-internal.yaml b/qa/suites/hadoop/basic/tasks/hadoop-internal.yaml new file mode 100644 index 00000000000..5b52a15d56e --- /dev/null +++ b/qa/suites/hadoop/basic/tasks/hadoop-internal.yaml @@ -0,0 +1,8 @@ +tasks: +- ssh_keys: +- install: +- ceph: +- hadoop: +- workunit: + clients: + client.0: [hadoop-internal-tests] diff --git a/qa/suites/hadoop/basic/tasks/wordcount.yaml b/qa/suites/hadoop/basic/tasks/wordcount.yaml new file mode 100644 index 00000000000..50b29c78d8c --- /dev/null +++ b/qa/suites/hadoop/basic/tasks/wordcount.yaml @@ -0,0 +1,8 @@ +tasks: +- ssh_keys: +- install: +- ceph: +- hadoop: +- workunit: + clients: + client.0: [hadoop-wordcount] diff --git a/qa/suites/install/install.yaml b/qa/suites/install/install.yaml new file mode 100644 index 00000000000..12f1e852290 --- /dev/null +++ b/qa/suites/install/install.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- install: + project: samba + extra_packages: ['samba'] +- ceph: diff --git a/qa/suites/kcephfs/cephfs/% b/qa/suites/kcephfs/cephfs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/kcephfs/cephfs/clusters/fixed-3.yaml b/qa/suites/kcephfs/cephfs/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/kcephfs/cephfs/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No 
newline at end of file diff --git a/qa/suites/kcephfs/cephfs/conf.yaml b/qa/suites/kcephfs/cephfs/conf.yaml new file mode 100644 index 00000000000..30da870b25d --- /dev/null +++ b/qa/suites/kcephfs/cephfs/conf.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false diff --git a/qa/suites/kcephfs/cephfs/fs/btrfs.yaml b/qa/suites/kcephfs/cephfs/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml new file mode 100644 index 00000000000..018a71f78ec --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - direct_io + diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml new file mode 100644 index 00000000000..d969e5561cb --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml new file mode 100644 index 00000000000..858ec334420 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml @@ -0,0 +1,12 @@ +tasks: +- install: +- ceph: + conf: + mds: + debug mds: 20 + debug ms: 1 +- kclient: +- workunit: + clients: + all: + - fs/misc diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml new file mode 100644 index 00000000000..6ec5e36cddb --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - fs/test_o_trunc.sh + diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml new file mode 100644 index 00000000000..77d045e8708 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml new file mode 100644 index 00000000000..2b88af692b4 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml @@ -0,0 +1,11 @@ +tasks: +- install: +- ceph: + conf: + osd: + filestore flush min: 0 +- kclient: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..10b84b8af4e --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml new file mode 
100644 index 00000000000..a0d2e765bdb --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml new file mode 100644 index 00000000000..1b3f4d55501 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml new file mode 100644 index 00000000000..bfe25f2f837 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml new file mode 100644 index 00000000000..305de51e92b --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml new file mode 100644 index 00000000000..3503e12820f --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: [fs/misc/trivial_sync.sh] diff --git a/qa/suites/kcephfs/mixed-clients/% b/qa/suites/kcephfs/mixed-clients/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/kcephfs/mixed-clients/clusters/fixed-3.yaml b/qa/suites/kcephfs/mixed-clients/clusters/fixed-3.yaml new file mode 100644 index 00000000000..e1d3c7b7932 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/clusters/fixed-3.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mds.a, osd.0, osd.1] +- [mon.b, mon.c, osd.2, osd.3, client.0] +- [client.1] diff --git a/qa/suites/kcephfs/mixed-clients/conf.yaml b/qa/suites/kcephfs/mixed-clients/conf.yaml new file mode 100644 index 00000000000..30da870b25d --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/conf.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false diff --git a/qa/suites/kcephfs/mixed-clients/fs/btrfs.yaml b/qa/suites/kcephfs/mixed-clients/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml new file mode 100644 index 00000000000..0121a01c538 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml @@ -0,0 +1,20 @@ +tasks: +- install: +- ceph: +- parallel: + - user-workload + - kclient-workload +user-workload: + sequential: + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/iozone.sh +kclient-workload: + sequential: + - kclient: [client.1] + - workunit: + clients: + client.1: + - suites/dbench.sh 
diff --git a/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml new file mode 100644 index 00000000000..7b0ce5b5d58 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml @@ -0,0 +1,20 @@ +tasks: +- install: +- ceph: +- parallel: + - user-workload + - kclient-workload +user-workload: + sequential: + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/blogbench.sh +kclient-workload: + sequential: + - kclient: [client.1] + - workunit: + clients: + client.1: + - kernel_untar_build.sh diff --git a/qa/suites/kcephfs/thrash/% b/qa/suites/kcephfs/thrash/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/kcephfs/thrash/clusters/fixed-3.yaml b/qa/suites/kcephfs/thrash/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/kcephfs/thrash/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/conf.yaml b/qa/suites/kcephfs/thrash/conf.yaml new file mode 100644 index 00000000000..30da870b25d --- /dev/null +++ b/qa/suites/kcephfs/thrash/conf.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false diff --git a/qa/suites/kcephfs/thrash/fs/btrfs.yaml b/qa/suites/kcephfs/thrash/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/kcephfs/thrash/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/thrashers/default.yaml b/qa/suites/kcephfs/thrash/thrashers/default.yaml new file mode 100644 index 00000000000..14d772583cf --- /dev/null +++ b/qa/suites/kcephfs/thrash/thrashers/default.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +- thrashosds: diff --git a/qa/suites/kcephfs/thrash/thrashers/mon-thrasher.yaml b/qa/suites/kcephfs/thrash/thrashers/mon-thrasher.yaml new file mode 100644 index 00000000000..90612f21865 --- /dev/null +++ b/qa/suites/kcephfs/thrash/thrashers/mon-thrasher.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 diff --git a/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml new file mode 100644 index 00000000000..0c4a1528d08 --- /dev/null +++ b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + conf: + osd: + filestore flush min: 0 +tasks: +- kclient: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml new file mode 100644 index 00000000000..832e0241b27 --- /dev/null +++ b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml @@ -0,0 +1,6 @@ +tasks: +- kclient: +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/knfs/basic/% b/qa/suites/knfs/basic/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/knfs/basic/ceph/base.yaml b/qa/suites/knfs/basic/ceph/base.yaml new file mode 100644 index 00000000000..7e80c462c37 --- /dev/null +++ b/qa/suites/knfs/basic/ceph/base.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: 
+ conf: + global: + ms die on skipped message: false + +tasks: +- install: +- ceph: +- kclient: [client.0] +- knfsd: + client.0: + options: [rw,no_root_squash,async] diff --git a/qa/suites/knfs/basic/clusters/extra-client.yaml b/qa/suites/knfs/basic/clusters/extra-client.yaml new file mode 120000 index 00000000000..1582e308945 --- /dev/null +++ b/qa/suites/knfs/basic/clusters/extra-client.yaml @@ -0,0 +1 @@ +../../../../clusters/extra-client.yaml \ No newline at end of file diff --git a/qa/suites/knfs/basic/fs/btrfs.yaml b/qa/suites/knfs/basic/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/knfs/basic/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/knfs/basic/mount/v3.yaml b/qa/suites/knfs/basic/mount/v3.yaml new file mode 100644 index 00000000000..1b61119242b --- /dev/null +++ b/qa/suites/knfs/basic/mount/v3.yaml @@ -0,0 +1,5 @@ +tasks: +- nfs: + client.1: + server: client.0 + options: [rw,hard,intr,nfsvers=3] diff --git a/qa/suites/knfs/basic/mount/v4.yaml b/qa/suites/knfs/basic/mount/v4.yaml new file mode 100644 index 00000000000..88405666bfb --- /dev/null +++ b/qa/suites/knfs/basic/mount/v4.yaml @@ -0,0 +1,5 @@ +tasks: +- nfs: + client.1: + server: client.0 + options: [rw,hard,intr,nfsvers=4] diff --git a/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml b/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml new file mode 100644 index 00000000000..b9c0a5e05a3 --- /dev/null +++ b/qa/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + timeout: 6h + clients: + client.1: + - kernel_untar_build.sh diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml new file mode 100644 index 00000000000..135c4a74009 --- /dev/null +++ b/qa/suites/knfs/basic/tasks/nfs_workunit_misc.yaml @@ -0,0 +1,11 @@ +tasks: +- workunit: + clients: + client.1: + - fs/misc/chmod.sh + - fs/misc/i_complete_vs_rename.sh + - fs/misc/trivial_sync.sh + #- fs/misc/multiple_rsync.sh + #- fs/misc/xattrs.sh +# Once we can run multiple_rsync.sh and xattrs.sh we can change to this +# - misc diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml new file mode 100644 index 00000000000..e554a3d9a06 --- /dev/null +++ b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.1: + - suites/blogbench.sh diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml new file mode 100644 index 00000000000..1da1b768d02 --- /dev/null +++ b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.1: + - suites/dbench-short.sh diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml new file mode 100644 index 00000000000..3090f91ea43 --- /dev/null +++ b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + conf: + osd: + filestore flush min: 0 +tasks: +- workunit: + clients: + client.1: + - suites/ffsb.sh diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..bbe7b7a4045 --- /dev/null +++ 
b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.1: + - suites/fsstress.sh diff --git a/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml new file mode 100644 index 00000000000..7c3eec2ff3e --- /dev/null +++ b/qa/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.1: + - suites/iozone.sh diff --git a/qa/suites/krbd/rbd-nomount/% b/qa/suites/krbd/rbd-nomount/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml b/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/krbd/rbd-nomount/conf.yaml b/qa/suites/krbd/rbd-nomount/conf.yaml new file mode 100644 index 00000000000..30da870b25d --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/conf.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false diff --git a/qa/suites/krbd/rbd-nomount/fs/btrfs.yaml b/qa/suites/krbd/rbd-nomount/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/krbd/rbd-nomount/install/ceph.yaml b/qa/suites/krbd/rbd-nomount/install/ceph.yaml new file mode 100644 index 00000000000..2030acb9083 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/install/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml b/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml b/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml new file mode 100644 index 00000000000..86f8dde8a0e --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml new file mode 100644 index 00000000000..675b98e73a5 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml @@ -0,0 +1,10 @@ +tasks: +- workunit: + clients: + all: + - rbd/concurrent.sh +# Options for rbd/concurrent.sh (default values shown) +# env: +# RBD_CONCURRENT_ITER: 100 +# RBD_CONCURRENT_COUNT: 5 +# RBD_CONCURRENT_DELAY: 5 diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml new file mode 100644 index 00000000000..e5017e118d1 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml @@ -0,0 +1,15 @@ +tasks: +- workunit: + clients: + all: + - rbd/image_read.sh +# Options for rbd/image_read.sh (default values shown) +# env: +# IMAGE_READ_LOCAL_FILES: 'false' +# IMAGE_READ_FORMAT: '2' +# IMAGE_READ_VERBOSE: 'true' +# IMAGE_READ_PAGE_SIZE: '4096' +# IMAGE_READ_OBJECT_ORDER: '22' +# IMAGE_READ_TEST_CLONES: 'true' +# IMAGE_READ_DOUBLE_ORDER: 'true' +# IMAGE_READ_HALF_ORDER: 'false' diff --git 
a/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml new file mode 100644 index 00000000000..aa155827c69 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/kernel.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml new file mode 100644 index 00000000000..c1529398b9e --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/map-snapshot-io.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml new file mode 100644 index 00000000000..c2160997c81 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/map-unmap.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml new file mode 100644 index 00000000000..c493cfaf420 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + all: + - rbd/simple_big.sh + diff --git a/qa/suites/krbd/rbd/% b/qa/suites/krbd/rbd/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/krbd/rbd/clusters/fixed-3.yaml b/qa/suites/krbd/rbd/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/krbd/rbd/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/krbd/rbd/conf.yaml b/qa/suites/krbd/rbd/conf.yaml new file mode 100644 index 00000000000..30da870b25d --- /dev/null +++ b/qa/suites/krbd/rbd/conf.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false diff --git a/qa/suites/krbd/rbd/fs/btrfs.yaml b/qa/suites/krbd/rbd/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/krbd/rbd/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/krbd/rbd/msgr-failures/few.yaml b/qa/suites/krbd/rbd/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/krbd/rbd/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/krbd/rbd/msgr-failures/many.yaml b/qa/suites/krbd/rbd/msgr-failures/many.yaml new file mode 100644 index 00000000000..86f8dde8a0e --- /dev/null +++ b/qa/suites/krbd/rbd/msgr-failures/many.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml new file mode 100644 index 00000000000..ef2a35dcc1d --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- rbd: + all: +- workunit: + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml new file mode 100644 index 00000000000..d779eea23ca --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- rbd: + all: +- workunit: + clients: + all: + - 
suites/dbench.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml new file mode 100644 index 00000000000..5204bb87ffe --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- rbd: + all: + image_size: 20480 +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..f9d62fefcac --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- rbd: + all: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_btrfs.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_btrfs.yaml new file mode 100644 index 00000000000..f3930a8986a --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_btrfs.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- rbd: + all: + fs_type: btrfs +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml new file mode 100644 index 00000000000..f765b74a6c7 --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- rbd: + all: + fs_type: ext4 +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml new file mode 100644 index 00000000000..98c0849c57e --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- rbd: + all: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml new file mode 100644 index 00000000000..eb8f18d60de --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- rbd: + all: + image_size: 20480 +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml new file mode 100644 index 00000000000..7c2796b2a88 --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- rbd: + all: +- workunit: + clients: + all: [fs/misc/trivial_sync.sh] diff --git a/qa/suites/krbd/singleton/% b/qa/suites/krbd/singleton/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/krbd/singleton/conf.yaml b/qa/suites/krbd/singleton/conf.yaml new file mode 100644 index 00000000000..30da870b25d --- /dev/null +++ b/qa/suites/krbd/singleton/conf.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false diff --git a/qa/suites/krbd/singleton/fs/btrfs.yaml b/qa/suites/krbd/singleton/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/krbd/singleton/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/krbd/singleton/msgr-failures/few.yaml b/qa/suites/krbd/singleton/msgr-failures/few.yaml new file mode 100644 index 
00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/krbd/singleton/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/krbd/singleton/msgr-failures/many.yaml b/qa/suites/krbd/singleton/msgr-failures/many.yaml new file mode 100644 index 00000000000..86f8dde8a0e --- /dev/null +++ b/qa/suites/krbd/singleton/msgr-failures/many.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 diff --git a/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml b/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml new file mode 100644 index 00000000000..2adb17c475c --- /dev/null +++ b/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml @@ -0,0 +1,22 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mds.a, osd.3, osd.4, osd.5] +- [client.0] +- [client.1] +- [client.2] +tasks: +- install: +- ceph: +- rbd.xfstests: + client.0: + tests: 1-9 11-15 17 19-21 26-29 31-34 41 46-54 56 61 63-67 69-70 74-76 78-79 84-89 91 + test_image: 'test_image-0' + scratch_image: 'scratch_image-0' + client.1: + tests: 92 100 103 105 108 110 116-121 124 126 129-132 + test_image: 'test_image-1' + scratch_image: 'scratch_image-1' + client.2: + tests: 133-135 137-141 164-167 184 187-190 192 194 196 199 201 203 214-216 220-227 234 236-238 241 243-249 253 257-259 261-262 269 273 275 277-278 + test_image: 'test_image-2' + scratch_image: 'scratch_image-2' diff --git a/qa/suites/krbd/thrash/% b/qa/suites/krbd/thrash/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/krbd/thrash/clusters/fixed-3.yaml b/qa/suites/krbd/thrash/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/krbd/thrash/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/krbd/thrash/conf.yaml b/qa/suites/krbd/thrash/conf.yaml new file mode 100644 index 00000000000..30da870b25d --- /dev/null +++ b/qa/suites/krbd/thrash/conf.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false diff --git a/qa/suites/krbd/thrash/fs/btrfs.yaml b/qa/suites/krbd/thrash/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/krbd/thrash/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/krbd/thrash/thrashers/default.yaml b/qa/suites/krbd/thrash/thrashers/default.yaml new file mode 100644 index 00000000000..14d772583cf --- /dev/null +++ b/qa/suites/krbd/thrash/thrashers/default.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +- thrashosds: diff --git a/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml b/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml new file mode 100644 index 00000000000..90612f21865 --- /dev/null +++ b/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 diff --git a/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml b/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml new file mode 100644 index 00000000000..4ae7d690905 --- /dev/null +++ b/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml @@ -0,0 +1,8 @@ +tasks: +- rbd: + all: + image_size: 20480 +- workunit: + clients: + all: + - suites/ffsb.sh diff --git 
a/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled b/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled new file mode 100644 index 00000000000..d61ede1bd66 --- /dev/null +++ b/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled @@ -0,0 +1,8 @@ +tasks: +- rbd: + all: + image_size: 20480 +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/marginal/basic/% b/qa/suites/marginal/basic/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/marginal/basic/clusters/fixed-3.yaml b/qa/suites/marginal/basic/clusters/fixed-3.yaml new file mode 100644 index 00000000000..0038432afa7 --- /dev/null +++ b/qa/suites/marginal/basic/clusters/fixed-3.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mds.a, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/marginal/basic/fs/btrfs.yaml b/qa/suites/marginal/basic/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/marginal/basic/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml new file mode 100644 index 00000000000..4f25d806313 --- /dev/null +++ b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml new file mode 100644 index 00000000000..a0d2e765bdb --- /dev/null +++ b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/marginal/fs-misc/% b/qa/suites/marginal/fs-misc/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/marginal/fs-misc/clusters/two_clients.yaml b/qa/suites/marginal/fs-misc/clusters/two_clients.yaml new file mode 100644 index 00000000000..2258befd8bf --- /dev/null +++ b/qa/suites/marginal/fs-misc/clusters/two_clients.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2] +- [client.1] +- [client.0] diff --git a/qa/suites/marginal/fs-misc/fs/btrfs.yaml b/qa/suites/marginal/fs-misc/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/marginal/fs-misc/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/marginal/fs-misc/tasks/locktest.yaml b/qa/suites/marginal/fs-misc/tasks/locktest.yaml new file mode 100644 index 00000000000..444bb1f19b3 --- /dev/null +++ b/qa/suites/marginal/fs-misc/tasks/locktest.yaml @@ -0,0 +1,5 @@ +tasks: +- install: +- ceph: +- kclient: +- locktest: [client.0, client.1] diff --git a/qa/suites/marginal/mds_restart/% b/qa/suites/marginal/mds_restart/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/marginal/mds_restart/clusters/one_mds.yaml b/qa/suites/marginal/mds_restart/clusters/one_mds.yaml new file mode 100644 index 00000000000..9e11c02a36c --- /dev/null +++ b/qa/suites/marginal/mds_restart/clusters/one_mds.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2] +- [mds.a] +- [client.0] diff --git 
a/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml b/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml new file mode 100644 index 00000000000..d086d4cf8d3 --- /dev/null +++ b/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml @@ -0,0 +1,11 @@ +tasks: +- install: +- ceph: + conf: + mds: + mds log segment size: 16384 + mds log max segments: 1 +- restart: + exec: + client.0: + - test-backtraces.py diff --git a/qa/suites/marginal/multimds/% b/qa/suites/marginal/multimds/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml b/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml new file mode 100644 index 00000000000..088d9f0d31d --- /dev/null +++ b/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml @@ -0,0 +1,5 @@ +roles: +- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2] +- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5] +- [client.0] +- [client.1] diff --git a/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml b/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml new file mode 100644 index 00000000000..be824f0f554 --- /dev/null +++ b/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml @@ -0,0 +1,5 @@ +roles: +- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2] +- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5] +- [client.0] +- [client.1] diff --git a/qa/suites/marginal/multimds/fs/btrfs.yaml b/qa/suites/marginal/multimds/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/marginal/multimds/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml b/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml new file mode 100644 index 00000000000..37ac5b69e61 --- /dev/null +++ b/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml @@ -0,0 +1,4 @@ +tasks: +- install: +- ceph: +- ceph-fuse: diff --git a/qa/suites/marginal/multimds/mounts/kclient.yaml b/qa/suites/marginal/multimds/mounts/kclient.yaml new file mode 100644 index 00000000000..c18db8f5ea6 --- /dev/null +++ b/qa/suites/marginal/multimds/mounts/kclient.yaml @@ -0,0 +1,4 @@ +tasks: +- install: +- ceph: +- kclient: diff --git a/qa/suites/marginal/multimds/tasks/workunit_misc.yaml b/qa/suites/marginal/multimds/tasks/workunit_misc.yaml new file mode 100644 index 00000000000..aa62b9e8c3a --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_misc.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - fs/misc diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml new file mode 100644 index 00000000000..4c1fcc11ed9 --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml new file mode 100644 index 00000000000..41b2bc8edaa --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..ddb18fb791a --- /dev/null +++ 
b/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml new file mode 100644 index 00000000000..7efa1adb82d --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml new file mode 100644 index 00000000000..e8882134c72 --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml new file mode 100644 index 00000000000..3aa5f8825ac --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml @@ -0,0 +1,15 @@ +tasks: +- install: +- ceph: + conf: + client: + ms_inject_delay_probability: 1 + ms_inject_delay_type: osd + ms_inject_delay_max: 5 + client_oc_max_dirty_age: 1 +- ceph-fuse: +- exec: + client.0: + - dd if=/dev/zero of=./foo count=100 + - sleep 2 + - truncate --size 0 ./foo diff --git a/qa/suites/marginal/multimds/thrash/exports.yaml b/qa/suites/marginal/multimds/thrash/exports.yaml new file mode 100644 index 00000000000..240b46dfd8a --- /dev/null +++ b/qa/suites/marginal/multimds/thrash/exports.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + mds: + mds thrash exports: 1 diff --git a/qa/suites/marginal/multimds/thrash/normal.yaml b/qa/suites/marginal/multimds/thrash/normal.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml b/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml new file mode 100644 index 00000000000..e1d3c7b7932 --- /dev/null +++ b/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mds.a, osd.0, osd.1] +- [mon.b, mon.c, osd.2, osd.3, client.0] +- [client.1] diff --git a/qa/suites/mixed-clients/basic/fs/btrfs.yaml b/qa/suites/mixed-clients/basic/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/mixed-clients/basic/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml new file mode 100644 index 00000000000..bb347be7fd7 --- /dev/null +++ b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml @@ -0,0 +1,26 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: + branch: dumpling +- ceph: +- parallel: + - user-workload + - kclient-workload +user-workload: + sequential: + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/iozone.sh +kclient-workload: + sequential: + - kclient: [client.1] + - workunit: + clients: + client.1: + - suites/dbench.sh diff --git a/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml new file mode 100644 index 00000000000..2c32a61e864 --- /dev/null +++ 
b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml @@ -0,0 +1,26 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: + branch: dumpling +- ceph: +- parallel: + - user-workload + - kclient-workload +user-workload: + sequential: + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/blogbench.sh +kclient-workload: + sequential: + - kclient: [client.1] + - workunit: + clients: + client.1: + - kernel_untar_build.sh diff --git a/qa/suites/mount/fuse.yaml b/qa/suites/mount/fuse.yaml new file mode 100644 index 00000000000..d00ffdb4804 --- /dev/null +++ b/qa/suites/mount/fuse.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: [client.0] +- samba: + samba.0: + ceph: "{testdir}/mnt.0" + diff --git a/qa/suites/mount/kclient.yaml b/qa/suites/mount/kclient.yaml new file mode 100644 index 00000000000..56590adcb4f --- /dev/null +++ b/qa/suites/mount/kclient.yaml @@ -0,0 +1,6 @@ +tasks: +- kclient: [client.0] +- samba: + samba.0: + ceph: "{testdir}/mnt.0" + diff --git a/qa/suites/mount/native.yaml b/qa/suites/mount/native.yaml new file mode 100644 index 00000000000..09b8c1c4e3d --- /dev/null +++ b/qa/suites/mount/native.yaml @@ -0,0 +1,2 @@ +tasks: +- samba: diff --git a/qa/suites/mount/noceph.yaml b/qa/suites/mount/noceph.yaml new file mode 100644 index 00000000000..3cad4740d8b --- /dev/null +++ b/qa/suites/mount/noceph.yaml @@ -0,0 +1,5 @@ +tasks: +- localdir: [client.0] +- samba: + samba.0: + ceph: "{testdir}/mnt.0" diff --git a/qa/suites/multimds/basic/% b/qa/suites/multimds/basic/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/multimds/basic/clusters/3-mds.yaml b/qa/suites/multimds/basic/clusters/3-mds.yaml new file mode 100644 index 00000000000..c655b90c81c --- /dev/null +++ b/qa/suites/multimds/basic/clusters/3-mds.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2] +- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/multimds/basic/clusters/9-mds.yaml b/qa/suites/multimds/basic/clusters/9-mds.yaml new file mode 100644 index 00000000000..ed554c9fe3c --- /dev/null +++ b/qa/suites/multimds/basic/clusters/9-mds.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2] +- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/multimds/basic/debug/mds_client.yaml b/qa/suites/multimds/basic/debug/mds_client.yaml new file mode 120000 index 00000000000..335c1cafed7 --- /dev/null +++ b/qa/suites/multimds/basic/debug/mds_client.yaml @@ -0,0 +1 @@ +../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/fs/btrfs.yaml b/qa/suites/multimds/basic/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/multimds/basic/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/inline/no.yaml b/qa/suites/multimds/basic/inline/no.yaml new file mode 100644 index 00000000000..2030acb9083 --- /dev/null +++ b/qa/suites/multimds/basic/inline/no.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/multimds/basic/inline/yes.yaml b/qa/suites/multimds/basic/inline/yes.yaml new file mode 100644 index 00000000000..72a285c590f --- /dev/null +++ b/qa/suites/multimds/basic/inline/yes.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: +- exec: + client.0: + - ceph mds set inline_data true --yes-i-really-mean-it diff 
--git a/qa/suites/multimds/basic/mount/cfuse.yaml b/qa/suites/multimds/basic/mount/cfuse.yaml new file mode 100644 index 00000000000..e3c34a1f604 --- /dev/null +++ b/qa/suites/multimds/basic/mount/cfuse.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph-fuse: diff --git a/qa/suites/multimds/basic/mount/kclient.yaml b/qa/suites/multimds/basic/mount/kclient.yaml new file mode 100644 index 00000000000..f00f16aea22 --- /dev/null +++ b/qa/suites/multimds/basic/mount/kclient.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- kclient: diff --git a/qa/suites/multimds/basic/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/multimds/basic/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000000..08f746bf894 --- /dev/null +++ b/qa/suites/multimds/basic/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/tasks/kernel_untar_build.yaml b/qa/suites/multimds/basic/tasks/kernel_untar_build.yaml new file mode 100644 index 00000000000..8dbc24a9feb --- /dev/null +++ b/qa/suites/multimds/basic/tasks/kernel_untar_build.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + conf: + client: + fuse_default_permissions: 0 +tasks: +- workunit: + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/multimds/basic/tasks/misc.yaml b/qa/suites/multimds/basic/tasks/misc.yaml new file mode 100644 index 00000000000..6c8327bb0d7 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/misc.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + timeout: 5h + clients: + all: + - fs/misc diff --git a/qa/suites/multimds/basic/tasks/misc_test_o_trunc.yaml b/qa/suites/multimds/basic/tasks/misc_test_o_trunc.yaml new file mode 100644 index 00000000000..c9de5c38637 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/misc_test_o_trunc.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - fs/test_o_trunc.sh diff --git a/qa/suites/multimds/basic/tasks/suites_blogbench.yaml b/qa/suites/multimds/basic/tasks/suites_blogbench.yaml new file mode 100644 index 00000000000..4c1fcc11ed9 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/suites_blogbench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/multimds/basic/tasks/suites_dbench.yaml b/qa/suites/multimds/basic/tasks/suites_dbench.yaml new file mode 100644 index 00000000000..41b2bc8edaa --- /dev/null +++ b/qa/suites/multimds/basic/tasks/suites_dbench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/multimds/basic/tasks/suites_ffsb.yaml b/qa/suites/multimds/basic/tasks/suites_ffsb.yaml new file mode 100644 index 00000000000..4a2a627fe5d --- /dev/null +++ b/qa/suites/multimds/basic/tasks/suites_ffsb.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + conf: + osd: + filestore flush min: 0 +tasks: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/multimds/basic/tasks/suites_fsstress.yaml b/qa/suites/multimds/basic/tasks/suites_fsstress.yaml new file mode 100644 index 00000000000..ddb18fb791a --- /dev/null +++ b/qa/suites/multimds/basic/tasks/suites_fsstress.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/multimds/basic/tasks/suites_fsx.yaml b/qa/suites/multimds/basic/tasks/suites_fsx.yaml new file mode 100644 index 00000000000..8b2b1ab5c14 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/suites_fsx.yaml @@ -0,0 
+1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/multimds/basic/tasks/suites_fsync.yaml b/qa/suites/multimds/basic/tasks/suites_fsync.yaml new file mode 100644 index 00000000000..7efa1adb82d --- /dev/null +++ b/qa/suites/multimds/basic/tasks/suites_fsync.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/multimds/basic/tasks/suites_iogen.yaml b/qa/suites/multimds/basic/tasks/suites_iogen.yaml new file mode 100644 index 00000000000..d45d4ea3c3f --- /dev/null +++ b/qa/suites/multimds/basic/tasks/suites_iogen.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/iogen.sh diff --git a/qa/suites/multimds/basic/tasks/suites_iozone.yaml b/qa/suites/multimds/basic/tasks/suites_iozone.yaml new file mode 100644 index 00000000000..9270f3c51e2 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/suites_iozone.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/multimds/basic/tasks/suites_pjd.yaml b/qa/suites/multimds/basic/tasks/suites_pjd.yaml new file mode 100644 index 00000000000..de21f7c3464 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/suites_pjd.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + client: + debug ms: 1 + debug client: 20 + mds: + debug ms: 1 + debug mds: 20 +tasks: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/multimds/basic/tasks/suites_truncate_delay.yaml b/qa/suites/multimds/basic/tasks/suites_truncate_delay.yaml new file mode 100644 index 00000000000..ac5c9b13901 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/suites_truncate_delay.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + client: + ms_inject_delay_probability: 1 + ms_inject_delay_type: osd + ms_inject_delay_max: 5 + client_oc_max_dirty_age: 1 +tasks: +- exec: + client.0: + - dd if=/dev/zero of=./foo count=100 + - sleep 2 + - truncate --size 0 ./foo diff --git a/qa/suites/multimds/basic/tasks/trivial_sync.yaml b/qa/suites/multimds/basic/tasks/trivial_sync.yaml new file mode 100644 index 00000000000..36e7411b638 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/trivial_sync.yaml @@ -0,0 +1,4 @@ +tasks: +- workunit: + clients: + all: [fs/misc/trivial_sync.sh] diff --git a/qa/suites/multimds/libcephfs/% b/qa/suites/multimds/libcephfs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/multimds/libcephfs/clusters/3-mds.yaml b/qa/suites/multimds/libcephfs/clusters/3-mds.yaml new file mode 100644 index 00000000000..c655b90c81c --- /dev/null +++ b/qa/suites/multimds/libcephfs/clusters/3-mds.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2] +- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/multimds/libcephfs/clusters/9-mds.yaml b/qa/suites/multimds/libcephfs/clusters/9-mds.yaml new file mode 100644 index 00000000000..ed554c9fe3c --- /dev/null +++ b/qa/suites/multimds/libcephfs/clusters/9-mds.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2] +- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/multimds/libcephfs/debug/mds_client.yaml b/qa/suites/multimds/libcephfs/debug/mds_client.yaml new file mode 120000 index 00000000000..335c1cafed7 --- /dev/null +++ b/qa/suites/multimds/libcephfs/debug/mds_client.yaml @@ -0,0 +1 @@ +../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/qa/suites/multimds/libcephfs/fs/btrfs.yaml 
b/qa/suites/multimds/libcephfs/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/multimds/libcephfs/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/multimds/libcephfs/inline/no.yaml b/qa/suites/multimds/libcephfs/inline/no.yaml new file mode 100644 index 00000000000..2030acb9083 --- /dev/null +++ b/qa/suites/multimds/libcephfs/inline/no.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/multimds/libcephfs/inline/yes.yaml b/qa/suites/multimds/libcephfs/inline/yes.yaml new file mode 100644 index 00000000000..72a285c590f --- /dev/null +++ b/qa/suites/multimds/libcephfs/inline/yes.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: +- exec: + client.0: + - ceph mds set inline_data true --yes-i-really-mean-it diff --git a/qa/suites/multimds/libcephfs/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/multimds/libcephfs/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000000..08f746bf894 --- /dev/null +++ b/qa/suites/multimds/libcephfs/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/multimds/libcephfs/tasks/libcephfs_interface_tests.yaml b/qa/suites/multimds/libcephfs/tasks/libcephfs_interface_tests.yaml new file mode 100644 index 00000000000..0b1d41fea5c --- /dev/null +++ b/qa/suites/multimds/libcephfs/tasks/libcephfs_interface_tests.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + client.0: + - libcephfs/test.sh diff --git a/qa/suites/multimds/libcephfs/tasks/libcephfs_java.yaml b/qa/suites/multimds/libcephfs/tasks/libcephfs_java.yaml new file mode 100644 index 00000000000..4330d50965e --- /dev/null +++ b/qa/suites/multimds/libcephfs/tasks/libcephfs_java.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + client.0: + - libcephfs-java/test.sh diff --git a/qa/suites/multimds/libcephfs/tasks/mds_creation_retry.yaml b/qa/suites/multimds/libcephfs/tasks/mds_creation_retry.yaml new file mode 100644 index 00000000000..cd87f28ad08 --- /dev/null +++ b/qa/suites/multimds/libcephfs/tasks/mds_creation_retry.yaml @@ -0,0 +1,6 @@ +tasks: +-mds_creation_failure: +-ceph-fuse: +- workunit: + clients: + all: [fs/misc/trivial_sync.sh] diff --git a/qa/suites/multimds/verify/% b/qa/suites/multimds/verify/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/multimds/verify/clusters/3-mds.yaml b/qa/suites/multimds/verify/clusters/3-mds.yaml new file mode 100644 index 00000000000..c655b90c81c --- /dev/null +++ b/qa/suites/multimds/verify/clusters/3-mds.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2] +- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/multimds/verify/clusters/9-mds.yaml b/qa/suites/multimds/verify/clusters/9-mds.yaml new file mode 100644 index 00000000000..ed554c9fe3c --- /dev/null +++ b/qa/suites/multimds/verify/clusters/9-mds.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2] +- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/multimds/verify/debug/mds_client.yaml b/qa/suites/multimds/verify/debug/mds_client.yaml new file mode 120000 index 00000000000..335c1cafed7 --- /dev/null +++ b/qa/suites/multimds/verify/debug/mds_client.yaml @@ -0,0 +1 @@ +../../../../debug/mds_client.yaml \ No newline at end of file diff 
--git a/qa/suites/multimds/verify/fs/btrfs.yaml b/qa/suites/multimds/verify/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/multimds/verify/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/multimds/verify/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/multimds/verify/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000000..08f746bf894 --- /dev/null +++ b/qa/suites/multimds/verify/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_dbench.yaml new file mode 100644 index 00000000000..73319776f03 --- /dev/null +++ b/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_dbench.yaml @@ -0,0 +1,12 @@ +tasks: +- install: +- ceph: + conf: + client: + debug client: 1/20 + debug ms: 0/10 +- ceph-fuse: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..b58487c0785 --- /dev/null +++ b/qa/suites/multimds/verify/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/multimds/verify/tasks/libcephfs_interface_tests.yaml b/qa/suites/multimds/verify/tasks/libcephfs_interface_tests.yaml new file mode 100644 index 00000000000..22d1f142161 --- /dev/null +++ b/qa/suites/multimds/verify/tasks/libcephfs_interface_tests.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - libcephfs/test.sh diff --git a/qa/suites/multimds/verify/validater/lockdep.yaml b/qa/suites/multimds/verify/validater/lockdep.yaml new file mode 100644 index 00000000000..25f84355c0b --- /dev/null +++ b/qa/suites/multimds/verify/validater/lockdep.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + lockdep: true diff --git a/qa/suites/multimds/verify/validater/valgrind.yaml b/qa/suites/multimds/verify/validater/valgrind.yaml new file mode 100644 index 00000000000..c3d3aed4892 --- /dev/null +++ b/qa/suites/multimds/verify/validater/valgrind.yaml @@ -0,0 +1,12 @@ +overrides: + install: + ceph: + flavor: notcmalloc + ceph: + valgrind: + mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] + osd: [--tool=memcheck] + mds: [--tool=memcheck] + ceph-fuse: + client.0: + valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes] diff --git a/qa/suites/powercycle/osd/% b/qa/suites/powercycle/osd/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml b/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml new file mode 100644 index 00000000000..d5503a40c86 --- /dev/null +++ b/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml @@ -0,0 +1,5 @@ +roles: +- [mon.0, mon.1, mon.2, mds.0, client.0] +- [osd.0] +- [osd.1] +- [osd.2] diff --git a/qa/suites/powercycle/osd/fs/btrfs.yaml b/qa/suites/powercycle/osd/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/powercycle/osd/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git 
a/qa/suites/powercycle/osd/fs/ext4.yaml b/qa/suites/powercycle/osd/fs/ext4.yaml new file mode 120000 index 00000000000..65d71886933 --- /dev/null +++ b/qa/suites/powercycle/osd/fs/ext4.yaml @@ -0,0 +1 @@ +../../../../fs/ext4.yaml \ No newline at end of file diff --git a/qa/suites/powercycle/osd/fs/xfs.yaml b/qa/suites/powercycle/osd/fs/xfs.yaml new file mode 120000 index 00000000000..4c28d731f6b --- /dev/null +++ b/qa/suites/powercycle/osd/fs/xfs.yaml @@ -0,0 +1 @@ +../../../../fs/xfs.yaml \ No newline at end of file diff --git a/qa/suites/powercycle/osd/powercycle/default.yaml b/qa/suites/powercycle/osd/powercycle/default.yaml new file mode 100644 index 00000000000..b632e83e621 --- /dev/null +++ b/qa/suites/powercycle/osd/powercycle/default.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: +- thrashosds: + chance_down: 1.0 + powercycle: true + timeout: 600 diff --git a/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml b/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml new file mode 100644 index 00000000000..b1ddad8d3b0 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + client.0: + admin socket: /var/run/ceph/ceph-$name.asok +tasks: +- radosbench: + clients: [client.0] + time: 60 +- admin_socket: + client.0: + objecter_requests: + test: "http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}" diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml new file mode 100644 index 00000000000..3e99204debb --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml new file mode 100644 index 00000000000..be3f1331990 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - fs/misc diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml new file mode 100644 index 00000000000..9f3fa7b1887 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + osd: + filestore flush min: 0 + mds: + debug ms: 1 + debug mds: 20 +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..5908d951b2d --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml new file mode 100644 index 00000000000..3c11ed74fc7 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml new 
file mode 100644 index 00000000000..c6043e209bd --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000000..930bf4a671d --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml new file mode 100644 index 00000000000..f3efafa2e9d --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + conf: + client: + ms_inject_delay_probability: 1 + ms_inject_delay_type: osd + ms_inject_delay_max: 5 + client_oc_max_dirty_age: 1 +tasks: +- ceph-fuse: +- exec: + client.0: + - dd if=/dev/zero of=./foo count=100 + - sleep 2 + - truncate --size 0 ./foo diff --git a/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml b/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml new file mode 100644 index 00000000000..b4708ebd7c0 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/powercycle/osd/tasks/radosbench.yaml b/qa/suites/powercycle/osd/tasks/radosbench.yaml new file mode 100644 index 00000000000..3940870fce0 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/radosbench.yaml @@ -0,0 +1,4 @@ +tasks: +- radosbench: + clients: [client.0] + time: 1800 diff --git a/qa/suites/powercycle/osd/tasks/readwrite.yaml b/qa/suites/powercycle/osd/tasks/readwrite.yaml new file mode 100644 index 00000000000..c53e52b0872 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/readwrite.yaml @@ -0,0 +1,9 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 45 + write: 45 + delete: 10 diff --git a/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml b/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml new file mode 100644 index 00000000000..aa82d973ae1 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml b/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml new file mode 100644 index 00000000000..1ffe4e14888 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/rados/basic/% b/qa/suites/rados/basic/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rados/basic/clusters/fixed-2.yaml b/qa/suites/rados/basic/clusters/fixed-2.yaml new file mode 120000 index 00000000000..cd0791a1486 --- /dev/null +++ b/qa/suites/rados/basic/clusters/fixed-2.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rados/basic/fs/btrfs.yaml 
b/qa/suites/rados/basic/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/rados/basic/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/rados/basic/msgr-failures/few.yaml b/qa/suites/rados/basic/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/rados/basic/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/rados/basic/msgr-failures/many.yaml b/qa/suites/rados/basic/msgr-failures/many.yaml new file mode 100644 index 00000000000..038c3a79908 --- /dev/null +++ b/qa/suites/rados/basic/msgr-failures/many.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 1500 diff --git a/qa/suites/rados/basic/tasks/rados_api_tests.yaml b/qa/suites/rados/basic/tasks/rados_api_tests.yaml new file mode 100644 index 00000000000..acfc597dec3 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_api_tests.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + log-whitelist: + - reached quota + - wrongly marked me down +tasks: +- install: +- ceph: +- workunit: + clients: + client.0: + - rados/test.sh + - rados/test_pool_quota.sh + diff --git a/qa/suites/rados/basic/tasks/rados_cls_all.yaml b/qa/suites/rados/basic/tasks/rados_cls_all.yaml new file mode 100644 index 00000000000..34f7cbbb4a0 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_cls_all.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: +- workunit: + clients: + client.0: + - cls diff --git a/qa/suites/rados/basic/tasks/rados_python.yaml b/qa/suites/rados/basic/tasks/rados_python.yaml new file mode 100644 index 00000000000..4faf10e39e0 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_python.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: +- workunit: + clients: + client.0: + - rados/test_python.sh diff --git a/qa/suites/rados/basic/tasks/rados_stress_watch.yaml b/qa/suites/rados/basic/tasks/rados_stress_watch.yaml new file mode 100644 index 00000000000..ae2e5fd0083 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_stress_watch.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: +- workunit: + clients: + client.0: + - rados/stress_watch.sh diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml new file mode 100644 index 00000000000..9432367e356 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - rados/load-gen-big.sh diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml new file mode 100644 index 00000000000..7d882cac9c9 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - rados/load-gen-mix.sh diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml new file mode 100644 index 00000000000..69c06b7b049 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down +tasks: +- install: +- ceph: +- 
workunit: + clients: + all: + - rados/load-gen-mostlyread.sh diff --git a/qa/suites/rados/monthrash/% b/qa/suites/rados/monthrash/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rados/monthrash/ceph/ceph.yaml b/qa/suites/rados/monthrash/ceph/ceph.yaml new file mode 100644 index 00000000000..a2c0efc7779 --- /dev/null +++ b/qa/suites/rados/monthrash/ceph/ceph.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + conf: + mon: + mon min osdmap epochs: 25 + paxos service trim min: 5 +tasks: +- install: +- ceph: diff --git a/qa/suites/rados/monthrash/clusters/3-mons.yaml b/qa/suites/rados/monthrash/clusters/3-mons.yaml new file mode 100644 index 00000000000..6298ff23c55 --- /dev/null +++ b/qa/suites/rados/monthrash/clusters/3-mons.yaml @@ -0,0 +1,3 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mds.a, osd.3, osd.4, osd.5, client.0] diff --git a/qa/suites/rados/monthrash/clusters/9-mons.yaml b/qa/suites/rados/monthrash/clusters/9-mons.yaml new file mode 100644 index 00000000000..a22e6c5a0fc --- /dev/null +++ b/qa/suites/rados/monthrash/clusters/9-mons.yaml @@ -0,0 +1,3 @@ +roles: +- [mon.a, mon.b, mon.c, mon.d, mon.e, osd.0, osd.1, osd.2] +- [mon.f, mon.g, mon.h, mon.i, mds.a, osd.3, osd.4, osd.5, client.0] diff --git a/qa/suites/rados/monthrash/fs/xfs.yaml b/qa/suites/rados/monthrash/fs/xfs.yaml new file mode 120000 index 00000000000..4c28d731f6b --- /dev/null +++ b/qa/suites/rados/monthrash/fs/xfs.yaml @@ -0,0 +1 @@ +../../../../fs/xfs.yaml \ No newline at end of file diff --git a/qa/suites/rados/monthrash/msgr-failures/few.yaml b/qa/suites/rados/monthrash/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/rados/monthrash/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml b/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml new file mode 100644 index 00000000000..03b7e37f842 --- /dev/null +++ b/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 2500 + ms inject delay type: mon + ms inject delay probability: .005 + ms inject delay max: 1 + ms inject internal delays: .002 diff --git a/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml b/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml new file mode 100644 index 00000000000..2867f2db5ec --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml @@ -0,0 +1,6 @@ +tasks: +- mon_thrash: + revive_delay: 90 + thrash_delay: 1 + thrash_store: true + thrash_many: true diff --git a/qa/suites/rados/monthrash/thrashers/many.yaml b/qa/suites/rados/monthrash/thrashers/many.yaml new file mode 100644 index 00000000000..fe52bb2bbeb --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/many.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + osd: + mon client ping interval: 4 + mon client ping timeout: 12 +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 + thrash_many: true + freeze_mon_duration: 20 + freeze_mon_probability: 10 diff --git a/qa/suites/rados/monthrash/thrashers/one.yaml b/qa/suites/rados/monthrash/thrashers/one.yaml new file mode 100644 index 00000000000..2ce44c8601f --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/one.yaml @@ -0,0 +1,4 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 diff --git a/qa/suites/rados/monthrash/thrashers/sync-many.yaml 
b/qa/suites/rados/monthrash/thrashers/sync-many.yaml new file mode 100644 index 00000000000..9868f18159f --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/sync-many.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + conf: + mon: + paxos min: 10 + paxos trim min: 10 +tasks: +- mon_thrash: + revive_delay: 90 + thrash_delay: 1 + thrash_many: true diff --git a/qa/suites/rados/monthrash/thrashers/sync.yaml b/qa/suites/rados/monthrash/thrashers/sync.yaml new file mode 100644 index 00000000000..1e7054c271d --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/sync.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + conf: + mon: + paxos min: 10 + paxos trim min: 10 +tasks: +- mon_thrash: + revive_delay: 90 + thrash_delay: 1 diff --git a/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml b/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml new file mode 100644 index 00000000000..c0f0f2e35b4 --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml @@ -0,0 +1,56 @@ +overrides: + ceph: + log-whitelist: + - slow request +tasks: +- exec: + client.0: + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel diff --git a/qa/suites/rados/monthrash/workloads/rados_5925.yaml b/qa/suites/rados/monthrash/workloads/rados_5925.yaml new file mode 100644 index 00000000000..b49937f76df --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/rados_5925.yaml @@ -0,0 +1,4 @@ +tasks: +- exec: + client.0: + - ceph_test_rados_delete_pools_parallel --debug_objecter 20 --debug_ms 1 --debug_rados 20 --debug_monc 20 diff --git 
a/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml b/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml new file mode 100644 index 00000000000..cd11ae6ca0c --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml b/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml new file mode 100644 index 00000000000..31465cffe71 --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down +tasks: +- workunit: + clients: + client.0: + - mon/pool_ops.sh + - mon/crush_ops.sh + - mon/osd.sh + - mon/caps.sh + diff --git a/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml b/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml new file mode 100644 index 00000000000..aa82d973ae1 --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/rados/multimon/% b/qa/suites/rados/multimon/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rados/multimon/clusters/21.yaml b/qa/suites/rados/multimon/clusters/21.yaml new file mode 100644 index 00000000000..2d134788a6f --- /dev/null +++ b/qa/suites/rados/multimon/clusters/21.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.d, mon.g, mon.j, mon.m, mon.p, mon.s, osd.0] +- [mon.b, mon.e, mon.h, mon.k, mon.n, mon.q, mon.t, mds.a] +- [mon.c, mon.f, mon.i, mon.l, mon.o, mon.r, mon.u, osd.1] diff --git a/qa/suites/rados/multimon/clusters/3.yaml b/qa/suites/rados/multimon/clusters/3.yaml new file mode 100644 index 00000000000..703cc664f87 --- /dev/null +++ b/qa/suites/rados/multimon/clusters/3.yaml @@ -0,0 +1,2 @@ +roles: +- [mon.a, mon.b, mon.c, osd.0, osd.1, mds.a] diff --git a/qa/suites/rados/multimon/clusters/6.yaml b/qa/suites/rados/multimon/clusters/6.yaml new file mode 100644 index 00000000000..62780660361 --- /dev/null +++ b/qa/suites/rados/multimon/clusters/6.yaml @@ -0,0 +1,3 @@ +roles: +- [mon.a, mon.c, mon.e, osd.0] +- [mon.b, mon.d, mon.f, osd.1, mds.a] diff --git a/qa/suites/rados/multimon/clusters/9.yaml b/qa/suites/rados/multimon/clusters/9.yaml new file mode 100644 index 00000000000..b87a158dbb0 --- /dev/null +++ b/qa/suites/rados/multimon/clusters/9.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.d, mon.g, osd.0] +- [mon.b, mon.e, mon.h, mds.a] +- [mon.c, mon.f, mon.i, osd.1] diff --git a/qa/suites/rados/multimon/msgr-failures/few.yaml b/qa/suites/rados/multimon/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/rados/multimon/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/rados/multimon/msgr-failures/many.yaml b/qa/suites/rados/multimon/msgr-failures/many.yaml new file mode 100644 index 00000000000..86f8dde8a0e --- /dev/null +++ b/qa/suites/rados/multimon/msgr-failures/many.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 diff --git a/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml b/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml new file mode 100644 index 00000000000..e86bdde1d7d --- /dev/null +++ 
b/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: + log-whitelist: + - slow request + - .*clock.*skew.* + - clocks not synchronized +- mon_clock_skew_check: + expect-skew: false diff --git a/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml b/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml new file mode 100644 index 00000000000..2953e0d6dc2 --- /dev/null +++ b/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + conf: + mon.b: + clock offset: 10 +tasks: +- install: +- ceph: + wait-for-healthy: false + log-whitelist: + - slow request + - .*clock.*skew.* + - clocks not synchronized +- mon_clock_skew_check: + expect-skew: true diff --git a/qa/suites/rados/multimon/tasks/mon_recovery.yaml b/qa/suites/rados/multimon/tasks/mon_recovery.yaml new file mode 100644 index 00000000000..94721ea53a4 --- /dev/null +++ b/qa/suites/rados/multimon/tasks/mon_recovery.yaml @@ -0,0 +1,4 @@ +tasks: +- install: +- ceph: +- mon_recovery: diff --git a/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml b/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml new file mode 100644 index 00000000000..698e6e2679e --- /dev/null +++ b/qa/suites/rados/objectstore/ceph_objectstore_tool.yaml @@ -0,0 +1,10 @@ +roles: +- [mon.0, osd.0, osd.1, osd.2] +- [osd.3, osd.4, osd.5] +- [client.0] + +tasks: +- install: +- ceph: +- ceph_objectstore_tool: + objects: 20 diff --git a/qa/suites/rados/singleton-nomsgr/% b/qa/suites/rados/singleton-nomsgr/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rados/singleton-nomsgr/all/11429.yaml b/qa/suites/rados/singleton-nomsgr/all/11429.yaml new file mode 100644 index 00000000000..06fdc3b557b --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/11429.yaml @@ -0,0 +1,105 @@ +overrides: + ceph: + conf: + mon: + debug mon: 20 + debug ms: 1 + debug paxos: 20 + mon warn on legacy crush tunables: false + mon min osdmap epochs: 3 + osd: + osd map cache size: 2 + osd map max advance: 1 + debug filestore: 20 + debug journal: 20 + debug ms: 1 + debug osd: 20 + log-whitelist: + - osd_map_cache_size + - slow request + - scrub mismatch + - ScrubResult +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - mon.b + - mon.c + - osd.2 + - client.0 +tasks: +- install: + branch: v0.80.8 +- print: '**** done installing firefly' +- ceph: + fs: xfs +- print: '**** done ceph' +- full_sequential: + - ceph_manager.create_pool: + args: ['toremove'] + kwargs: + pg_num: 4096 + - sleep: + duration: 30 + - ceph_manager.wait_for_clean: null + - radosbench: + clients: [client.0] + time: 120 + size: 1 + pool: toremove + create_pool: false + - ceph_manager.remove_pool: + args: ['toremove'] + - sleep: + duration: 10 + - ceph.restart: + daemons: + - osd.0 + - osd.1 + - osd.2 + - sleep: + duration: 30 + - ceph_manager.wait_for_clean: null + - radosbench: + clients: [client.0] + time: 60 + size: 1 + - ceph_manager.create_pool: + args: ['newpool'] + - loop: + count: 100 + body: + - ceph_manager.set_pool_property: + args: ['newpool', 'min_size', 2] + - ceph_manager.set_pool_property: + args: ['newpool', 'min_size', 1] + - sleep: + duration: 30 + - ceph_manager.wait_for_clean: null + - loop: + count: 100 + body: + - ceph_manager.set_pool_property: + args: ['newpool', 'min_size', 2] + - ceph_manager.set_pool_property: + args: ['newpool', 'min_size', 1] + - sleep: + duration: 30 + - ceph_manager.wait_for_clean: null + - sleep: + duration: 30 + - install.upgrade: + mon.a: null + - ceph.restart: 
+ daemons: + - osd.0 + - osd.1 + - osd.2 + - sleep: + duration: 30 + - radosbench: + clients: [client.0] + time: 30 + size: 1 + - ceph_manager.wait_for_clean: null diff --git a/qa/suites/rados/singleton-nomsgr/all/alloc-hint.yaml b/qa/suites/rados/singleton-nomsgr/all/alloc-hint.yaml new file mode 100644 index 00000000000..dca38c67bb5 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/alloc-hint.yaml @@ -0,0 +1,17 @@ +roles: +- [mon.a, mds.a, osd.0, osd.1, osd.2, client.0] + +overrides: + ceph: + fs: xfs + conf: + osd: + filestore xfs extsize: true + +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - rados/test_alloc_hint.sh diff --git a/qa/suites/rados/singleton-nomsgr/all/filejournal.yaml b/qa/suites/rados/singleton-nomsgr/all/filejournal.yaml new file mode 100644 index 00000000000..28a0c041d9e --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/filejournal.yaml @@ -0,0 +1,8 @@ +roles: +- [mon.0, osd.0, osd.1, mds.a, client.0] +tasks: +- install: +- ceph: +- exec: + client.0: + - ceph_test_filejournal diff --git a/qa/suites/rados/singleton-nomsgr/all/filestore-idempotent-aio-journal.yaml b/qa/suites/rados/singleton-nomsgr/all/filestore-idempotent-aio-journal.yaml new file mode 100644 index 00000000000..15437cf65c7 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/filestore-idempotent-aio-journal.yaml @@ -0,0 +1,9 @@ +roles: +- [mon.0, osd.0, osd.1, mds.a, client.0] +tasks: +- install: +- ceph: + conf: + global: + journal aio: true +- filestore_idempotent: diff --git a/qa/suites/rados/singleton-nomsgr/all/filestore-idempotent.yaml b/qa/suites/rados/singleton-nomsgr/all/filestore-idempotent.yaml new file mode 100644 index 00000000000..c6af200d57f --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/filestore-idempotent.yaml @@ -0,0 +1,6 @@ +roles: +- [mon.0, osd.0, osd.1, mds.a, client.0] +tasks: +- install: +- ceph: +- filestore_idempotent: diff --git a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml new file mode 100644 index 00000000000..2089c9f56ae --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml @@ -0,0 +1,31 @@ +roles: +- - mon.a + - osd.0 + - osd.1 + - osd.2 + - client.0 +- - mds.a + - osd.3 + - osd.4 + - osd.5 +tasks: +- install: +- ceph: + conf: + osd: + osd debug reject backfill probability: .3 + osd min pg log entries: 25 + osd max pg log entries: 100 +- exec: + client.0: + - ceph osd pool create foo 64 + - rados -p foo bench 60 write -b 1024 --no-cleanup + - ceph osd pool set foo size 3 + - ceph osd out 0 1 +- sleep: + duration: 60 +- exec: + client.0: + - ceph osd in 0 1 +- sleep: + duration: 60 diff --git a/qa/suites/rados/singleton-nomsgr/all/objectcacher-stress.yaml b/qa/suites/rados/singleton-nomsgr/all/objectcacher-stress.yaml new file mode 100644 index 00000000000..bc5a2838ef9 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/objectcacher-stress.yaml @@ -0,0 +1,9 @@ +roles: +- [mon.0, osd.0, osd.1, mds.a, client.0] +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - osdc/stress_objectcacher.sh diff --git a/qa/suites/rados/singleton-nomsgr/all/objectstore.yaml b/qa/suites/rados/singleton-nomsgr/all/objectstore.yaml new file mode 100644 index 00000000000..2cab026638e --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/objectstore.yaml @@ -0,0 +1,9 @@ +roles: +- [mon.0, osd.0, osd.1, mds.a, client.0] +tasks: +- install: +- ceph: +- exec: + client.0: + - mkdir $TESTDIR/ostest && cd $TESTDIR/ostest && 
ceph_test_objectstore + - rm -rf $TESTDIR/ostest diff --git a/qa/suites/rados/singleton/% b/qa/suites/rados/singleton/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rados/singleton/all/admin-socket.yaml b/qa/suites/rados/singleton/all/admin-socket.yaml new file mode 100644 index 00000000000..9e580f29db6 --- /dev/null +++ b/qa/suites/rados/singleton/all/admin-socket.yaml @@ -0,0 +1,18 @@ +roles: +- - mon.a + - osd.0 + - mds.a + - osd.1 + - client.a +tasks: +- install: +- ceph: +- admin_socket: + osd.0: + version: + git_version: + help: + config show: + config set filestore_dump_file /tmp/foo: + perf dump: + perf schema: diff --git a/qa/suites/rados/singleton/all/cephtool.yaml b/qa/suites/rados/singleton/all/cephtool.yaml new file mode 100644 index 00000000000..2ed5434960c --- /dev/null +++ b/qa/suites/rados/singleton/all/cephtool.yaml @@ -0,0 +1,22 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mds.a + - osd.0 + - osd.1 + - osd.2 + - client.0 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - had wrong client addr + - had wrong cluster addr + - must scrub before tier agent can activate +- workunit: + clients: + all: + - cephtool + - mon/pool_ops.sh diff --git a/qa/suites/rados/singleton/all/dump-stuck.yaml b/qa/suites/rados/singleton/all/dump-stuck.yaml new file mode 100644 index 00000000000..9bdcb0c3c73 --- /dev/null +++ b/qa/suites/rados/singleton/all/dump-stuck.yaml @@ -0,0 +1,11 @@ +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down +- dump_stuck: diff --git a/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml new file mode 100644 index 00000000000..1dd47518a8a --- /dev/null +++ b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml @@ -0,0 +1,15 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mds.a + - osd.0 + - osd.1 + - osd.2 + - osd.3 +tasks: +- install: +- ceph: + log-whitelist: + - objects unfound and apparently lost +- ec_lost_unfound: diff --git a/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml new file mode 100644 index 00000000000..3df9e2ed601 --- /dev/null +++ b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml @@ -0,0 +1,14 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mds.a + - osd.0 + - osd.1 + - osd.2 +tasks: +- install: +- ceph: + log-whitelist: + - objects unfound and apparently lost +- rep_lost_unfound_delete: diff --git a/qa/suites/rados/singleton/all/lost-unfound.yaml b/qa/suites/rados/singleton/all/lost-unfound.yaml new file mode 100644 index 00000000000..6014723ed08 --- /dev/null +++ b/qa/suites/rados/singleton/all/lost-unfound.yaml @@ -0,0 +1,14 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mds.a + - osd.0 + - osd.1 + - osd.2 +tasks: +- install: +- ceph: + log-whitelist: + - objects unfound and apparently lost +- lost_unfound: diff --git a/qa/suites/rados/singleton/all/mon-config-keys.yaml b/qa/suites/rados/singleton/all/mon-config-keys.yaml new file mode 100644 index 00000000000..524c6b6f570 --- /dev/null +++ b/qa/suites/rados/singleton/all/mon-config-keys.yaml @@ -0,0 +1,16 @@ +roles: +- - mon.0 + - mon.1 + - mon.2 + - mds.a + - osd.0 + - osd.1 + - osd.2 + - client.0 +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - mon/test_mon_config_key.py diff --git a/qa/suites/rados/singleton/all/mon-thrasher.yaml b/qa/suites/rados/singleton/all/mon-thrasher.yaml new file mode 100644 index 00000000000..4e4e8571b35 --- 
/dev/null +++ b/qa/suites/rados/singleton/all/mon-thrasher.yaml @@ -0,0 +1,22 @@ +roles: +- - mon.a + - mon.b + - mon.c + - osd.0 + - osd.1 + - mds.0 + - client.0 +tasks: +- install: +- ceph: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + clients: + all: + - mon/workloadgen.sh + env: + LOADGEN_NUM_OSDS: "5" + VERBOSE: "1" + DURATION: "600" diff --git a/qa/suites/rados/singleton/all/osd-backfill.yaml b/qa/suites/rados/singleton/all/osd-backfill.yaml new file mode 100644 index 00000000000..7c18a3b9bb0 --- /dev/null +++ b/qa/suites/rados/singleton/all/osd-backfill.yaml @@ -0,0 +1,17 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mds.a + - osd.0 + - osd.1 + - osd.2 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + conf: + osd: + osd min pg log entries: 5 +- osd_backfill: diff --git a/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml new file mode 100644 index 00000000000..e6f99983e02 --- /dev/null +++ b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml @@ -0,0 +1,18 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mds.a + - osd.0 + - osd.1 + - osd.2 + - osd.3 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + conf: + osd: + osd min pg log entries: 5 +- osd_recovery.test_incomplete_pgs: diff --git a/qa/suites/rados/singleton/all/osd-recovery.yaml b/qa/suites/rados/singleton/all/osd-recovery.yaml new file mode 100644 index 00000000000..8307d424533 --- /dev/null +++ b/qa/suites/rados/singleton/all/osd-recovery.yaml @@ -0,0 +1,17 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mds.a + - osd.0 + - osd.1 + - osd.2 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + conf: + osd: + osd min pg log entries: 5 +- osd_recovery: diff --git a/qa/suites/rados/singleton/all/peer.yaml b/qa/suites/rados/singleton/all/peer.yaml new file mode 100644 index 00000000000..a441059bbb8 --- /dev/null +++ b/qa/suites/rados/singleton/all/peer.yaml @@ -0,0 +1,17 @@ +roles: +- - mon.0 + - mon.1 + - mon.2 + - mds.a + - osd.0 + - osd.1 + - osd.2 +tasks: +- install: +- ceph: + config: + global: + osd pool default min size : 1 + log-whitelist: + - objects unfound and apparently lost +- peer: diff --git a/qa/suites/rados/singleton/all/radostool.yaml b/qa/suites/rados/singleton/all/radostool.yaml new file mode 100644 index 00000000000..05ab4a3f7c2 --- /dev/null +++ b/qa/suites/rados/singleton/all/radostool.yaml @@ -0,0 +1,16 @@ +roles: +- - mon.a + - osd.0 + - osd.1 + - client.0 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - had wrong client addr + - had wrong cluster addr +- workunit: + clients: + all: + - rados/test_rados_tool.sh diff --git a/qa/suites/rados/singleton/all/rest-api.yaml b/qa/suites/rados/singleton/all/rest-api.yaml new file mode 100644 index 00000000000..425db55660d --- /dev/null +++ b/qa/suites/rados/singleton/all/rest-api.yaml @@ -0,0 +1,20 @@ +roles: +- - mon.0 + - mon.1 + - mon.2 + - mds.a + - osd.0 + - osd.1 + - osd.2 + - client.0 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - had wrong client addr +- rest-api: [client.0] +- workunit: + clients: + all: + - rest/test.py diff --git a/qa/suites/rados/singleton/all/thrash-rados.yaml b/qa/suites/rados/singleton/all/thrash-rados.yaml new file mode 100644 index 00000000000..4bdcf226546 --- /dev/null +++ b/qa/suites/rados/singleton/all/thrash-rados.yaml @@ -0,0 +1,23 @@ +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - osd.3 + - 
osd.4 + - osd.5 + - client.0 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down +- thrashosds: + op_delay: 30 + clean_interval: 120 + chance_down: .5 +- workunit: + clients: + all: + - rados/load-gen-mix-small.sh diff --git a/qa/suites/rados/singleton/all/thrash_cache_writeback_forward_none.yaml b/qa/suites/rados/singleton/all/thrash_cache_writeback_forward_none.yaml new file mode 100644 index 00000000000..0c971e034bc --- /dev/null +++ b/qa/suites/rados/singleton/all/thrash_cache_writeback_forward_none.yaml @@ -0,0 +1,61 @@ +roles: +- - mon.a + - mds.0 + - osd.0 + - osd.1 + - osd.2 +- - osd.3 + - osd.4 + - osd.5 + - client.0 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - slow request +- exec: + client.0: + - ceph osd pool create base 4 + - ceph osd pool create cache 4 + - ceph osd tier add base cache + - ceph osd tier cache-mode cache writeback + - ceph osd tier set-overlay base cache + - ceph osd pool set cache hit_set_type bloom + - ceph osd pool set cache hit_set_count 8 + - ceph osd pool set cache hit_set_period 60 + - ceph osd pool set cache target_max_objects 500 +- background_exec: + mon.a: + - while true + - do sleep 30 + - echo forward + - ceph osd tier cache-mode cache forward + - sleep 10 + - ceph osd pool set cache cache_target_full_ratio .001 + - echo cache-try-flush-evict-all + - rados -p cache cache-try-flush-evict-all + - sleep 5 + - echo cache-flush-evict-all + - rados -p cache cache-flush-evict-all + - sleep 5 + - echo remove overlay + - ceph osd tier remove-overlay base + - sleep 20 + - echo add writeback overlay + - ceph osd tier cache-mode cache writeback + - ceph osd pool set cache cache_target_full_ratio .8 + - ceph osd tier set-overlay base cache + - done +- rados: + clients: [client.0] + pools: [base] + max_seconds: 600 + ops: 400000 + objects: 10000 + size: 1024 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 diff --git a/qa/suites/rados/singleton/fs/btrfs.yaml b/qa/suites/rados/singleton/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/rados/singleton/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/rados/singleton/msgr-failures/few.yaml b/qa/suites/rados/singleton/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/rados/singleton/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/rados/singleton/msgr-failures/many.yaml b/qa/suites/rados/singleton/msgr-failures/many.yaml new file mode 100644 index 00000000000..86f8dde8a0e --- /dev/null +++ b/qa/suites/rados/singleton/msgr-failures/many.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 diff --git a/qa/suites/rados/thrash/% b/qa/suites/rados/thrash/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rados/thrash/clusters/+ b/qa/suites/rados/thrash/clusters/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rados/thrash/clusters/fixed-2.yaml b/qa/suites/rados/thrash/clusters/fixed-2.yaml new file mode 120000 index 00000000000..cd0791a1486 --- /dev/null +++ b/qa/suites/rados/thrash/clusters/fixed-2.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/clusters/openstack.yaml b/qa/suites/rados/thrash/clusters/openstack.yaml new file mode 100644 index 
00000000000..00d927ac232 --- /dev/null +++ b/qa/suites/rados/thrash/clusters/openstack.yaml @@ -0,0 +1,8 @@ +openstack: + machine: + disk: 40 # GB + ram: 8000 # MB + cpus: 1 + volumes: # attached to each instance + count: 3 + size: 30 # GB diff --git a/qa/suites/rados/thrash/fs/btrfs.yaml b/qa/suites/rados/thrash/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/rados/thrash/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/fs/ext4.yaml b/qa/suites/rados/thrash/fs/ext4.yaml new file mode 120000 index 00000000000..65d71886933 --- /dev/null +++ b/qa/suites/rados/thrash/fs/ext4.yaml @@ -0,0 +1 @@ +../../../../fs/ext4.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/fs/xfs.yaml b/qa/suites/rados/thrash/fs/xfs.yaml new file mode 120000 index 00000000000..4c28d731f6b --- /dev/null +++ b/qa/suites/rados/thrash/fs/xfs.yaml @@ -0,0 +1 @@ +../../../../fs/xfs.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/msgr-failures/fastclose.yaml b/qa/suites/rados/thrash/msgr-failures/fastclose.yaml new file mode 100644 index 00000000000..77fd730aff7 --- /dev/null +++ b/qa/suites/rados/thrash/msgr-failures/fastclose.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 2500 + ms tcp read timeout: 5 diff --git a/qa/suites/rados/thrash/msgr-failures/few.yaml b/qa/suites/rados/thrash/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/rados/thrash/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml b/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml new file mode 100644 index 00000000000..a33ba89e14f --- /dev/null +++ b/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 2500 + ms inject delay type: osd + ms inject delay probability: .005 + ms inject delay max: 1 + ms inject internal delays: .002 diff --git a/qa/suites/rados/thrash/thrashers/default.yaml b/qa/suites/rados/thrash/thrashers/default.yaml new file mode 100644 index 00000000000..a5958b6d6ed --- /dev/null +++ b/qa/suites/rados/thrash/thrashers/default.yaml @@ -0,0 +1,13 @@ +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + conf: + osd: + osd debug reject backfill probability: .3 +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 diff --git a/qa/suites/rados/thrash/thrashers/mapgap.yaml b/qa/suites/rados/thrash/thrashers/mapgap.yaml new file mode 100644 index 00000000000..fd7fd17957d --- /dev/null +++ b/qa/suites/rados/thrash/thrashers/mapgap.yaml @@ -0,0 +1,19 @@ +overrides: + ceph: + conf: + mon: + mon min osdmap epochs: 2 + osd: + osd map cache size: 1 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - osd_map_cache_size +- thrashosds: + timeout: 1800 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + chance_test_map_discontinuity: 0.5 diff --git a/qa/suites/rados/thrash/thrashers/morepggrow.yaml b/qa/suites/rados/thrash/thrashers/morepggrow.yaml new file mode 100644 index 00000000000..93379a82c33 --- /dev/null +++ b/qa/suites/rados/thrash/thrashers/morepggrow.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - 
objects unfound and apparently lost +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 3 + chance_pgpnum_fix: 1 diff --git a/qa/suites/rados/thrash/thrashers/pggrow.yaml b/qa/suites/rados/thrash/thrashers/pggrow.yaml new file mode 100644 index 00000000000..6131b00012d --- /dev/null +++ b/qa/suites/rados/thrash/thrashers/pggrow.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 2 + chance_pgpnum_fix: 1 diff --git a/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml b/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml new file mode 100644 index 00000000000..b1ddad8d3b0 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + client.0: + admin socket: /var/run/ceph/ceph-$name.asok +tasks: +- radosbench: + clients: [client.0] + time: 60 +- admin_socket: + client.0: + objecter_requests: + test: "http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}" diff --git a/qa/suites/rados/thrash/workloads/cache-agent-big.yaml b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml new file mode 100644 index 00000000000..d3c404b5414 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml @@ -0,0 +1,29 @@ +overrides: + ceph: + log-whitelist: + - must scrub before tier agent can activate +tasks: +- exec: + client.0: + - ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd + m=1 k=2 + - ceph osd pool create base 4 erasure teuthologyprofile + - ceph osd pool create cache 4 + - ceph osd tier add base cache + - ceph osd tier cache-mode cache writeback + - ceph osd tier set-overlay base cache + - ceph osd pool set cache hit_set_type bloom + - ceph osd pool set cache hit_set_count 8 + - ceph osd pool set cache hit_set_period 60 + - ceph osd pool set cache target_max_objects 5000 +- rados: + clients: [client.0] + pools: [base] + ops: 4000 + objects: 10000 + size: 1024 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash/workloads/cache-agent-small.yaml b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml new file mode 100644 index 00000000000..50bb3ac3c33 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml @@ -0,0 +1,26 @@ +overrides: + ceph: + log-whitelist: + - must scrub before tier agent can activate +tasks: +- exec: + client.0: + - ceph osd pool create base 4 + - ceph osd pool create cache 4 + - ceph osd tier add base cache + - ceph osd tier cache-mode cache writeback + - ceph osd tier set-overlay base cache + - ceph osd pool set cache hit_set_type bloom + - ceph osd pool set cache hit_set_count 8 + - ceph osd pool set cache hit_set_period 60 + - ceph osd pool set cache target_max_objects 250 +- rados: + clients: [client.0] + pools: [base] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash/workloads/cache-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-snaps.yaml new file mode 100644 index 00000000000..199d6b2b1be --- /dev/null +++ b/qa/suites/rados/thrash/workloads/cache-snaps.yaml @@ -0,0 +1,31 @@ +overrides: + ceph: + log-whitelist: + - must scrub before tier agent can activate +tasks: +- exec: + client.0: + - ceph osd pool create base 4 + - ceph osd pool create cache 4 + - ceph osd tier add base cache + - ceph 
osd tier cache-mode cache writeback + - ceph osd tier set-overlay base cache + - ceph osd pool set cache hit_set_type bloom + - ceph osd pool set cache hit_set_count 8 + - ceph osd pool set cache hit_set_period 3600 +- rados: + clients: [client.0] + pools: [base] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 + flush: 50 + try_flush: 50 + evict: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/rados/thrash/workloads/cache.yaml b/qa/suites/rados/thrash/workloads/cache.yaml new file mode 100644 index 00000000000..0f15cad6763 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/cache.yaml @@ -0,0 +1,28 @@ +overrides: + ceph: + log-whitelist: + - must scrub before tier agent can activate +tasks: +- exec: + client.0: + - ceph osd pool create base 4 + - ceph osd pool create cache 4 + - ceph osd tier add base cache + - ceph osd tier cache-mode cache writeback + - ceph osd tier set-overlay base cache + - ceph osd pool set cache hit_set_type bloom + - ceph osd pool set cache hit_set_count 8 + - ceph osd pool set cache hit_set_period 3600 +- rados: + clients: [client.0] + pools: [base] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 + flush: 50 + try_flush: 50 + evict: 50 diff --git a/qa/suites/rados/thrash/workloads/ec-radosbench.yaml b/qa/suites/rados/thrash/workloads/ec-radosbench.yaml new file mode 100644 index 00000000000..70875fb2325 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/ec-radosbench.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - shard.*missing +tasks: +- radosbench: + clients: [client.0] + time: 1800 + unique_pool: true + ec_pool: true diff --git a/qa/suites/rados/thrash/workloads/ec-readwrite.yaml b/qa/suites/rados/thrash/workloads/ec-readwrite.yaml new file mode 100644 index 00000000000..80b9140e707 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/ec-readwrite.yaml @@ -0,0 +1,11 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + ec_pool: true + op_weights: + read: 45 + write: 0 + append: 45 + delete: 10 diff --git a/qa/suites/rados/thrash/workloads/ec-small-objects.yaml b/qa/suites/rados/thrash/workloads/ec-small-objects.yaml new file mode 100644 index 00000000000..a8ac39716e5 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/ec-small-objects.yaml @@ -0,0 +1,20 @@ +tasks: +- rados: + clients: [client.0] + ops: 400000 + max_seconds: 600 + max_in_flight: 64 + objects: 1024 + size: 16384 + ec_pool: true + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/suites/rados/thrash/workloads/ec-snaps-few-objects.yaml b/qa/suites/rados/thrash/workloads/ec-snaps-few-objects.yaml new file mode 100644 index 00000000000..c64d4ffb35b --- /dev/null +++ b/qa/suites/rados/thrash/workloads/ec-snaps-few-objects.yaml @@ -0,0 +1,15 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash/workloads/rados_api_tests.yaml b/qa/suites/rados/thrash/workloads/rados_api_tests.yaml new file mode 100644 index 00000000000..cd11ae6ca0c --- /dev/null +++ b/qa/suites/rados/thrash/workloads/rados_api_tests.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rados/test.sh diff --git 
a/qa/suites/rados/thrash/workloads/radosbench.yaml b/qa/suites/rados/thrash/workloads/radosbench.yaml new file mode 100644 index 00000000000..3940870fce0 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/radosbench.yaml @@ -0,0 +1,4 @@ +tasks: +- radosbench: + clients: [client.0] + time: 1800 diff --git a/qa/suites/rados/thrash/workloads/readwrite.yaml b/qa/suites/rados/thrash/workloads/readwrite.yaml new file mode 100644 index 00000000000..c53e52b0872 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/readwrite.yaml @@ -0,0 +1,9 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 45 + write: 45 + delete: 10 diff --git a/qa/suites/rados/thrash/workloads/small-objects.yaml b/qa/suites/rados/thrash/workloads/small-objects.yaml new file mode 100644 index 00000000000..bb5a934de34 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/small-objects.yaml @@ -0,0 +1,18 @@ +tasks: +- rados: + clients: [client.0] + ops: 400000 + max_seconds: 600 + max_in_flight: 64 + objects: 1024 + size: 16384 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml b/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml new file mode 100644 index 00000000000..aa82d973ae1 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/rados/verify/% b/qa/suites/rados/verify/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rados/verify/1thrash/default.yaml b/qa/suites/rados/verify/1thrash/default.yaml new file mode 100644 index 00000000000..9435b146af6 --- /dev/null +++ b/qa/suites/rados/verify/1thrash/default.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 diff --git a/qa/suites/rados/verify/1thrash/none.yaml b/qa/suites/rados/verify/1thrash/none.yaml new file mode 100644 index 00000000000..2030acb9083 --- /dev/null +++ b/qa/suites/rados/verify/1thrash/none.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rados/verify/clusters/fixed-2.yaml b/qa/suites/rados/verify/clusters/fixed-2.yaml new file mode 120000 index 00000000000..cd0791a1486 --- /dev/null +++ b/qa/suites/rados/verify/clusters/fixed-2.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rados/verify/fs/btrfs.yaml b/qa/suites/rados/verify/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/rados/verify/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/rados/verify/msgr-failures/few.yaml b/qa/suites/rados/verify/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/rados/verify/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/rados/verify/tasks/mon_recovery.yaml b/qa/suites/rados/verify/tasks/mon_recovery.yaml new file mode 100644 index 00000000000..6986303409e --- /dev/null +++ b/qa/suites/rados/verify/tasks/mon_recovery.yaml @@ 
-0,0 +1,2 @@ +tasks: +- mon_recovery: diff --git a/qa/suites/rados/verify/tasks/rados_api_tests.yaml b/qa/suites/rados/verify/tasks/rados_api_tests.yaml new file mode 100644 index 00000000000..0031704784e --- /dev/null +++ b/qa/suites/rados/verify/tasks/rados_api_tests.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + client: + debug ms: 1 + debug objecter: 20 + debug rados: 20 + debug monc: 20 +tasks: +- workunit: + timeout: 6h + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/rados/verify/tasks/rados_cls_all.yaml b/qa/suites/rados/verify/tasks/rados_cls_all.yaml new file mode 100644 index 00000000000..853da39ad99 --- /dev/null +++ b/qa/suites/rados/verify/tasks/rados_cls_all.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - cls diff --git a/qa/suites/rados/verify/validater/lockdep.yaml b/qa/suites/rados/verify/validater/lockdep.yaml new file mode 100644 index 00000000000..25f84355c0b --- /dev/null +++ b/qa/suites/rados/verify/validater/lockdep.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + lockdep: true diff --git a/qa/suites/rados/verify/validater/valgrind.yaml b/qa/suites/rados/verify/validater/valgrind.yaml new file mode 100644 index 00000000000..7b8f7a28629 --- /dev/null +++ b/qa/suites/rados/verify/validater/valgrind.yaml @@ -0,0 +1,9 @@ +overrides: + install: + ceph: + flavor: notcmalloc + ceph: + valgrind: + mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] + osd: [--tool=memcheck] + mds: [--tool=memcheck] diff --git a/qa/suites/rbd/basic/% b/qa/suites/rbd/basic/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rbd/basic/base/install.yaml b/qa/suites/rbd/basic/base/install.yaml new file mode 100644 index 00000000000..2030acb9083 --- /dev/null +++ b/qa/suites/rbd/basic/base/install.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rbd/basic/cachepool/none.yaml b/qa/suites/rbd/basic/cachepool/none.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rbd/basic/cachepool/small.yaml b/qa/suites/rbd/basic/cachepool/small.yaml new file mode 100644 index 00000000000..f8ed11040fa --- /dev/null +++ b/qa/suites/rbd/basic/cachepool/small.yaml @@ -0,0 +1,11 @@ +tasks: +- exec: + client.0: + - ceph osd pool create cache 4 + - ceph osd tier add rbd cache + - ceph osd tier cache-mode cache writeback + - ceph osd tier set-overlay rbd cache + - ceph osd pool set cache hit_set_type bloom + - ceph osd pool set cache hit_set_count 8 + - ceph osd pool set cache hit_set_period 60 + - ceph osd pool set cache target_max_objects 250 diff --git a/qa/suites/rbd/basic/clusters/fixed-1.yaml b/qa/suites/rbd/basic/clusters/fixed-1.yaml new file mode 120000 index 00000000000..435ea3c7546 --- /dev/null +++ b/qa/suites/rbd/basic/clusters/fixed-1.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-1.yaml \ No newline at end of file diff --git a/qa/suites/rbd/basic/fs/btrfs.yaml b/qa/suites/rbd/basic/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/rbd/basic/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/rbd/basic/msgr-failures/few.yaml b/qa/suites/rbd/basic/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/rbd/basic/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/rbd/basic/msgr-failures/many.yaml b/qa/suites/rbd/basic/msgr-failures/many.yaml 
new file mode 100644 index 00000000000..86f8dde8a0e --- /dev/null +++ b/qa/suites/rbd/basic/msgr-failures/many.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 diff --git a/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml b/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml new file mode 100644 index 00000000000..a98768540ba --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh diff --git a/qa/suites/rbd/basic/tasks/rbd_cli_copy.yaml b/qa/suites/rbd/basic/tasks/rbd_cli_copy.yaml new file mode 100644 index 00000000000..ae95e51e066 --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_cli_copy.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/copy.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/rbd/basic/tasks/rbd_cli_copy_old_format.yaml b/qa/suites/rbd/basic/tasks/rbd_cli_copy_old_format.yaml new file mode 100644 index 00000000000..2f99f8990de --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_cli_copy_old_format.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/copy.sh diff --git a/qa/suites/rbd/basic/tasks/rbd_cli_import_export.yaml b/qa/suites/rbd/basic/tasks/rbd_cli_import_export.yaml new file mode 100644 index 00000000000..49070827be0 --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_cli_import_export.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/rbd/basic/tasks/rbd_cli_import_export_old_format.yaml b/qa/suites/rbd/basic/tasks/rbd_cli_import_export_old_format.yaml new file mode 100644 index 00000000000..b08f2612f7a --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_cli_import_export_old_format.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/import_export.sh diff --git a/qa/suites/rbd/basic/tasks/rbd_cli_tests.yaml b/qa/suites/rbd/basic/tasks/rbd_cli_tests.yaml new file mode 100644 index 00000000000..a37db057b5d --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_cli_tests.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/run_cli_tests.sh + diff --git a/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml b/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml new file mode 100644 index 00000000000..9ccd57c4a82 --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - cls/test_cls_rbd.sh diff --git a/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml b/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml new file mode 100644 index 00000000000..d2c80ad6585 --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_lock_fence.sh diff --git a/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml b/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml new file mode 100644 index 00000000000..263b784e27d --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh diff --git a/qa/suites/rbd/librbd/% b/qa/suites/rbd/librbd/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rbd/librbd/cache/none.yaml b/qa/suites/rbd/librbd/cache/none.yaml new file mode 100644 index 00000000000..42fd9c95562 --- /dev/null +++ b/qa/suites/rbd/librbd/cache/none.yaml 
@@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: + conf: + client: + rbd cache: false diff --git a/qa/suites/rbd/librbd/cache/writeback.yaml b/qa/suites/rbd/librbd/cache/writeback.yaml new file mode 100644 index 00000000000..86fe06afa05 --- /dev/null +++ b/qa/suites/rbd/librbd/cache/writeback.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: + conf: + client: + rbd cache: true diff --git a/qa/suites/rbd/librbd/cache/writethrough.yaml b/qa/suites/rbd/librbd/cache/writethrough.yaml new file mode 100644 index 00000000000..6dc29e16c02 --- /dev/null +++ b/qa/suites/rbd/librbd/cache/writethrough.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: + conf: + client: + rbd cache: true + rbd cache max dirty: 0 diff --git a/qa/suites/rbd/librbd/cachepool/none.yaml b/qa/suites/rbd/librbd/cachepool/none.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rbd/librbd/cachepool/small.yaml b/qa/suites/rbd/librbd/cachepool/small.yaml new file mode 100644 index 00000000000..f8ed11040fa --- /dev/null +++ b/qa/suites/rbd/librbd/cachepool/small.yaml @@ -0,0 +1,11 @@ +tasks: +- exec: + client.0: + - ceph osd pool create cache 4 + - ceph osd tier add rbd cache + - ceph osd tier cache-mode cache writeback + - ceph osd tier set-overlay rbd cache + - ceph osd pool set cache hit_set_type bloom + - ceph osd pool set cache hit_set_count 8 + - ceph osd pool set cache hit_set_period 60 + - ceph osd pool set cache target_max_objects 250 diff --git a/qa/suites/rbd/librbd/clusters/fixed-3.yaml b/qa/suites/rbd/librbd/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/rbd/librbd/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/rbd/librbd/fs b/qa/suites/rbd/librbd/fs new file mode 120000 index 00000000000..3658920363d --- /dev/null +++ b/qa/suites/rbd/librbd/fs @@ -0,0 +1 @@ +../basic/fs \ No newline at end of file diff --git a/qa/suites/rbd/librbd/msgr-failures/few.yaml b/qa/suites/rbd/librbd/msgr-failures/few.yaml new file mode 100644 index 00000000000..a8bc68355ea --- /dev/null +++ b/qa/suites/rbd/librbd/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - wrongly marked me down diff --git a/qa/suites/rbd/librbd/workloads/c_api_tests.yaml b/qa/suites/rbd/librbd/workloads/c_api_tests.yaml new file mode 100644 index 00000000000..188ddc56c60 --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/c_api_tests.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/rbd/librbd/workloads/fsx.yaml b/qa/suites/rbd/librbd/workloads/fsx.yaml new file mode 100644 index 00000000000..ef512d8a9b4 --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/fsx.yaml @@ -0,0 +1,4 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 5000 diff --git a/qa/suites/rbd/librbd/workloads/python_api_tests.yaml b/qa/suites/rbd/librbd/workloads/python_api_tests.yaml new file mode 100644 index 00000000000..a7b3ce7d3e6 --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/python_api_tests.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/rbd/librbd/workloads/qemu_bonnie.yaml b/qa/suites/rbd/librbd/workloads/qemu_bonnie.yaml new file mode 100644 index 00000000000..7c964265a0b --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/qemu_bonnie.yaml @@ -0,0 +1,5 @@ +tasks: +- qemu: + 
all: + test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/bonnie.sh +exclude_arch: armv7l diff --git a/qa/suites/rbd/librbd/workloads/qemu_fsstress.yaml b/qa/suites/rbd/librbd/workloads/qemu_fsstress.yaml new file mode 100644 index 00000000000..bae9e009935 --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/qemu_fsstress.yaml @@ -0,0 +1,5 @@ +tasks: +- qemu: + all: + test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/fsstress.sh;h=firefly +exclude_arch: armv7l diff --git a/qa/suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled b/qa/suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled new file mode 100644 index 00000000000..3dae6e78a2f --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled @@ -0,0 +1,6 @@ +tasks: +- qemu: + all: + test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/iozone.sh + image_size: 20480 +exclude_arch: armv7l diff --git a/qa/suites/rbd/librbd/workloads/qemu_xfstests.yaml b/qa/suites/rbd/librbd/workloads/qemu_xfstests.yaml new file mode 100644 index 00000000000..c4b2327cc8e --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/qemu_xfstests.yaml @@ -0,0 +1,7 @@ +tasks: +- qemu: + all: + type: block + num_rbd: 2 + test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/run_xfstests_qemu.sh +exclude_arch: armv7l diff --git a/qa/suites/rbd/singleton/% b/qa/suites/rbd/singleton/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rbd/singleton/all/formatted-output.yaml b/qa/suites/rbd/singleton/all/formatted-output.yaml new file mode 100644 index 00000000000..de930bc02bd --- /dev/null +++ b/qa/suites/rbd/singleton/all/formatted-output.yaml @@ -0,0 +1,10 @@ +roles: +- [mon.a, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: +- cram: + clients: + client.0: + - http://git.ceph.com/?p=ceph.git;a=blob_plain;hb=firefly;f=src/test/cli-integration/rbd/formatted-output.t + diff --git a/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml b/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml new file mode 100644 index 00000000000..2771d4e8db1 --- /dev/null +++ b/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml @@ -0,0 +1,12 @@ +exclude_arch: armv7l +roles: +- [mon.a, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + conf: + client: + rbd cache: false +- workunit: + clients: + all: [rbd/qemu-iotests.sh] diff --git a/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml b/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml new file mode 100644 index 00000000000..f6768df5a22 --- /dev/null +++ b/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml @@ -0,0 +1,12 @@ +exclude_arch: armv7l +roles: +- [mon.a, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + conf: + client: + rbd cache: true +- workunit: + clients: + all: [rbd/qemu-iotests.sh] diff --git a/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml b/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml new file mode 100644 index 00000000000..287509e4953 --- /dev/null +++ b/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml @@ -0,0 +1,13 @@ +exclude_arch: armv7l +roles: +- [mon.a, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + conf: + client: + rbd cache: true + rbd cache max dirty: 0 +- workunit: + clients: + all: [rbd/qemu-iotests.sh] diff --git a/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml b/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml new file mode 100644 index 00000000000..c5230d0554e --- /dev/null +++ 
b/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml @@ -0,0 +1,10 @@ +roles: +- [mon.a, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - mon/rbd_snaps_ops.sh + diff --git a/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml b/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml new file mode 100644 index 00000000000..f7d44456d3b --- /dev/null +++ b/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml @@ -0,0 +1,11 @@ +roles: +- [mon.a, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + conf: + client: + rbd cache: false +- workunit: + clients: + all: [rbd/read-flags.sh] diff --git a/qa/suites/rbd/singleton/all/read-flags-writeback.yaml b/qa/suites/rbd/singleton/all/read-flags-writeback.yaml new file mode 100644 index 00000000000..f25be79e0b6 --- /dev/null +++ b/qa/suites/rbd/singleton/all/read-flags-writeback.yaml @@ -0,0 +1,11 @@ +roles: +- [mon.a, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + conf: + client: + rbd cache: true +- workunit: + clients: + all: [rbd/read-flags.sh] diff --git a/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml b/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml new file mode 100644 index 00000000000..80d7b4254b6 --- /dev/null +++ b/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml @@ -0,0 +1,12 @@ +roles: +- [mon.a, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + conf: + client: + rbd cache: true + rbd cache max dirty: 0 +- workunit: + clients: + all: [rbd/read-flags.sh] diff --git a/qa/suites/rbd/thrash/% b/qa/suites/rbd/thrash/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rbd/thrash/base/install.yaml b/qa/suites/rbd/thrash/base/install.yaml new file mode 100644 index 00000000000..2030acb9083 --- /dev/null +++ b/qa/suites/rbd/thrash/base/install.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rbd/thrash/clusters/fixed-2.yaml b/qa/suites/rbd/thrash/clusters/fixed-2.yaml new file mode 120000 index 00000000000..cd0791a1486 --- /dev/null +++ b/qa/suites/rbd/thrash/clusters/fixed-2.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rbd/thrash/fs/btrfs.yaml b/qa/suites/rbd/thrash/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/rbd/thrash/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/rbd/thrash/fs/xfs.yaml b/qa/suites/rbd/thrash/fs/xfs.yaml new file mode 120000 index 00000000000..4c28d731f6b --- /dev/null +++ b/qa/suites/rbd/thrash/fs/xfs.yaml @@ -0,0 +1 @@ +../../../../fs/xfs.yaml \ No newline at end of file diff --git a/qa/suites/rbd/thrash/msgr-failures/few.yaml b/qa/suites/rbd/thrash/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/rbd/thrash/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/rbd/thrash/thrashers/cache.yaml b/qa/suites/rbd/thrash/thrashers/cache.yaml new file mode 100644 index 00000000000..5bab78ee840 --- /dev/null +++ b/qa/suites/rbd/thrash/thrashers/cache.yaml @@ -0,0 +1,18 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +tasks: +- exec: + client.0: + - ceph osd pool create cache 4 + - ceph osd tier add rbd cache + - ceph osd tier cache-mode cache writeback + - ceph osd tier set-overlay rbd cache + - ceph osd pool set cache hit_set_type bloom + - 
ceph osd pool set cache hit_set_count 8 + - ceph osd pool set cache hit_set_period 60 + - ceph osd pool set cache target_max_objects 250 +- thrashosds: + timeout: 1200 diff --git a/qa/suites/rbd/thrash/thrashers/default.yaml b/qa/suites/rbd/thrash/thrashers/default.yaml new file mode 100644 index 00000000000..89c9bdfb0e5 --- /dev/null +++ b/qa/suites/rbd/thrash/thrashers/default.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 diff --git a/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml b/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml new file mode 100644 index 00000000000..188ddc56c60 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml new file mode 100644 index 00000000000..bd812695c83 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml @@ -0,0 +1,9 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 2000 +overrides: + ceph: + conf: + client: + rbd cache: true diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml new file mode 100644 index 00000000000..56895298025 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml @@ -0,0 +1,10 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 2000 +overrides: + ceph: + conf: + client: + rbd cache: true + rbd cache max dirty: 0 diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml new file mode 100644 index 00000000000..6c5e0e45707 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml @@ -0,0 +1,9 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 2000 +overrides: + ceph: + conf: + client: + rbd cache: false diff --git a/qa/suites/rest/basic/tasks/rest_test.yaml b/qa/suites/rest/basic/tasks/rest_test.yaml new file mode 100644 index 00000000000..8ed1918ce7a --- /dev/null +++ b/qa/suites/rest/basic/tasks/rest_test.yaml @@ -0,0 +1,24 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 + - client.0 + +tasks: +- install: +- ceph: +- rest-api: [client.0] +- workunit: + clients: + client.0: + - rest/test.py diff --git a/qa/suites/rgw/multifs/% b/qa/suites/rgw/multifs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rgw/multifs/clusters/fixed-2.yaml b/qa/suites/rgw/multifs/clusters/fixed-2.yaml new file mode 120000 index 00000000000..cd0791a1486 --- /dev/null +++ b/qa/suites/rgw/multifs/clusters/fixed-2.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rgw/multifs/fs/btrfs.yaml b/qa/suites/rgw/multifs/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/rgw/multifs/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/rgw/multifs/fs/ext4.yaml b/qa/suites/rgw/multifs/fs/ext4.yaml new file mode 120000 index 00000000000..65d71886933 --- /dev/null +++ b/qa/suites/rgw/multifs/fs/ext4.yaml @@ -0,0 +1 @@ +../../../../fs/ext4.yaml \ No newline at end of file diff --git 
a/qa/suites/rgw/multifs/fs/xfs.yaml b/qa/suites/rgw/multifs/fs/xfs.yaml new file mode 120000 index 00000000000..4c28d731f6b --- /dev/null +++ b/qa/suites/rgw/multifs/fs/xfs.yaml @@ -0,0 +1 @@ +../../../../fs/xfs.yaml \ No newline at end of file diff --git a/qa/suites/rgw/multifs/rgw_pool_type b/qa/suites/rgw/multifs/rgw_pool_type new file mode 120000 index 00000000000..0506f616ce2 --- /dev/null +++ b/qa/suites/rgw/multifs/rgw_pool_type @@ -0,0 +1 @@ +../../../rgw_pool_type \ No newline at end of file diff --git a/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml b/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml new file mode 100644 index 00000000000..767debdf3c8 --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- workunit: + clients: + client.0: + - rgw/s3_bucket_quota.pl diff --git a/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml b/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml new file mode 100644 index 00000000000..1781dee096b --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- workunit: + clients: + client.0: + - rgw/s3_multipart_upload.pl diff --git a/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml b/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml new file mode 100644 index 00000000000..c7efaa1c757 --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml @@ -0,0 +1,16 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- s3readwrite: + client.0: + rgw_server: client.0 + readwrite: + bucket: rwtest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml b/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml new file mode 100644 index 00000000000..47b3c1894a2 --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml @@ -0,0 +1,16 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- s3roundtrip: + client.0: + rgw_server: client.0 + roundtrip: + bucket: rttest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml b/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml new file mode 100644 index 00000000000..62608773a2a --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- s3tests: + client.0: + rgw_server: client.0 diff --git a/qa/suites/rgw/multifs/tasks/rgw_swift.yaml b/qa/suites/rgw/multifs/tasks/rgw_swift.yaml new file mode 100644 index 00000000000..569741b0e15 --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_swift.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- swift: + client.0: + rgw_server: client.0 diff --git a/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml b/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml new file mode 100644 index 00000000000..c2c38a816cc --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- workunit: + clients: + client.0: + - rgw/s3_user_quota.pl diff --git a/qa/suites/rgw/singleton/% b/qa/suites/rgw/singleton/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rgw/singleton/all/radosgw-admin-data-sync.yaml b/qa/suites/rgw/singleton/all/radosgw-admin-data-sync.yaml new file mode 100644 index 00000000000..0e61941718e --- /dev/null +++ 
b/qa/suites/rgw/singleton/all/radosgw-admin-data-sync.yaml @@ -0,0 +1,56 @@ +roles: +- [mon.a, osd.0, osd.1, client.0, client.1] +tasks: +- install: +- ceph: + conf: + client: + debug ms: 1 + rgw gc obj min wait: 15 + rgw data log window: 30 + osd: + debug ms: 1 + debug objclass : 20 + client.0: + rgw region: region0 + rgw zone: r0z0 + rgw region root pool: .rgw.region.0 + rgw zone root pool: .rgw.zone.0 + rgw gc pool: .rgw.gc.0 + rgw user uid pool: .users.uid.0 + rgw user keys pool: .users.0 + rgw log data: True + rgw log meta: True + client.1: + rgw region: region0 + rgw zone: r0z1 + rgw region root pool: .rgw.region.0 + rgw zone root pool: .rgw.zone.1 + rgw gc pool: .rgw.gc.1 + rgw user uid pool: .users.uid.1 + rgw user keys pool: .users.1 + rgw log data: False + rgw log meta: False +- rgw: + regions: + region0: + api name: api1 + is master: True + master zone: r0z0 + zones: [r0z0, r0z1] + client.0: + system user: + name: client0-system-user + access key: 0te6NH5mcdcq0Tc5i8i2 + secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv + client.1: + system user: + name: client1-system-user + access key: 1te6NH5mcdcq0Tc5i8i3 + secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw +- radosgw-agent: + client.0: + max-entries: 10 + src: client.0 + dest: client.1 +- radosgw-admin: diff --git a/qa/suites/rgw/singleton/all/radosgw-admin-multi-region.yaml b/qa/suites/rgw/singleton/all/radosgw-admin-multi-region.yaml new file mode 100644 index 00000000000..05aed994d93 --- /dev/null +++ b/qa/suites/rgw/singleton/all/radosgw-admin-multi-region.yaml @@ -0,0 +1,61 @@ +roles: +- [mon.a, mds.a, osd.0, osd.1, client.0] +- [mon.b, mon.c, osd.2, osd.3, client.1] +tasks: +- install: +- ceph: + conf: + client: + debug ms: 1 + rgw gc obj min wait: 15 + osd: + debug ms: 1 + debug objclass : 20 + client.0: + rgw region: region0 + rgw zone: r0z1 + rgw region root pool: .rgw.region.0 + rgw zone root pool: .rgw.zone.0 + rgw gc pool: .rgw.gc.0 + rgw user uid pool: .users.uid.0 + rgw user keys pool: .users.0 + rgw log data: True + rgw log meta: True + client.1: + rgw region: region1 + rgw zone: r1z1 + rgw region root pool: .rgw.region.1 + rgw zone root pool: .rgw.zone.1 + rgw gc pool: .rgw.gc.1 + rgw user uid pool: .users.uid.1 + rgw user keys pool: .users.1 + rgw log data: False + rgw log meta: False +- rgw: + regions: + region0: + api name: api1 + is master: True + master zone: r0z1 + zones: [r0z1] + region1: + api name: api1 + is master: False + master zone: r1z1 + zones: [r1z1] + client.0: + system user: + name: client0-system-user + access key: 0te6NH5mcdcq0Tc5i8i2 + secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv + client.1: + system user: + name: client1-system-user + access key: 1te6NH5mcdcq0Tc5i8i3 + secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw +- radosgw-agent: + client.0: + src: client.0 + dest: client.1 + metadata-only: true +- radosgw-admin: diff --git a/qa/suites/rgw/singleton/all/radosgw-admin.yaml b/qa/suites/rgw/singleton/all/radosgw-admin.yaml new file mode 100644 index 00000000000..67aa5f92efe --- /dev/null +++ b/qa/suites/rgw/singleton/all/radosgw-admin.yaml @@ -0,0 +1,15 @@ +roles: +- [mon.a, mds.a, osd.0, client.0, osd.1] +tasks: +- install: +- ceph: + conf: + client: + debug ms: 1 + rgw gc obj min wait: 15 + osd: + debug ms: 1 + debug objclass : 20 +- rgw: + client.0: +- radosgw-admin: diff --git a/qa/suites/rgw/singleton/all/radosgw-convert-to-region.yaml b/qa/suites/rgw/singleton/all/radosgw-convert-to-region.yaml new file mode 100644 index 00000000000..4b6d7469ef8 --- /dev/null +++ 
b/qa/suites/rgw/singleton/all/radosgw-convert-to-region.yaml @@ -0,0 +1,67 @@ +overrides: + s3readwrite: + s3: + user_id: s3readwrite-test-user + display_name: test user for the s3readwrite tests + email: tester@inktank + access_key: 2te6NH5mcdcq0Tc5i8i4 + secret_key: Qy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXx + readwrite: + deterministic_file_names: True + duration: 30 + bucket: testbucket + files: + num: 10 + size: 2000 + stddev: 500 +roles: +- [mon.a, mds.a, osd.0, osd.1, client.0] +- [mon.b, mon.c, osd.2, osd.3, client.1] + +tasks: +- install: +- ceph: + conf: + client.1: + rgw region: default + rgw zone: r1z1 + rgw region root pool: .rgw + rgw zone root pool: .rgw + rgw domain root: .rgw + rgw gc pool: .rgw.gc + rgw user uid pool: .users.uid + rgw user keys pool: .users +- rgw: + client.0: + system user: + name: nr-system + access key: 0te6NH5mcdcq0Tc5i8i2 + secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv +- s3readwrite: + client.0: + extra_args: ['--no-cleanup'] + s3: + delete_user: False + readwrite: + writers: 1 + readers: 0 +- rgw: + regions: + default: + api name: api1 + is master: true + master zone: r1z1 + zones: [r1z1] + client.1: + system user: + name: r2-system + access key: 1te6NH5mcdcq0Tc5i8i3 + secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw +- s3readwrite: + client.1: + s3: + create_user: False + readwrite: + writers: 0 + readers: 2 + diff --git a/qa/suites/rgw/singleton/rgw_pool_type b/qa/suites/rgw/singleton/rgw_pool_type new file mode 120000 index 00000000000..77fa7e71b78 --- /dev/null +++ b/qa/suites/rgw/singleton/rgw_pool_type @@ -0,0 +1 @@ +../../../rgw_pool_type/ \ No newline at end of file diff --git a/qa/suites/rgw/verify/% b/qa/suites/rgw/verify/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/rgw/verify/clusters/fixed-2.yaml b/qa/suites/rgw/verify/clusters/fixed-2.yaml new file mode 120000 index 00000000000..cd0791a1486 --- /dev/null +++ b/qa/suites/rgw/verify/clusters/fixed-2.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rgw/verify/fs/btrfs.yaml b/qa/suites/rgw/verify/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/rgw/verify/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/rgw/verify/msgr-failures/few.yaml b/qa/suites/rgw/verify/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/rgw/verify/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/rgw/verify/rgw_pool_type b/qa/suites/rgw/verify/rgw_pool_type new file mode 120000 index 00000000000..77fa7e71b78 --- /dev/null +++ b/qa/suites/rgw/verify/rgw_pool_type @@ -0,0 +1 @@ +../../../rgw_pool_type/ \ No newline at end of file diff --git a/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml b/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml new file mode 100644 index 00000000000..c23a2cbf4ec --- /dev/null +++ b/qa/suites/rgw/verify/tasks/rgw_s3tests.yaml @@ -0,0 +1,10 @@ +tasks: +- install: + flavor: notcmalloc +- ceph: +- rgw: + client.0: + valgrind: [--tool=memcheck] +- s3tests: + client.0: + rgw_server: client.0 diff --git a/qa/suites/rgw/verify/tasks/rgw_s3tests_multiregion.yaml b/qa/suites/rgw/verify/tasks/rgw_s3tests_multiregion.yaml new file mode 100644 index 00000000000..399f4aac2f6 --- /dev/null +++ b/qa/suites/rgw/verify/tasks/rgw_s3tests_multiregion.yaml @@ -0,0 +1,59 @@ +tasks: +- 
install: + flavor: notcmalloc +- ceph: + conf: + client.0: + rgw region: zero + rgw zone: r0z1 + rgw region root pool: .rgw.region.0 + rgw zone root pool: .rgw.zone.0 + rgw gc pool: .rgw.gc.0 + rgw user uid pool: .users.uid.0 + rgw user keys pool: .users.0 + rgw log data: True + rgw log meta: True + client.1: + rgw region: one + rgw zone: r1z1 + rgw region root pool: .rgw.region.1 + rgw zone root pool: .rgw.zone.1 + rgw gc pool: .rgw.gc.1 + rgw user uid pool: .users.uid.1 + rgw user keys pool: .users.1 + rgw log data: False + rgw log meta: False +- rgw: + default_idle_timeout: 300 + regions: + zero: + api name: api1 + is master: True + master zone: r0z1 + zones: [r0z1] + one: + api name: api1 + is master: False + master zone: r1z1 + zones: [r1z1] + client.0: + valgrind: [--tool=memcheck] + system user: + name: client0-system-user + access key: 1te6NH5mcdcq0Tc5i8i2 + secret key: 1y4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv + client.1: + valgrind: [--tool=memcheck] + system user: + name: client1-system-user + access key: 0te6NH5mcdcq0Tc5i8i2 + secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv +- radosgw-agent: + client.0: + src: client.0 + dest: client.1 + metadata-only: true +- s3tests: + client.0: + idle_timeout: 300 + rgw_server: client.0 diff --git a/qa/suites/rgw/verify/tasks/rgw_swift.yaml b/qa/suites/rgw/verify/tasks/rgw_swift.yaml new file mode 100644 index 00000000000..792fb848a9e --- /dev/null +++ b/qa/suites/rgw/verify/tasks/rgw_swift.yaml @@ -0,0 +1,10 @@ +tasks: +- install: + flavor: notcmalloc +- ceph: +- rgw: + client.0: + valgrind: [--tool=memcheck] +- swift: + client.0: + rgw_server: client.0 diff --git a/qa/suites/rgw/verify/validater/lockdep.yaml b/qa/suites/rgw/verify/validater/lockdep.yaml new file mode 100644 index 00000000000..941fe12b1e4 --- /dev/null +++ b/qa/suites/rgw/verify/validater/lockdep.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + osd: + lockdep: true + mon: + lockdep: true diff --git a/qa/suites/rgw/verify/validater/valgrind.yaml b/qa/suites/rgw/verify/validater/valgrind.yaml new file mode 100644 index 00000000000..7b8f7a28629 --- /dev/null +++ b/qa/suites/rgw/verify/validater/valgrind.yaml @@ -0,0 +1,9 @@ +overrides: + install: + ceph: + flavor: notcmalloc + ceph: + valgrind: + mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] + osd: [--tool=memcheck] + mds: [--tool=memcheck] diff --git a/qa/suites/samba/% b/qa/suites/samba/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/samba/clusters/samba-basic.yaml b/qa/suites/samba/clusters/samba-basic.yaml new file mode 100644 index 00000000000..caced4a26d1 --- /dev/null +++ b/qa/suites/samba/clusters/samba-basic.yaml @@ -0,0 +1,3 @@ +roles: +- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1] +- [samba.0, client.0, client.1] diff --git a/qa/suites/samba/debug/mds_client.yaml b/qa/suites/samba/debug/mds_client.yaml new file mode 120000 index 00000000000..2550b024ded --- /dev/null +++ b/qa/suites/samba/debug/mds_client.yaml @@ -0,0 +1 @@ +../../../debug/mds_client.yaml \ No newline at end of file diff --git a/qa/suites/samba/fs/btrfs.yaml b/qa/suites/samba/fs/btrfs.yaml new file mode 120000 index 00000000000..ea693ab0b42 --- /dev/null +++ b/qa/suites/samba/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/samba/install/install.yaml b/qa/suites/samba/install/install.yaml new file mode 100644 index 00000000000..c53f9c55b17 --- /dev/null +++ b/qa/suites/samba/install/install.yaml @@ -0,0 +1,9 @@ +# we currently can't install 
Samba on RHEL; need a gitbuilder and code updates +os_type: ubuntu + +tasks: +- install: +- install: + project: samba + extra_packages: ['samba'] +- ceph: diff --git a/qa/suites/samba/mount/fuse.yaml b/qa/suites/samba/mount/fuse.yaml new file mode 100644 index 00000000000..d00ffdb4804 --- /dev/null +++ b/qa/suites/samba/mount/fuse.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: [client.0] +- samba: + samba.0: + ceph: "{testdir}/mnt.0" + diff --git a/qa/suites/samba/mount/kclient.yaml b/qa/suites/samba/mount/kclient.yaml new file mode 100644 index 00000000000..14fee85d266 --- /dev/null +++ b/qa/suites/samba/mount/kclient.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- kclient: [client.0] +- samba: + samba.0: + ceph: "{testdir}/mnt.0" + diff --git a/qa/suites/samba/mount/native.yaml b/qa/suites/samba/mount/native.yaml new file mode 100644 index 00000000000..09b8c1c4e3d --- /dev/null +++ b/qa/suites/samba/mount/native.yaml @@ -0,0 +1,2 @@ +tasks: +- samba: diff --git a/qa/suites/samba/mount/noceph.yaml b/qa/suites/samba/mount/noceph.yaml new file mode 100644 index 00000000000..3cad4740d8b --- /dev/null +++ b/qa/suites/samba/mount/noceph.yaml @@ -0,0 +1,5 @@ +tasks: +- localdir: [client.0] +- samba: + samba.0: + ceph: "{testdir}/mnt.0" diff --git a/qa/suites/samba/workload/cifs-dbench.yaml b/qa/suites/samba/workload/cifs-dbench.yaml new file mode 100644 index 00000000000..c13c1c099e5 --- /dev/null +++ b/qa/suites/samba/workload/cifs-dbench.yaml @@ -0,0 +1,8 @@ +tasks: +- cifs-mount: + client.1: + share: ceph +- workunit: + clients: + client.1: + - suites/dbench.sh diff --git a/qa/suites/samba/workload/cifs-fsstress.yaml b/qa/suites/samba/workload/cifs-fsstress.yaml new file mode 100644 index 00000000000..ff003af3433 --- /dev/null +++ b/qa/suites/samba/workload/cifs-fsstress.yaml @@ -0,0 +1,8 @@ +tasks: +- cifs-mount: + client.1: + share: ceph +- workunit: + clients: + client.1: + - suites/fsstress.sh diff --git a/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled b/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled new file mode 100644 index 00000000000..ab9ff8ac731 --- /dev/null +++ b/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled @@ -0,0 +1,9 @@ +tasks: +- cifs-mount: + client.1: + share: ceph +- workunit: + clients: + client.1: + - kernel_untar_build.sh + diff --git a/qa/suites/samba/workload/smbtorture.yaml b/qa/suites/samba/workload/smbtorture.yaml new file mode 100644 index 00000000000..823489a2082 --- /dev/null +++ b/qa/suites/samba/workload/smbtorture.yaml @@ -0,0 +1,39 @@ +tasks: +- pexec: + client.1: + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos + - 
/usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb +# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon +# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl +# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid diff --git a/qa/suites/smoke/basic/% b/qa/suites/smoke/basic/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/smoke/basic/clusters/fixed-3.yaml b/qa/suites/smoke/basic/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/smoke/basic/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/smoke/basic/fs/btrfs.yaml b/qa/suites/smoke/basic/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/smoke/basic/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml new file mode 100644 index 00000000000..ed9d92d5bda --- /dev/null +++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..b58487c0785 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- 
workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml new file mode 100644 index 00000000000..dc6df2f709f --- /dev/null +++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: [client.0] +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000000..347c7fdf04c --- /dev/null +++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml new file mode 100644 index 00000000000..21820071dbc --- /dev/null +++ b/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - direct_io diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml new file mode 100644 index 00000000000..cda94a38f6d --- /dev/null +++ b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..64bfc5f3811 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml new file mode 100644 index 00000000000..272610b2915 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml b/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml new file mode 100644 index 00000000000..22d1f142161 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - libcephfs/test.sh diff --git a/qa/suites/smoke/basic/tasks/rados_python.yaml b/qa/suites/smoke/basic/tasks/rados_python.yaml new file mode 100644 index 00000000000..b9ac20e57f0 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rados_python.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - rados/test_python.sh diff --git a/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml b/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml new file mode 100644 index 
00000000000..716deac2156 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - rados/load-gen-mix.sh diff --git a/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml b/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml new file mode 100644 index 00000000000..46e43b98ab4 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/smoke/basic/tasks/rbd_api_tests_old_format.yaml b/qa/suites/smoke/basic/tasks/rbd_api_tests_old_format.yaml new file mode 100644 index 00000000000..390b9c034f1 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_api_tests_old_format.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh diff --git a/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml b/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml new file mode 100644 index 00000000000..df23dc58f33 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/smoke/basic/tasks/rbd_cli_import_export_old_format.yaml b/qa/suites/smoke/basic/tasks/rbd_cli_import_export_old_format.yaml new file mode 100644 index 00000000000..c870ad42153 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_cli_import_export_old_format.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - rbd/import_export.sh diff --git a/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml b/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml new file mode 100644 index 00000000000..9714a6e40ee --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/smoke/basic/tasks/rbd_python_api_tests_old_format.yaml b/qa/suites/smoke/basic/tasks/rbd_python_api_tests_old_format.yaml new file mode 100644 index 00000000000..642175f7766 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_python_api_tests_old_format.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh diff --git a/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml b/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml new file mode 100644 index 00000000000..461a59a1df0 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: +- ceph: +- rbd: + all: + image_size: 20480 +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/smoke/multiclient/% b/qa/suites/smoke/multiclient/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/smoke/multiclient/clusters/two_clients.yaml b/qa/suites/smoke/multiclient/clusters/two_clients.yaml new file mode 100644 index 00000000000..d062b8ce040 --- /dev/null +++ b/qa/suites/smoke/multiclient/clusters/two_clients.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1] +- [client.1] +- [client.0] diff 
--git a/qa/suites/smoke/multiclient/fs/btrfs.yaml b/qa/suites/smoke/multiclient/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/smoke/multiclient/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/smoke/multiclient/tasks/locktest.yaml b/qa/suites/smoke/multiclient/tasks/locktest.yaml new file mode 100644 index 00000000000..444bb1f19b3 --- /dev/null +++ b/qa/suites/smoke/multiclient/tasks/locktest.yaml @@ -0,0 +1,5 @@ +tasks: +- install: +- ceph: +- kclient: +- locktest: [client.0, client.1] diff --git a/qa/suites/smoke/multifs/% b/qa/suites/smoke/multifs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/smoke/multifs/clusters/fixed-3.yaml b/qa/suites/smoke/multifs/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/smoke/multifs/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/smoke/multifs/fs/btrfs.yaml b/qa/suites/smoke/multifs/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/smoke/multifs/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/smoke/multifs/tasks/rgw_s3tests.yaml b/qa/suites/smoke/multifs/tasks/rgw_s3tests.yaml new file mode 100644 index 00000000000..62608773a2a --- /dev/null +++ b/qa/suites/smoke/multifs/tasks/rgw_s3tests.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- s3tests: + client.0: + rgw_server: client.0 diff --git a/qa/suites/smoke/multimon/% b/qa/suites/smoke/multimon/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/smoke/multimon/clusters/6.yaml b/qa/suites/smoke/multimon/clusters/6.yaml new file mode 100644 index 00000000000..662fc92b8de --- /dev/null +++ b/qa/suites/smoke/multimon/clusters/6.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.d, osd.0] +- [mon.b, mon.e, mds.a] +- [mon.c, mon.f, osd.1] diff --git a/qa/suites/smoke/multimon/tasks/mon_recovery.yaml b/qa/suites/smoke/multimon/tasks/mon_recovery.yaml new file mode 100644 index 00000000000..94721ea53a4 --- /dev/null +++ b/qa/suites/smoke/multimon/tasks/mon_recovery.yaml @@ -0,0 +1,4 @@ +tasks: +- install: +- ceph: +- mon_recovery: diff --git a/qa/suites/smoke/singleton/% b/qa/suites/smoke/singleton/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/smoke/singleton/all/filestore-idempotent.yaml b/qa/suites/smoke/singleton/all/filestore-idempotent.yaml new file mode 100644 index 00000000000..c6af200d57f --- /dev/null +++ b/qa/suites/smoke/singleton/all/filestore-idempotent.yaml @@ -0,0 +1,6 @@ +roles: +- [mon.0, osd.0, osd.1, mds.a, client.0] +tasks: +- install: +- ceph: +- filestore_idempotent: diff --git a/qa/suites/smoke/singleton/all/osd-backfill.yaml b/qa/suites/smoke/singleton/all/osd-backfill.yaml new file mode 100644 index 00000000000..77a79440f55 --- /dev/null +++ b/qa/suites/smoke/singleton/all/osd-backfill.yaml @@ -0,0 +1,17 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + conf: + osd: + osd min pg log entries: 5 +- osd_backfill: diff --git a/qa/suites/smoke/singleton/all/thrash-rados.yaml b/qa/suites/smoke/singleton/all/thrash-rados.yaml new file mode 100644 index 00000000000..157f0f71cc8 --- /dev/null +++ b/qa/suites/smoke/singleton/all/thrash-rados.yaml @@ -0,0 
+1,23 @@ +roles: +- - mon.a + - mds.0 + - osd.0 +- - osd.1 +- - osd.2 +- - osd.3 +- - osd.4 +- - client.0 +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down +- thrashosds: + op_delay: 30 + clean_interval: 120 + chance_down: .5 +- ceph-fuse: +- workunit: + clients: + all: + - rados/load-gen-mix-small.sh diff --git a/qa/suites/smoke/singleton/fs/btrfs.yaml b/qa/suites/smoke/singleton/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/smoke/singleton/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/smoke/thrash/% b/qa/suites/smoke/thrash/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/smoke/thrash/clusters/6-osd-3-machine.yaml b/qa/suites/smoke/thrash/clusters/6-osd-3-machine.yaml new file mode 100644 index 00000000000..f6247ebf2f3 --- /dev/null +++ b/qa/suites/smoke/thrash/clusters/6-osd-3-machine.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, osd.0, osd.1, osd.2] +- [mds.a, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/smoke/thrash/fs/xfs.yaml b/qa/suites/smoke/thrash/fs/xfs.yaml new file mode 120000 index 00000000000..4c28d731f6b --- /dev/null +++ b/qa/suites/smoke/thrash/fs/xfs.yaml @@ -0,0 +1 @@ +../../../../fs/xfs.yaml \ No newline at end of file diff --git a/qa/suites/smoke/thrash/thrashers/default.yaml b/qa/suites/smoke/thrash/thrashers/default.yaml new file mode 100644 index 00000000000..14d772583cf --- /dev/null +++ b/qa/suites/smoke/thrash/thrashers/default.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +- thrashosds: diff --git a/qa/suites/smoke/thrash/workloads/admin_socket_objecter_requests.yaml b/qa/suites/smoke/thrash/workloads/admin_socket_objecter_requests.yaml new file mode 100644 index 00000000000..66791551fb0 --- /dev/null +++ b/qa/suites/smoke/thrash/workloads/admin_socket_objecter_requests.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + client.0: + admin socket: /var/run/ceph/ceph-$name.asok +tasks: +- radosbench: + clients: [client.0] + time: 60 +- admin_socket: + client.0: + objecter_requests: + test: "http://ceph.newdream.net/git/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}" diff --git a/qa/suites/smoke/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled b/qa/suites/smoke/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled new file mode 100644 index 00000000000..d61ede1bd66 --- /dev/null +++ b/qa/suites/smoke/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled @@ -0,0 +1,8 @@ +tasks: +- rbd: + all: + image_size: 20480 +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/smoke/thrash/workloads/snaps-few-objects.yaml b/qa/suites/smoke/thrash/workloads/snaps-few-objects.yaml new file mode 100644 index 00000000000..aa82d973ae1 --- /dev/null +++ b/qa/suites/smoke/thrash/workloads/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/smoke/verify/% b/qa/suites/smoke/verify/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/smoke/verify/clusters/fixed-3.yaml b/qa/suites/smoke/verify/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/smoke/verify/clusters/fixed-3.yaml @@ -0,0 +1 @@ 
+../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/smoke/verify/fs/btrfs.yaml b/qa/suites/smoke/verify/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/smoke/verify/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/smoke/verify/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/smoke/verify/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000000..b58487c0785 --- /dev/null +++ b/qa/suites/smoke/verify/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/smoke/verify/tasks/libcephfs_interface_tests.yaml b/qa/suites/smoke/verify/tasks/libcephfs_interface_tests.yaml new file mode 100644 index 00000000000..22d1f142161 --- /dev/null +++ b/qa/suites/smoke/verify/tasks/libcephfs_interface_tests.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - libcephfs/test.sh diff --git a/qa/suites/smoke/verify/tasks/mon_recovery.yaml b/qa/suites/smoke/verify/tasks/mon_recovery.yaml new file mode 100644 index 00000000000..94721ea53a4 --- /dev/null +++ b/qa/suites/smoke/verify/tasks/mon_recovery.yaml @@ -0,0 +1,4 @@ +tasks: +- install: +- ceph: +- mon_recovery: diff --git a/qa/suites/smoke/verify/tasks/rados_api_tests.yaml b/qa/suites/smoke/verify/tasks/rados_api_tests.yaml new file mode 100644 index 00000000000..c154219bc1b --- /dev/null +++ b/qa/suites/smoke/verify/tasks/rados_api_tests.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/smoke/verify/tasks/rados_cls_all.yaml b/qa/suites/smoke/verify/tasks/rados_cls_all.yaml new file mode 100644 index 00000000000..80be56276db --- /dev/null +++ b/qa/suites/smoke/verify/tasks/rados_cls_all.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - cls diff --git a/qa/suites/smoke/verify/tasks/rgw_s3tests.yaml b/qa/suites/smoke/verify/tasks/rgw_s3tests.yaml new file mode 100644 index 00000000000..7e5b409f390 --- /dev/null +++ b/qa/suites/smoke/verify/tasks/rgw_s3tests.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- rgw: + client.0: + valgrind: [--tool=memcheck] +- s3tests: + default_idle_timeout: 300 + client.0: + rgw_server: client.0 diff --git a/qa/suites/smoke/verify/validater/lockdep.yaml b/qa/suites/smoke/verify/validater/lockdep.yaml new file mode 100644 index 00000000000..25f84355c0b --- /dev/null +++ b/qa/suites/smoke/verify/validater/lockdep.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + lockdep: true diff --git a/qa/suites/smoke/verify/validater/valgrind.yaml b/qa/suites/smoke/verify/validater/valgrind.yaml new file mode 100644 index 00000000000..518d72b0ffe --- /dev/null +++ b/qa/suites/smoke/verify/validater/valgrind.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + valgrind: + mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] + osd: [--tool=memcheck] + mds: [--tool=memcheck] diff --git a/qa/suites/stress/bench/% b/qa/suites/stress/bench/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/stress/bench/clusters/fixed-3.yaml b/qa/suites/stress/bench/clusters/fixed-3.yaml new file mode 120000 index 00000000000..a3ac9fc4dec --- /dev/null +++ b/qa/suites/stress/bench/clusters/fixed-3.yaml @@ -0,0 +1 @@ +../../../../clusters/fixed-3.yaml 
\ No newline at end of file diff --git a/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml b/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml new file mode 100644 index 00000000000..eafec39e3d0 --- /dev/null +++ b/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - snaps diff --git a/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml b/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml new file mode 100644 index 00000000000..a0d2e765bdb --- /dev/null +++ b/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/stress/thrash/% b/qa/suites/stress/thrash/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/stress/thrash/clusters/16-osd.yaml b/qa/suites/stress/thrash/clusters/16-osd.yaml new file mode 100644 index 00000000000..373dd4052c3 --- /dev/null +++ b/qa/suites/stress/thrash/clusters/16-osd.yaml @@ -0,0 +1,18 @@ +roles: +- [mon.0, mds.a, osd.0] +- [mon.1, osd.1] +- [mon.2, osd.2] +- [osd.3] +- [osd.4] +- [osd.5] +- [osd.6] +- [osd.7] +- [osd.8] +- [osd.9] +- [osd.10] +- [osd.11] +- [osd.12] +- [osd.13] +- [osd.14] +- [osd.15] +- [client.0] diff --git a/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml b/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml new file mode 100644 index 00000000000..d8ff594b95d --- /dev/null +++ b/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml @@ -0,0 +1,3 @@ +roles: +- [mon.0, mds.a, osd.0, osd.1, osd.2] +- [mon.1, mon.2, client.0] diff --git a/qa/suites/stress/thrash/clusters/8-osd.yaml b/qa/suites/stress/thrash/clusters/8-osd.yaml new file mode 100644 index 00000000000..3b131054e95 --- /dev/null +++ b/qa/suites/stress/thrash/clusters/8-osd.yaml @@ -0,0 +1,10 @@ +roles: +- [mon.0, mds.a, osd.0] +- [mon.1, osd.1] +- [mon.2, osd.2] +- [osd.3] +- [osd.4] +- [osd.5] +- [osd.6] +- [osd.7] +- [client.0] diff --git a/qa/suites/stress/thrash/fs/btrfs.yaml b/qa/suites/stress/thrash/fs/btrfs.yaml new file mode 120000 index 00000000000..10d0c3f1266 --- /dev/null +++ b/qa/suites/stress/thrash/fs/btrfs.yaml @@ -0,0 +1 @@ +../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/qa/suites/stress/thrash/fs/none.yaml b/qa/suites/stress/thrash/fs/none.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/stress/thrash/fs/xfs.yaml b/qa/suites/stress/thrash/fs/xfs.yaml new file mode 120000 index 00000000000..4c28d731f6b --- /dev/null +++ b/qa/suites/stress/thrash/fs/xfs.yaml @@ -0,0 +1 @@ +../../../../fs/xfs.yaml \ No newline at end of file diff --git a/qa/suites/stress/thrash/thrashers/default.yaml b/qa/suites/stress/thrash/thrashers/default.yaml new file mode 100644 index 00000000000..14d772583cf --- /dev/null +++ b/qa/suites/stress/thrash/thrashers/default.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +- thrashosds: diff --git a/qa/suites/stress/thrash/thrashers/fast.yaml b/qa/suites/stress/thrash/thrashers/fast.yaml new file mode 100644 index 00000000000..eea9c06cd90 --- /dev/null +++ b/qa/suites/stress/thrash/thrashers/fast.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +- thrashosds: + op_delay: 1 + chance_down: 10 diff --git 
a/qa/suites/stress/thrash/thrashers/more-down.yaml b/qa/suites/stress/thrash/thrashers/more-down.yaml new file mode 100644 index 00000000000..e39098b1cb6 --- /dev/null +++ b/qa/suites/stress/thrash/thrashers/more-down.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +- thrashosds: + chance_down: 50 diff --git a/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml b/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml new file mode 100644 index 00000000000..912f12d6ce7 --- /dev/null +++ b/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/bonnie.sh diff --git a/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml b/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml new file mode 100644 index 00000000000..18a6051be39 --- /dev/null +++ b/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/stress/thrash/workloads/radosbench.yaml b/qa/suites/stress/thrash/workloads/radosbench.yaml new file mode 100644 index 00000000000..3940870fce0 --- /dev/null +++ b/qa/suites/stress/thrash/workloads/radosbench.yaml @@ -0,0 +1,4 @@ +tasks: +- radosbench: + clients: [client.0] + time: 1800 diff --git a/qa/suites/stress/thrash/workloads/readwrite.yaml b/qa/suites/stress/thrash/workloads/readwrite.yaml new file mode 100644 index 00000000000..c53e52b0872 --- /dev/null +++ b/qa/suites/stress/thrash/workloads/readwrite.yaml @@ -0,0 +1,9 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 45 + write: 45 + delete: 10 diff --git a/qa/suites/tgt/basic/% b/qa/suites/tgt/basic/% new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/qa/suites/tgt/basic/% @@ -0,0 +1 @@ + diff --git a/qa/suites/tgt/basic/clusters/fixed-3.yaml b/qa/suites/tgt/basic/clusters/fixed-3.yaml new file mode 100644 index 00000000000..0038432afa7 --- /dev/null +++ b/qa/suites/tgt/basic/clusters/fixed-3.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mds.a, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/tgt/basic/fs/btrfs.yaml b/qa/suites/tgt/basic/fs/btrfs.yaml new file mode 100644 index 00000000000..4c7af311538 --- /dev/null +++ b/qa/suites/tgt/basic/fs/btrfs.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + fs: btrfs + conf: + osd: + osd op thread timeout: 60 diff --git a/qa/suites/tgt/basic/msgr-failures/few.yaml b/qa/suites/tgt/basic/msgr-failures/few.yaml new file mode 100644 index 00000000000..0de320d46b8 --- /dev/null +++ b/qa/suites/tgt/basic/msgr-failures/few.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 diff --git a/qa/suites/tgt/basic/msgr-failures/many.yaml b/qa/suites/tgt/basic/msgr-failures/many.yaml new file mode 100644 index 00000000000..86f8dde8a0e --- /dev/null +++ b/qa/suites/tgt/basic/msgr-failures/many.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 diff --git a/qa/suites/tgt/basic/tasks/blogbench.yaml b/qa/suites/tgt/basic/tasks/blogbench.yaml new file mode 100644 index 00000000000..f77a78b6bc0 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/blogbench.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/tgt/basic/tasks/bonnie.yaml b/qa/suites/tgt/basic/tasks/bonnie.yaml new file mode 
100644 index 00000000000..2cbfcf8872e --- /dev/null +++ b/qa/suites/tgt/basic/tasks/bonnie.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/bonnie.sh diff --git a/qa/suites/tgt/basic/tasks/dbench-short.yaml b/qa/suites/tgt/basic/tasks/dbench-short.yaml new file mode 100644 index 00000000000..fcb721a4d14 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/dbench-short.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/dbench-short.sh diff --git a/qa/suites/tgt/basic/tasks/dbench.yaml b/qa/suites/tgt/basic/tasks/dbench.yaml new file mode 100644 index 00000000000..7f732175faa --- /dev/null +++ b/qa/suites/tgt/basic/tasks/dbench.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/tgt/basic/tasks/ffsb.yaml b/qa/suites/tgt/basic/tasks/ffsb.yaml new file mode 100644 index 00000000000..f50a3a19647 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/ffsb.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/tgt/basic/tasks/fio.yaml b/qa/suites/tgt/basic/tasks/fio.yaml new file mode 100644 index 00000000000..e7346ce528e --- /dev/null +++ b/qa/suites/tgt/basic/tasks/fio.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/fio.sh diff --git a/qa/suites/tgt/basic/tasks/fsstress.yaml b/qa/suites/tgt/basic/tasks/fsstress.yaml new file mode 100644 index 00000000000..c77f511c0f6 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/fsstress.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/tgt/basic/tasks/fsx.yaml b/qa/suites/tgt/basic/tasks/fsx.yaml new file mode 100644 index 00000000000..04732c84009 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/fsx.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/tgt/basic/tasks/fsync-tester.yaml b/qa/suites/tgt/basic/tasks/fsync-tester.yaml new file mode 100644 index 00000000000..ea627b7d184 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/fsync-tester.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/tgt/basic/tasks/iogen.yaml b/qa/suites/tgt/basic/tasks/iogen.yaml new file mode 100644 index 00000000000..1065c74daba --- /dev/null +++ b/qa/suites/tgt/basic/tasks/iogen.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/iogen.sh diff --git a/qa/suites/tgt/basic/tasks/iozone-sync.yaml b/qa/suites/tgt/basic/tasks/iozone-sync.yaml new file mode 100644 index 00000000000..ac241a417e8 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/iozone-sync.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/iozone-sync.sh diff --git a/qa/suites/tgt/basic/tasks/iozone.yaml b/qa/suites/tgt/basic/tasks/iozone.yaml new file mode 100644 index 00000000000..cf5604c21a7 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/iozone.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/tgt/basic/tasks/pjd.yaml b/qa/suites/tgt/basic/tasks/pjd.yaml new file mode 100644 index 00000000000..ba5c631f157 --- 
/dev/null +++ b/qa/suites/tgt/basic/tasks/pjd.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/upgrade/cuttlefish/fs/% b/qa/suites/upgrade/cuttlefish/fs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/cuttlefish/fs/0-cluster/start.yaml b/qa/suites/upgrade/cuttlefish/fs/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/fs/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/bobtail.v0.61.5.yaml b/qa/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/bobtail.v0.61.5.yaml new file mode 100644 index 00000000000..286cdd66624 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/bobtail.v0.61.5.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: bobtail +- ceph: +- install.upgrade: + all: + tag: v0.61.5 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.5.yaml b/qa/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.5.yaml new file mode 100644 index 00000000000..07d04317ec1 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.61.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.6.yaml b/qa/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.6.yaml new file mode 100644 index 00000000000..9d74ab38f00 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.6.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.61.6 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/fs/2-workload/blogbench.yaml b/qa/suites/upgrade/cuttlefish/fs/2-workload/blogbench.yaml new file mode 100644 index 00000000000..0cd59eaafde --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/fs/2-workload/blogbench.yaml @@ -0,0 +1,5 @@ +workload: + workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..c97aef77e13 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..9d06ef37d05 --- /dev/null +++ 
b/qa/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..dd76b10b5ce --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/qa/suites/upgrade/cuttlefish/fs/4-final/monthrash.yaml b/qa/suites/upgrade/cuttlefish/fs/4-final/monthrash.yaml new file mode 100644 index 00000000000..13af446eb3b --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/fs/4-final/monthrash.yaml @@ -0,0 +1,10 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- ceph-fuse: +- workunit: + clients: + client.0: + - suites/dbench.sh + diff --git a/qa/suites/upgrade/cuttlefish/fs/4-final/osdthrash.yaml b/qa/suites/upgrade/cuttlefish/fs/4-final/osdthrash.yaml new file mode 100644 index 00000000000..dbd7191e36c --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/fs/4-final/osdthrash.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- ceph-fuse: +- workunit: + clients: + all: + - suites/iogen.sh + diff --git a/qa/suites/upgrade/cuttlefish/rados-older/% b/qa/suites/upgrade/cuttlefish/rados-older/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/cuttlefish/rados-older/0-cluster/start.yaml b/qa/suites/upgrade/cuttlefish/rados-older/0-cluster/start.yaml new file mode 100644 index 00000000000..8626abc26c4 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/0-cluster/start.yaml @@ -0,0 +1,20 @@ +overrides: + ceph: + log-whitelist: + - scrub + conf: + paxos service trim min: 5 + mon min osdmap epochs: 25 + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/cuttlefish/rados-older/1-install/bobtail.yaml b/qa/suites/upgrade/cuttlefish/rados-older/1-install/bobtail.yaml new file mode 100644 index 00000000000..21dc2bb2b27 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/1-install/bobtail.yaml @@ -0,0 +1,4 @@ +tasks: +- install: + branch: bobtail +- 
ceph: diff --git a/qa/suites/upgrade/cuttlefish/rados-older/1-install/v0.61.1.yaml b/qa/suites/upgrade/cuttlefish/rados-older/1-install/v0.61.1.yaml new file mode 100644 index 00000000000..c77ab27ba83 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/1-install/v0.61.1.yaml @@ -0,0 +1,4 @@ +tasks: +- install: + tag: v0.61.1 +- ceph: diff --git a/qa/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.3.yaml b/qa/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.3.yaml new file mode 100644 index 00000000000..aca276b8d39 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.3.yaml @@ -0,0 +1,5 @@ +tasks: +- install.upgrade: + all: + tag: v0.61.3 +- ceph.restart: diff --git a/qa/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.4.yaml b/qa/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.4.yaml new file mode 100644 index 00000000000..c44994e5eaf --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.4.yaml @@ -0,0 +1,5 @@ +tasks: +- install.upgrade: + all: + tag: v0.61.4 +- ceph.restart: diff --git a/qa/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.5.yaml b/qa/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.5.yaml new file mode 100644 index 00000000000..d46a1a1527e --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.5.yaml @@ -0,0 +1,5 @@ +tasks: +- install.upgrade: + all: + tag: v0.61.5 +- ceph.restart: diff --git a/qa/suites/upgrade/cuttlefish/rados-older/3-rolling-upgrade/all.yaml b/qa/suites/upgrade/cuttlefish/rados-older/3-rolling-upgrade/all.yaml new file mode 100644 index 00000000000..fe892358df8 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/3-rolling-upgrade/all.yaml @@ -0,0 +1,4 @@ +tasks: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..e70e5d0cf1b --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,39 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..ed25b701597 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,39 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - 
sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..33368cd8df7 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,41 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 diff --git a/qa/suites/upgrade/cuttlefish/rados-older/5-workload/testrados.yaml b/qa/suites/upgrade/cuttlefish/rados-older/5-workload/testrados.yaml new file mode 100644 index 00000000000..49339ecd044 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/5-workload/testrados.yaml @@ -0,0 +1,12 @@ +workload: + rados: + clients: [client.0] + ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/cuttlefish/rados-older/6-final/monthrash.yaml b/qa/suites/upgrade/cuttlefish/rados-older/6-final/monthrash.yaml new file mode 100644 index 00000000000..810ba1b30e2 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/6-final/monthrash.yaml @@ -0,0 +1,9 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + clients: + client.0: + - rados/test.sh + diff --git a/qa/suites/upgrade/cuttlefish/rados-older/6-final/osdthrash.yaml b/qa/suites/upgrade/cuttlefish/rados-older/6-final/osdthrash.yaml new file mode 100644 index 00000000000..f81504233ad --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados-older/6-final/osdthrash.yaml @@ -0,0 +1,23 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- rados: + clients: [client.0] + ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + diff --git a/qa/suites/upgrade/cuttlefish/rados/% b/qa/suites/upgrade/cuttlefish/rados/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/cuttlefish/rados/0-cluster/start.yaml b/qa/suites/upgrade/cuttlefish/rados/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + 
- osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/bobtail.v0.61.5.yaml b/qa/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/bobtail.v0.61.5.yaml new file mode 100644 index 00000000000..286cdd66624 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/bobtail.v0.61.5.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: bobtail +- ceph: +- install.upgrade: + all: + tag: v0.61.5 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.5.yaml b/qa/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.5.yaml new file mode 100644 index 00000000000..07d04317ec1 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.61.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.6.yaml b/qa/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.6.yaml new file mode 100644 index 00000000000..9d74ab38f00 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.6.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.61.6 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/rados/2-workload/testrados.yaml b/qa/suites/upgrade/cuttlefish/rados/2-workload/testrados.yaml new file mode 100644 index 00000000000..8eaab19fd9e --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados/2-workload/testrados.yaml @@ -0,0 +1,13 @@ +workload: + rados: + clients: [client.0] + ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + diff --git a/qa/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..c97aef77e13 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..9d06ef37d05 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - 
ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..c061399adac --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,35 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 diff --git a/qa/suites/upgrade/cuttlefish/rados/4-final/monthrash.yaml b/qa/suites/upgrade/cuttlefish/rados/4-final/monthrash.yaml new file mode 100644 index 00000000000..810ba1b30e2 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados/4-final/monthrash.yaml @@ -0,0 +1,9 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + clients: + client.0: + - rados/test.sh + diff --git a/qa/suites/upgrade/cuttlefish/rados/4-final/osdthrash.yaml b/qa/suites/upgrade/cuttlefish/rados/4-final/osdthrash.yaml new file mode 100644 index 00000000000..f81504233ad --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rados/4-final/osdthrash.yaml @@ -0,0 +1,23 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- rados: + clients: [client.0] + ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + diff --git a/qa/suites/upgrade/cuttlefish/rbd/% b/qa/suites/upgrade/cuttlefish/rbd/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/cuttlefish/rbd/0-cluster/start.yaml b/qa/suites/upgrade/cuttlefish/rbd/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rbd/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/bobtail.v0.61.5.yaml b/qa/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/bobtail.v0.61.5.yaml new file mode 100644 index 00000000000..286cdd66624 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/bobtail.v0.61.5.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: bobtail +- ceph: +- install.upgrade: + all: + tag: v0.61.5 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.5.yaml b/qa/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.5.yaml new file mode 100644 index 00000000000..07d04317ec1 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.61.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git 
a/qa/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.6.yaml b/qa/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.6.yaml new file mode 100644 index 00000000000..9d74ab38f00 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.6.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.61.6 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/rbd/2-workload/rbd.yaml b/qa/suites/upgrade/cuttlefish/rbd/2-workload/rbd.yaml new file mode 100644 index 00000000000..ce2fabe0359 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rbd/2-workload/rbd.yaml @@ -0,0 +1,14 @@ +workload: + sequential: + - workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - workunit: + clients: + client.0: + - cls/test_cls_rbd.sh + + diff --git a/qa/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..c97aef77e13 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..9d06ef37d05 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..dd76b10b5ce --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - 
ceph.restart: [mds.a] diff --git a/qa/suites/upgrade/cuttlefish/rbd/4-final/monthrash.yaml b/qa/suites/upgrade/cuttlefish/rbd/4-final/monthrash.yaml new file mode 100644 index 00000000000..593191c24f5 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rbd/4-final/monthrash.yaml @@ -0,0 +1,11 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + clients: + client.0: + - rbd/copy.sh + env: + RBD_CREATE_ARGS: --new-format + diff --git a/qa/suites/upgrade/cuttlefish/rbd/4-final/osdthrash.yaml b/qa/suites/upgrade/cuttlefish/rbd/4-final/osdthrash.yaml new file mode 100644 index 00000000000..575fd7922ad --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rbd/4-final/osdthrash.yaml @@ -0,0 +1,16 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- workunit: + clients: + client.0: + - rbd/test_lock_fence.sh + diff --git a/qa/suites/upgrade/cuttlefish/rgw/% b/qa/suites/upgrade/cuttlefish/rgw/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/cuttlefish/rgw/0-cluster/start.yaml b/qa/suites/upgrade/cuttlefish/rgw/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rgw/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/bobtail.v0.61.5.yaml b/qa/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/bobtail.v0.61.5.yaml new file mode 100644 index 00000000000..286cdd66624 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/bobtail.v0.61.5.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: bobtail +- ceph: +- install.upgrade: + all: + tag: v0.61.5 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.5.yaml b/qa/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.5.yaml new file mode 100644 index 00000000000..07d04317ec1 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.61.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.6.yaml b/qa/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.6.yaml new file mode 100644 index 00000000000..9d74ab38f00 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.6.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.61.6 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/cuttlefish/rgw/2-workload/testrgw.yaml b/qa/suites/upgrade/cuttlefish/rgw/2-workload/testrgw.yaml new file mode 100644 index 00000000000..f1b2f3e88ef --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rgw/2-workload/testrgw.yaml @@ -0,0 +1,6 @@ +workload: + rgw: [client.0] + s3tests: + client.0: + rgw_server: client.0 + diff --git a/qa/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..0de4f185fa2 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,36 @@ 
+upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - ceph.restart: [rgw.client.0] diff --git a/qa/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..c7fa40f1f50 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - ceph.restart: [rgw.client.0] diff --git a/qa/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..0d0639ed74a --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: cuttlefish + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [rgw.client.0] diff --git a/qa/suites/upgrade/cuttlefish/rgw/4-final/monthrash.yaml b/qa/suites/upgrade/cuttlefish/rgw/4-final/monthrash.yaml new file mode 100644 index 00000000000..9361edc8015 --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rgw/4-final/monthrash.yaml @@ -0,0 +1,8 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- swift: + client.0: + rgw_server: client.0 + diff --git a/qa/suites/upgrade/cuttlefish/rgw/4-final/osdthrash.yaml b/qa/suites/upgrade/cuttlefish/rgw/4-final/osdthrash.yaml new file mode 100644 index 00000000000..6cf6d861d5f --- /dev/null +++ b/qa/suites/upgrade/cuttlefish/rgw/4-final/osdthrash.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- swift: + client.0: + rgw_server: client.0 + diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/% 
b/qa/suites/upgrade/dumpling-emperor-x/parallel/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/0-cluster/start.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/0-cluster/start.yaml new file mode 100644 index 00000000000..e3d7f85f9ff --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/0-cluster/start.yaml @@ -0,0 +1,19 @@ +overrides: + ceph: + conf: + mon: + mon warn on legacy crush tunables: false + log-whitelist: + - scrub mismatch + - ScrubResult +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 + - client.1 diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..92df8cebc5f --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/1-dumpling-install/dumpling.yaml @@ -0,0 +1,8 @@ +tasks: +- install: + branch: dumpling +- ceph: + fs: xfs +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/+ b/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_api.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_api.yaml new file mode 100644 index 00000000000..96d656e4932 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_api.yaml @@ -0,0 +1,8 @@ +workload: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rados/test.sh + - cls diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_loadgenbig.yaml new file mode 100644 index 00000000000..16241b3bed6 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_loadgenbig.yaml @@ -0,0 +1,7 @@ +workload: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rados/load-gen-big.sh diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_api.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_api.yaml new file mode 100644 index 00000000000..7584f0e1ff0 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_api.yaml @@ -0,0 +1,7 @@ +workload: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd.sh diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_python.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_python.yaml new file mode 100644 index 00000000000..09c5326592b --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_python.yaml @@ -0,0 +1,7 @@ +workload: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd_python.sh diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/3-emperor-upgrade/emperor.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/3-emperor-upgrade/emperor.yaml new file mode 100644 index 00000000000..626bc161cbd --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/3-emperor-upgrade/emperor.yaml @@ -0,0 +1,10 @@ +tasks: + - install.upgrade: + mon.a: + branch: emperor + mon.b: + branch: emperor + - ceph.restart: + - parallel: + - workload2 + - upgrade-sequence diff --git 
a/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/+ b/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_api.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_api.yaml new file mode 100644 index 00000000000..b6bb42048a4 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_api.yaml @@ -0,0 +1,8 @@ +workload2: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rados/test.sh + - cls diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_loadgenbig.yaml new file mode 100644 index 00000000000..fd5c31dc477 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_loadgenbig.yaml @@ -0,0 +1,7 @@ +workload2: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rados/load-gen-big.sh diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_api.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_api.yaml new file mode 100644 index 00000000000..8c8c97a4bf3 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_api.yaml @@ -0,0 +1,7 @@ +workload2: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd.sh diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_python.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_python.yaml new file mode 100644 index 00000000000..1edb13cf907 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_python.yaml @@ -0,0 +1,7 @@ +workload2: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd_python.sh diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 00000000000..da6028e47b2 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,8 @@ +upgrade-sequence: + sequential: + - install.upgrade: + mon.a: + branch: emperor + mon.b: + branch: emperor + - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..b9027db7eea --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,35 @@ +upgrade-sequence: + sequential: + - install.upgrade: + mon.a: + branch: emperor + mon.b: + branch: emperor + - ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 60 + - ceph.restart: [osd.1] + - sleep: + duration: 60 + - ceph.restart: [osd.2] + - sleep: + duration: 60 + - ceph.restart: [osd.3] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/+ 
b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados-snaps-few-objects.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados-snaps-few-objects.yaml new file mode 100644 index 00000000000..bf85020d8d9 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados-snaps-few-objects.yaml @@ -0,0 +1,12 @@ +tasks: +- rados: + clients: [client.1] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_loadgenmix.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_loadgenmix.yaml new file mode 100644 index 00000000000..0bddda0ab84 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_loadgenmix.yaml @@ -0,0 +1,6 @@ +tasks: + - workunit: + branch: dumpling + clients: + client.1: + - rados/load-gen-mix.sh diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_mon_thrash.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_mon_thrash.yaml new file mode 100644 index 00000000000..1a932e059f0 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_mon_thrash.yaml @@ -0,0 +1,9 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + branch: dumpling + clients: + client.1: + - rados/test.sh diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_cls.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_cls.yaml new file mode 100644 index 00000000000..9407ab48916 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_cls.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.1: + - cls/test_cls_rbd.sh + diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_import_export.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_import_export.yaml new file mode 100644 index 00000000000..185cd1ab32a --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_import_export.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.1: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_s3tests.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_s3tests.yaml new file mode 100644 index 00000000000..22c3a3f821a --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_s3tests.yaml @@ -0,0 +1,5 @@ +tasks: +- rgw: [client.1] +- s3tests: + client.1: + rgw_server: client.1 diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_swift.yaml b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_swift.yaml new file mode 100644 index 00000000000..0ab9febd2fc --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_swift.yaml @@ -0,0 +1,6 @@ +tasks: +# Uncomment the next line if you have not already included rgw_s3tests.yaml in your test. 
+# - rgw: [client.1] +- swift: + client.1: + rgw_server: client.1 diff --git a/qa/suites/upgrade/dumpling-emperor-x/parallel/distros b/qa/suites/upgrade/dumpling-emperor-x/parallel/distros new file mode 120000 index 00000000000..79010c36a59 --- /dev/null +++ b/qa/suites/upgrade/dumpling-emperor-x/parallel/distros @@ -0,0 +1 @@ +../../../../distros/supported \ No newline at end of file diff --git a/qa/suites/upgrade/dumpling-x/parallel/% b/qa/suites/upgrade/dumpling-x/parallel/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-x/parallel/0-cluster/start.yaml b/qa/suites/upgrade/dumpling-x/parallel/0-cluster/start.yaml new file mode 100644 index 00000000000..4a9420f3906 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/0-cluster/start.yaml @@ -0,0 +1,19 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 + - client.1 +overrides: + ceph: + conf: + mon: + mon warn on legacy crush tunables: false + log-whitelist: + - scrub mismatch + - ScrubResult diff --git a/qa/suites/upgrade/dumpling-x/parallel/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/dumpling-x/parallel/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..adbdedee518 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/1-dumpling-install/dumpling.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: dumpling +- print: "**** done install" +- ceph: + fs: xfs +- print: "**** done ceph" +- parallel: + - workload + - upgrade-sequence +- print: "**** done parallel" diff --git a/qa/suites/upgrade/dumpling-x/parallel/2-workload/+ b/qa/suites/upgrade/dumpling-x/parallel/2-workload/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-x/parallel/2-workload/rados_api.yaml b/qa/suites/upgrade/dumpling-x/parallel/2-workload/rados_api.yaml new file mode 100644 index 00000000000..cd820a8a711 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/2-workload/rados_api.yaml @@ -0,0 +1,9 @@ +workload: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rados/test-upgrade-firefly.sh + - cls + diff --git a/qa/suites/upgrade/dumpling-x/parallel/2-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/dumpling-x/parallel/2-workload/rados_loadgenbig.yaml new file mode 100644 index 00000000000..cc1ef874cb0 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/2-workload/rados_loadgenbig.yaml @@ -0,0 +1,7 @@ +workload: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rados/load-gen-big.sh diff --git a/qa/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_api.yaml b/qa/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_api.yaml new file mode 100644 index 00000000000..36ffa27ec3f --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_api.yaml @@ -0,0 +1,7 @@ +workload: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd.sh diff --git a/qa/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_python.yaml b/qa/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_python.yaml new file mode 100644 index 00000000000..e704a9794b9 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_python.yaml @@ -0,0 +1,7 @@ +workload: + sequential: + - workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd_python.sh diff --git a/qa/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-all.yaml 
b/qa/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 00000000000..f5d10cdfcab --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,6 @@ +upgrade-sequence: + sequential: + - install.upgrade: + mon.a: + mon.b: + - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..fcb61b1cef2 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + mon.a: + mon.b: + - ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 60 + - ceph.restart: [osd.1] + - sleep: + duration: 60 + - ceph.restart: [osd.2] + - sleep: + duration: 60 + - ceph.restart: [osd.3] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/qa/suites/upgrade/dumpling-x/parallel/4-final-upgrade/client.yaml b/qa/suites/upgrade/dumpling-x/parallel/4-final-upgrade/client.yaml new file mode 100644 index 00000000000..cf35d41e6c5 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/4-final-upgrade/client.yaml @@ -0,0 +1,4 @@ +tasks: + - install.upgrade: + client.0: + - print: "**** done install.upgrade" diff --git a/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/+ b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rados-snaps-few-objects.yaml b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rados-snaps-few-objects.yaml new file mode 100644 index 00000000000..40f66da37f2 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rados-snaps-few-objects.yaml @@ -0,0 +1,12 @@ +tasks: + - rados: + clients: [client.1] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_loadgenmix.yaml b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_loadgenmix.yaml new file mode 100644 index 00000000000..faa96ed24d5 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_loadgenmix.yaml @@ -0,0 +1,5 @@ +tasks: + - workunit: + clients: + client.1: + - rados/load-gen-mix.sh diff --git a/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_mon_thrash.yaml b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_mon_thrash.yaml new file mode 100644 index 00000000000..88019bef17a --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_mon_thrash.yaml @@ -0,0 +1,8 @@ +tasks: + - mon_thrash: + revive_delay: 20 + thrash_delay: 1 + - workunit: + clients: + client.1: + - rados/test.sh diff --git a/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_cls.yaml b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_cls.yaml new file mode 100644 index 00000000000..4ef47768237 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_cls.yaml 
@@ -0,0 +1,6 @@ +tasks: + - workunit: + clients: + client.1: + - cls/test_cls_rbd.sh + diff --git a/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_import_export.yaml b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_import_export.yaml new file mode 100644 index 00000000000..6c40377324d --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_import_export.yaml @@ -0,0 +1,7 @@ +tasks: + - workunit: + clients: + client.1: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_s3tests.yaml b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_s3tests.yaml new file mode 100644 index 00000000000..53ceb786ba0 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_s3tests.yaml @@ -0,0 +1,6 @@ +tasks: + - rgw: [client.1] + - s3tests: + client.1: + rgw_server: client.1 + branch: dumpling diff --git a/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_swift.yaml b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_swift.yaml new file mode 100644 index 00000000000..44085b469d2 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_swift.yaml @@ -0,0 +1,6 @@ +tasks: +# no need for rwg when we use + +# - rgw: [client.1] + - swift: + client.1: + rgw_server: client.1 diff --git a/qa/suites/upgrade/dumpling-x/parallel/distros b/qa/suites/upgrade/dumpling-x/parallel/distros new file mode 120000 index 00000000000..79010c36a59 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/parallel/distros @@ -0,0 +1 @@ +../../../../distros/supported \ No newline at end of file diff --git a/qa/suites/upgrade/dumpling-x/stress-split/% b/qa/suites/upgrade/dumpling-x/stress-split/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-x/stress-split/0-cluster/start.yaml b/qa/suites/upgrade/dumpling-x/stress-split/0-cluster/start.yaml new file mode 100644 index 00000000000..d8f49e35396 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + conf: + mon: + mon warn on legacy crush tunables: false +roles: +- - mon.a + - mon.b + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - osd.3 + - osd.4 + - osd.5 + - mon.c +- - client.0 diff --git a/qa/suites/upgrade/dumpling-x/stress-split/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/dumpling-x/stress-split/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..c98631e2bbd --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/1-dumpling-install/dumpling.yaml @@ -0,0 +1,5 @@ +tasks: +- install: + branch: dumpling +- ceph: + fs: xfs diff --git a/qa/suites/upgrade/dumpling-x/stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/dumpling-x/stress-split/2-partial-upgrade/firsthalf.yaml new file mode 100644 index 00000000000..312df6e21c6 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/2-partial-upgrade/firsthalf.yaml @@ -0,0 +1,5 @@ +tasks: +- install.upgrade: + osd.0: +- ceph.restart: + daemons: [osd.0, osd.1, osd.2] diff --git a/qa/suites/upgrade/dumpling-x/stress-split/3-thrash/default.yaml b/qa/suites/upgrade/dumpling-x/stress-split/3-thrash/default.yaml new file mode 100644 index 00000000000..a85510eb6fa --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/3-thrash/default.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- 
thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + thrash_primary_affinity: false diff --git a/qa/suites/upgrade/dumpling-x/stress-split/4-mon/mona.yaml b/qa/suites/upgrade/dumpling-x/stress-split/4-mon/mona.yaml new file mode 100644 index 00000000000..b6ffb3323d1 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/4-mon/mona.yaml @@ -0,0 +1,5 @@ +tasks: +- ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true diff --git a/qa/suites/upgrade/dumpling-x/stress-split/5-workload/+ b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-x/stress-split/5-workload/rados_api_tests.yaml b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/rados_api_tests.yaml new file mode 100644 index 00000000000..7b2c72cbb2e --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/rados_api_tests.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rados/test-upgrade-firefly.sh diff --git a/qa/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-cls.yaml b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-cls.yaml new file mode 100644 index 00000000000..db3dff7fc5c --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-cls.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - cls/test_cls_rbd.sh diff --git a/qa/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-import-export.yaml b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-import-export.yaml new file mode 100644 index 00000000000..a5a964ce13b --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-import-export.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/upgrade/dumpling-x/stress-split/5-workload/readwrite.yaml b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/readwrite.yaml new file mode 100644 index 00000000000..c53e52b0872 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/readwrite.yaml @@ -0,0 +1,9 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 45 + write: 45 + delete: 10 diff --git a/qa/suites/upgrade/dumpling-x/stress-split/5-workload/snaps-few-objects.yaml b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/snaps-few-objects.yaml new file mode 100644 index 00000000000..c54039766c0 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/5-workload/snaps-few-objects.yaml @@ -0,0 +1,12 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/dumpling-x/stress-split/6-next-mon/monb.yaml b/qa/suites/upgrade/dumpling-x/stress-split/6-next-mon/monb.yaml new file mode 100644 index 00000000000..513890c41c0 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/6-next-mon/monb.yaml @@ -0,0 +1,5 @@ +tasks: +- ceph.restart: + daemons: [mon.b] + wait-for-healthy: false + wait-for-osds-up: true diff --git a/qa/suites/upgrade/dumpling-x/stress-split/7-workload/+ b/qa/suites/upgrade/dumpling-x/stress-split/7-workload/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-x/stress-split/7-workload/rados_api_tests.yaml 
b/qa/suites/upgrade/dumpling-x/stress-split/7-workload/rados_api_tests.yaml new file mode 100644 index 00000000000..7b2c72cbb2e --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/7-workload/rados_api_tests.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rados/test-upgrade-firefly.sh diff --git a/qa/suites/upgrade/dumpling-x/stress-split/7-workload/radosbench.yaml b/qa/suites/upgrade/dumpling-x/stress-split/7-workload/radosbench.yaml new file mode 100644 index 00000000000..3940870fce0 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/7-workload/radosbench.yaml @@ -0,0 +1,4 @@ +tasks: +- radosbench: + clients: [client.0] + time: 1800 diff --git a/qa/suites/upgrade/dumpling-x/stress-split/7-workload/rbd_api.yaml b/qa/suites/upgrade/dumpling-x/stress-split/7-workload/rbd_api.yaml new file mode 100644 index 00000000000..bbcde3e1559 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/7-workload/rbd_api.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd.sh diff --git a/qa/suites/upgrade/dumpling-x/stress-split/8-next-mon/monc.yaml b/qa/suites/upgrade/dumpling-x/stress-split/8-next-mon/monc.yaml new file mode 100644 index 00000000000..73f22bd5f7c --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/8-next-mon/monc.yaml @@ -0,0 +1,8 @@ +tasks: +- install.upgrade: + mon.c: +- ceph.restart: + daemons: [mon.c] + wait-for-healthy: false + wait-for-osds-up: true +- ceph.wait_for_mon_quorum: [a, b, c] diff --git a/qa/suites/upgrade/dumpling-x/stress-split/9-workload/+ b/qa/suites/upgrade/dumpling-x/stress-split/9-workload/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling-x/stress-split/9-workload/rados_api_tests.yaml b/qa/suites/upgrade/dumpling-x/stress-split/9-workload/rados_api_tests.yaml new file mode 100644 index 00000000000..7b2c72cbb2e --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/9-workload/rados_api_tests.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rados/test-upgrade-firefly.sh diff --git a/qa/suites/upgrade/dumpling-x/stress-split/9-workload/rbd-python.yaml b/qa/suites/upgrade/dumpling-x/stress-split/9-workload/rbd-python.yaml new file mode 100644 index 00000000000..1c5e53906f8 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/9-workload/rbd-python.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd_python.sh diff --git a/qa/suites/upgrade/dumpling-x/stress-split/9-workload/rgw-s3tests.yaml b/qa/suites/upgrade/dumpling-x/stress-split/9-workload/rgw-s3tests.yaml new file mode 100644 index 00000000000..e44546dbcaa --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/9-workload/rgw-s3tests.yaml @@ -0,0 +1,8 @@ +tasks: +- rgw: + default_idle_timeout: 300 + client.0: +- swift: + client.0: + rgw_server: client.0 + diff --git a/qa/suites/upgrade/dumpling-x/stress-split/9-workload/snaps-many-objects.yaml b/qa/suites/upgrade/dumpling-x/stress-split/9-workload/snaps-many-objects.yaml new file mode 100644 index 00000000000..9e311c946e1 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/9-workload/snaps-many-objects.yaml @@ -0,0 +1,12 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/dumpling-x/stress-split/distros 
b/qa/suites/upgrade/dumpling-x/stress-split/distros new file mode 120000 index 00000000000..79010c36a59 --- /dev/null +++ b/qa/suites/upgrade/dumpling-x/stress-split/distros @@ -0,0 +1 @@ +../../../../distros/supported \ No newline at end of file diff --git a/qa/suites/upgrade/dumpling/fs/% b/qa/suites/upgrade/dumpling/fs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling/fs/0-cluster/start.yaml b/qa/suites/upgrade/dumpling/fs/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/dumpling/fs/1-dumpling-install/cuttlefish.v0.67.1.yaml b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/cuttlefish.v0.67.1.yaml new file mode 100644 index 00000000000..032340ba25f --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/cuttlefish.v0.67.1.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: cuttlefish +- ceph: +- install.upgrade: + all: + tag: v0.67.1 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.1.yaml b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.1.yaml new file mode 100644 index 00000000000..a5bf1fa9073 --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.1.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.1 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.2.yaml b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.2.yaml new file mode 100644 index 00000000000..d39967fe408 --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.2.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.2 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.3.yaml b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.3.yaml new file mode 100644 index 00000000000..d0c1861193d --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.3.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.3 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.4.yaml b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.4.yaml new file mode 100644 index 00000000000..4e7d7c5fba1 --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.4.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.4 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.5.yaml b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.5.yaml new file mode 100644 index 00000000000..611b6d6b822 --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.7.yaml b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.7.yaml new file mode 100644 index 00000000000..7cb8fcc22ac --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.7.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.7 +- ceph: +- parallel: + - workload + 
- upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/fs/2-workload/blogbench.yaml b/qa/suites/upgrade/dumpling/fs/2-workload/blogbench.yaml new file mode 100644 index 00000000000..0cd59eaafde --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/2-workload/blogbench.yaml @@ -0,0 +1,5 @@ +workload: + workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..38bba91895d --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..5b617fdfd5a --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..91c146a2070 --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/qa/suites/upgrade/dumpling/fs/4-final/monthrash.yaml b/qa/suites/upgrade/dumpling/fs/4-final/monthrash.yaml new file mode 100644 index 00000000000..13af446eb3b --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/4-final/monthrash.yaml @@ -0,0 +1,10 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- ceph-fuse: +- workunit: + clients: + client.0: + - suites/dbench.sh + diff --git a/qa/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml 
b/qa/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml new file mode 100644 index 00000000000..dbd7191e36c --- /dev/null +++ b/qa/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- ceph-fuse: +- workunit: + clients: + all: + - suites/iogen.sh + diff --git a/qa/suites/upgrade/dumpling/rados/% b/qa/suites/upgrade/dumpling/rados/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling/rados/0-cluster/start.yaml b/qa/suites/upgrade/dumpling/rados/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/dumpling/rados/1-dumpling-install/cuttlefish.v0.67.1.yaml b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/cuttlefish.v0.67.1.yaml new file mode 100644 index 00000000000..032340ba25f --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/cuttlefish.v0.67.1.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: cuttlefish +- ceph: +- install.upgrade: + all: + tag: v0.67.1 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.1.yaml b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.1.yaml new file mode 100644 index 00000000000..a5bf1fa9073 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.1.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.1 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.2.yaml b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.2.yaml new file mode 100644 index 00000000000..d39967fe408 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.2.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.2 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.3.yaml b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.3.yaml new file mode 100644 index 00000000000..d0c1861193d --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.3.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.3 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.4.yaml b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.4.yaml new file mode 100644 index 00000000000..4e7d7c5fba1 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.4.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.4 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.5.yaml b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.5.yaml new file mode 100644 index 00000000000..611b6d6b822 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git 
a/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.7.yaml b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.7.yaml new file mode 100644 index 00000000000..7cb8fcc22ac --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.7.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.7 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rados/2-workload/testrados.yaml b/qa/suites/upgrade/dumpling/rados/2-workload/testrados.yaml new file mode 100644 index 00000000000..8eaab19fd9e --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/2-workload/testrados.yaml @@ -0,0 +1,13 @@ +workload: + rados: + clients: [client.0] + ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + diff --git a/qa/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..38bba91895d --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..5b617fdfd5a --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..801bab9f1f0 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,35 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: 
[mds.a] + - sleep: + duration: 60 diff --git a/qa/suites/upgrade/dumpling/rados/4-final/monthrash.yaml b/qa/suites/upgrade/dumpling/rados/4-final/monthrash.yaml new file mode 100644 index 00000000000..810ba1b30e2 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/4-final/monthrash.yaml @@ -0,0 +1,9 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + clients: + client.0: + - rados/test.sh + diff --git a/qa/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml b/qa/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml new file mode 100644 index 00000000000..f81504233ad --- /dev/null +++ b/qa/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml @@ -0,0 +1,23 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- rados: + clients: [client.0] + ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + diff --git a/qa/suites/upgrade/dumpling/rbd/% b/qa/suites/upgrade/dumpling/rbd/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling/rbd/0-cluster/start.yaml b/qa/suites/upgrade/dumpling/rbd/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/cuttlefish.v0.67.1.yaml b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/cuttlefish.v0.67.1.yaml new file mode 100644 index 00000000000..032340ba25f --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/cuttlefish.v0.67.1.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: cuttlefish +- ceph: +- install.upgrade: + all: + tag: v0.67.1 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.1.yaml b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.1.yaml new file mode 100644 index 00000000000..a5bf1fa9073 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.1.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.1 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.2.yaml b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.2.yaml new file mode 100644 index 00000000000..d39967fe408 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.2.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.2 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.3.yaml b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.3.yaml new file mode 100644 index 00000000000..d0c1861193d --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.3.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.3 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.4.yaml b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.4.yaml new file mode 100644 index 00000000000..4e7d7c5fba1 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.4.yaml @@ 
-0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.4 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.5.yaml b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.5.yaml new file mode 100644 index 00000000000..611b6d6b822 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.7.yaml b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.7.yaml new file mode 100644 index 00000000000..7cb8fcc22ac --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.7.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.7 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rbd/2-workload/rbd.yaml b/qa/suites/upgrade/dumpling/rbd/2-workload/rbd.yaml new file mode 100644 index 00000000000..ce2fabe0359 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/2-workload/rbd.yaml @@ -0,0 +1,14 @@ +workload: + sequential: + - workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - workunit: + clients: + client.0: + - cls/test_cls_rbd.sh + + diff --git a/qa/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..38bba91895d --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..5b617fdfd5a --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..91c146a2070 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: 
[osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/qa/suites/upgrade/dumpling/rbd/4-final/monthrash.yaml b/qa/suites/upgrade/dumpling/rbd/4-final/monthrash.yaml new file mode 100644 index 00000000000..593191c24f5 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/4-final/monthrash.yaml @@ -0,0 +1,11 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + clients: + client.0: + - rbd/copy.sh + env: + RBD_CREATE_ARGS: --new-format + diff --git a/qa/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml b/qa/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml new file mode 100644 index 00000000000..575fd7922ad --- /dev/null +++ b/qa/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml @@ -0,0 +1,16 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- workunit: + clients: + client.0: + - rbd/test_lock_fence.sh + diff --git a/qa/suites/upgrade/dumpling/rgw/% b/qa/suites/upgrade/dumpling/rgw/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/dumpling/rgw/0-cluster/start.yaml b/qa/suites/upgrade/dumpling/rgw/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/cuttlefish.v0.67.1.yaml b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/cuttlefish.v0.67.1.yaml new file mode 100644 index 00000000000..032340ba25f --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/cuttlefish.v0.67.1.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: cuttlefish +- ceph: +- install.upgrade: + all: + tag: v0.67.1 +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.1.yaml b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.1.yaml new file mode 100644 index 00000000000..a5bf1fa9073 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.1.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.1 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.2.yaml b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.2.yaml new file mode 100644 index 00000000000..d39967fe408 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.2.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.2 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.3.yaml b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.3.yaml new file mode 100644 index 00000000000..d0c1861193d --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.3.yaml @@ -0,0 +1,7 @@ 
+tasks: +- install: + tag: v0.67.3 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.4.yaml b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.4.yaml new file mode 100644 index 00000000000..4e7d7c5fba1 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.4.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.4 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.5.yaml b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.5.yaml new file mode 100644 index 00000000000..611b6d6b822 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.7.yaml b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.7.yaml new file mode 100644 index 00000000000..7cb8fcc22ac --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.7.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.67.7 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/dumpling/rgw/2-workload/testrgw.yaml b/qa/suites/upgrade/dumpling/rgw/2-workload/testrgw.yaml new file mode 100644 index 00000000000..f1b2f3e88ef --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/2-workload/testrgw.yaml @@ -0,0 +1,6 @@ +workload: + rgw: [client.0] + s3tests: + client.0: + rgw_server: client.0 + diff --git a/qa/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..ff9129046e1 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - ceph.restart: [rgw.client.0] diff --git a/qa/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..75face28d88 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - ceph.restart: [rgw.client.0] diff --git 
a/qa/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..a08c669bf9e --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: dumpling + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [rgw.client.0] diff --git a/qa/suites/upgrade/dumpling/rgw/4-final/monthrash.yaml b/qa/suites/upgrade/dumpling/rgw/4-final/monthrash.yaml new file mode 100644 index 00000000000..9361edc8015 --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/4-final/monthrash.yaml @@ -0,0 +1,8 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- swift: + client.0: + rgw_server: client.0 + diff --git a/qa/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml b/qa/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml new file mode 100644 index 00000000000..6cf6d861d5f --- /dev/null +++ b/qa/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- swift: + client.0: + rgw_server: client.0 + diff --git a/qa/suites/upgrade/emperor/fs/% b/qa/suites/upgrade/emperor/fs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/emperor/fs/0-cluster/start.yaml b/qa/suites/upgrade/emperor/fs/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/emperor/fs/1-emperor-install/dumpling.v0.67.5.yaml b/qa/suites/upgrade/emperor/fs/1-emperor-install/dumpling.v0.67.5.yaml new file mode 100644 index 00000000000..a2891c427c2 --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/1-emperor-install/dumpling.v0.67.5.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: dumpling +- ceph: +- install.upgrade: + all: + tag: +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/fs/1-emperor-install/v0.73.yaml b/qa/suites/upgrade/emperor/fs/1-emperor-install/v0.73.yaml new file mode 100644 index 00000000000..7750040f138 --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/1-emperor-install/v0.73.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.73 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/fs/1-emperor-install/v0.74.yaml b/qa/suites/upgrade/emperor/fs/1-emperor-install/v0.74.yaml new file mode 100644 index 00000000000..9d0ded4f56f --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/1-emperor-install/v0.74.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.74 +- ceph: 
+- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/fs/1-emperor-install/v0.75.yaml b/qa/suites/upgrade/emperor/fs/1-emperor-install/v0.75.yaml new file mode 100644 index 00000000000..368f0ec479e --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/1-emperor-install/v0.75.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.75 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/fs/2-workload/blogbench.yaml b/qa/suites/upgrade/emperor/fs/2-workload/blogbench.yaml new file mode 100644 index 00000000000..0cd59eaafde --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/2-workload/blogbench.yaml @@ -0,0 +1,5 @@ +workload: + workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..520dc4b30cf --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..e11f8c3b13f --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..78cb33be1ec --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/qa/suites/upgrade/emperor/fs/4-final/monthrash.yaml b/qa/suites/upgrade/emperor/fs/4-final/monthrash.yaml 
new file mode 100644 index 00000000000..13af446eb3b --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/4-final/monthrash.yaml @@ -0,0 +1,10 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- ceph-fuse: +- workunit: + clients: + client.0: + - suites/dbench.sh + diff --git a/qa/suites/upgrade/emperor/fs/4-final/osdthrash.yaml b/qa/suites/upgrade/emperor/fs/4-final/osdthrash.yaml new file mode 100644 index 00000000000..dbd7191e36c --- /dev/null +++ b/qa/suites/upgrade/emperor/fs/4-final/osdthrash.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- ceph-fuse: +- workunit: + clients: + all: + - suites/iogen.sh + diff --git a/qa/suites/upgrade/emperor/rados/% b/qa/suites/upgrade/emperor/rados/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/emperor/rados/0-cluster/start.yaml b/qa/suites/upgrade/emperor/rados/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/emperor/rados/1-emperor-install/dumpling.v0.67.5.yaml b/qa/suites/upgrade/emperor/rados/1-emperor-install/dumpling.v0.67.5.yaml new file mode 100644 index 00000000000..a2891c427c2 --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/1-emperor-install/dumpling.v0.67.5.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: dumpling +- ceph: +- install.upgrade: + all: + tag: +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rados/1-emperor-install/v0.73.yaml b/qa/suites/upgrade/emperor/rados/1-emperor-install/v0.73.yaml new file mode 100644 index 00000000000..7750040f138 --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/1-emperor-install/v0.73.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.73 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rados/1-emperor-install/v0.74.yaml b/qa/suites/upgrade/emperor/rados/1-emperor-install/v0.74.yaml new file mode 100644 index 00000000000..9d0ded4f56f --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/1-emperor-install/v0.74.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.74 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rados/1-emperor-install/v0.75.yaml b/qa/suites/upgrade/emperor/rados/1-emperor-install/v0.75.yaml new file mode 100644 index 00000000000..368f0ec479e --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/1-emperor-install/v0.75.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.75 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rados/2-workload/testrados.yaml b/qa/suites/upgrade/emperor/rados/2-workload/testrados.yaml new file mode 100644 index 00000000000..8eaab19fd9e --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/2-workload/testrados.yaml @@ -0,0 +1,13 @@ +workload: + rados: + clients: [client.0] + ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + diff --git a/qa/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml 
b/qa/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..520dc4b30cf --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..e11f8c3b13f --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..f0fa4b886c5 --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,35 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 diff --git a/qa/suites/upgrade/emperor/rados/4-final/monthrash.yaml b/qa/suites/upgrade/emperor/rados/4-final/monthrash.yaml new file mode 100644 index 00000000000..810ba1b30e2 --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/4-final/monthrash.yaml @@ -0,0 +1,9 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + clients: + client.0: + - rados/test.sh + diff --git a/qa/suites/upgrade/emperor/rados/4-final/osdthrash.yaml b/qa/suites/upgrade/emperor/rados/4-final/osdthrash.yaml new file mode 100644 index 00000000000..f81504233ad --- /dev/null +++ b/qa/suites/upgrade/emperor/rados/4-final/osdthrash.yaml @@ -0,0 +1,23 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- rados: + clients: [client.0] + 
ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + diff --git a/qa/suites/upgrade/emperor/rbd/% b/qa/suites/upgrade/emperor/rbd/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/emperor/rbd/0-cluster/start.yaml b/qa/suites/upgrade/emperor/rbd/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/emperor/rbd/1-emperor-install/dumpling.v0.67.5.yaml b/qa/suites/upgrade/emperor/rbd/1-emperor-install/dumpling.v0.67.5.yaml new file mode 100644 index 00000000000..a2891c427c2 --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/1-emperor-install/dumpling.v0.67.5.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: dumpling +- ceph: +- install.upgrade: + all: + tag: +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rbd/1-emperor-install/v0.73.yaml b/qa/suites/upgrade/emperor/rbd/1-emperor-install/v0.73.yaml new file mode 100644 index 00000000000..7750040f138 --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/1-emperor-install/v0.73.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.73 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rbd/1-emperor-install/v0.74.yaml b/qa/suites/upgrade/emperor/rbd/1-emperor-install/v0.74.yaml new file mode 100644 index 00000000000..9d0ded4f56f --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/1-emperor-install/v0.74.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.74 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rbd/1-emperor-install/v0.75.yaml b/qa/suites/upgrade/emperor/rbd/1-emperor-install/v0.75.yaml new file mode 100644 index 00000000000..368f0ec479e --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/1-emperor-install/v0.75.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.75 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rbd/2-workload/rbd.yaml b/qa/suites/upgrade/emperor/rbd/2-workload/rbd.yaml new file mode 100644 index 00000000000..ce2fabe0359 --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/2-workload/rbd.yaml @@ -0,0 +1,14 @@ +workload: + sequential: + - workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - workunit: + clients: + client.0: + - cls/test_cls_rbd.sh + + diff --git a/qa/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..520dc4b30cf --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: 
[osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..e11f8c3b13f --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] diff --git a/qa/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..78cb33be1ec --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/qa/suites/upgrade/emperor/rbd/4-final/monthrash.yaml b/qa/suites/upgrade/emperor/rbd/4-final/monthrash.yaml new file mode 100644 index 00000000000..593191c24f5 --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/4-final/monthrash.yaml @@ -0,0 +1,11 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- workunit: + clients: + client.0: + - rbd/copy.sh + env: + RBD_CREATE_ARGS: --new-format + diff --git a/qa/suites/upgrade/emperor/rbd/4-final/osdthrash.yaml b/qa/suites/upgrade/emperor/rbd/4-final/osdthrash.yaml new file mode 100644 index 00000000000..575fd7922ad --- /dev/null +++ b/qa/suites/upgrade/emperor/rbd/4-final/osdthrash.yaml @@ -0,0 +1,16 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- workunit: + clients: + client.0: + - rbd/test_lock_fence.sh + diff --git a/qa/suites/upgrade/emperor/rgw/% b/qa/suites/upgrade/emperor/rgw/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/emperor/rgw/0-cluster/start.yaml b/qa/suites/upgrade/emperor/rgw/0-cluster/start.yaml new file mode 100644 index 00000000000..c1acc4e8ad6 --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/0-cluster/start.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - scrub + fs: xfs +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 diff --git a/qa/suites/upgrade/emperor/rgw/1-emperor-install/dumpling.v0.67.5.yaml b/qa/suites/upgrade/emperor/rgw/1-emperor-install/dumpling.v0.67.5.yaml new file mode 
100644 index 00000000000..a2891c427c2 --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/1-emperor-install/dumpling.v0.67.5.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: dumpling +- ceph: +- install.upgrade: + all: + tag: +- ceph.restart: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rgw/1-emperor-install/v0.73.yaml b/qa/suites/upgrade/emperor/rgw/1-emperor-install/v0.73.yaml new file mode 100644 index 00000000000..7750040f138 --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/1-emperor-install/v0.73.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.73 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rgw/1-emperor-install/v0.74.yaml b/qa/suites/upgrade/emperor/rgw/1-emperor-install/v0.74.yaml new file mode 100644 index 00000000000..9d0ded4f56f --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/1-emperor-install/v0.74.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.74 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rgw/1-emperor-install/v0.75.yaml b/qa/suites/upgrade/emperor/rgw/1-emperor-install/v0.75.yaml new file mode 100644 index 00000000000..368f0ec479e --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/1-emperor-install/v0.75.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.75 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/emperor/rgw/2-workload/testrgw.yaml b/qa/suites/upgrade/emperor/rgw/2-workload/testrgw.yaml new file mode 100644 index 00000000000..f1b2f3e88ef --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/2-workload/testrgw.yaml @@ -0,0 +1,6 @@ +workload: + rgw: [client.0] + s3tests: + client.0: + rgw_server: client.0 + diff --git a/qa/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..0ae6f928096 --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - ceph.restart: [rgw.client.0] diff --git a/qa/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..3f0aad2ec86 --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - 
sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - ceph.restart: [rgw.client.0] diff --git a/qa/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..622b02d4271 --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,36 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [rgw.client.0] diff --git a/qa/suites/upgrade/emperor/rgw/4-final/monthrash.yaml b/qa/suites/upgrade/emperor/rgw/4-final/monthrash.yaml new file mode 100644 index 00000000000..9361edc8015 --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/4-final/monthrash.yaml @@ -0,0 +1,8 @@ +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 +- swift: + client.0: + rgw_server: client.0 + diff --git a/qa/suites/upgrade/emperor/rgw/4-final/osdthrash.yaml b/qa/suites/upgrade/emperor/rgw/4-final/osdthrash.yaml new file mode 100644 index 00000000000..6cf6d861d5f --- /dev/null +++ b/qa/suites/upgrade/emperor/rgw/4-final/osdthrash.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 +- swift: + client.0: + rgw_server: client.0 + diff --git a/qa/suites/upgrade/firefly/newer/% b/qa/suites/upgrade/firefly/newer/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/firefly/newer/0-cluster/start.yaml b/qa/suites/upgrade/firefly/newer/0-cluster/start.yaml new file mode 100644 index 00000000000..1e7ad743675 --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/0-cluster/start.yaml @@ -0,0 +1,24 @@ +overrides: + ceph: + log-whitelist: + - scrub + - scrub mismatch + - ScrubResult + - osd_map_max_advance + fs: xfs + conf: + osd: + osd map max advance: 1000 +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 +- - client.0 + - client.1 diff --git a/qa/suites/upgrade/firefly/newer/1-install/v0.80.10.yaml b/qa/suites/upgrade/firefly/newer/1-install/v0.80.10.yaml new file mode 100644 index 00000000000..40e3883773d --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/1-install/v0.80.10.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.80.10 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/newer/1-install/v0.80.4.yaml b/qa/suites/upgrade/firefly/newer/1-install/v0.80.4.yaml new file mode 100644 index 00000000000..371fc35502f --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/1-install/v0.80.4.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.80.4 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/newer/1-install/v0.80.5.yaml b/qa/suites/upgrade/firefly/newer/1-install/v0.80.5.yaml new file mode 100644 index 00000000000..90f7dfd9860 --- 
/dev/null +++ b/qa/suites/upgrade/firefly/newer/1-install/v0.80.5.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.80.5 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/newer/1-install/v0.80.6.yaml b/qa/suites/upgrade/firefly/newer/1-install/v0.80.6.yaml new file mode 100644 index 00000000000..c6502a03dab --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/1-install/v0.80.6.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.80.6 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/newer/1-install/v0.80.8.yaml b/qa/suites/upgrade/firefly/newer/1-install/v0.80.8.yaml new file mode 100644 index 00000000000..1582dcf4479 --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/1-install/v0.80.8.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.80.8 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/newer/1-install/v0.80.9.yaml b/qa/suites/upgrade/firefly/newer/1-install/v0.80.9.yaml new file mode 100644 index 00000000000..374b7e0f436 --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/1-install/v0.80.9.yaml @@ -0,0 +1,7 @@ +tasks: +- install: + tag: v0.80.9 +- ceph: +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/newer/2-workload/+ b/qa/suites/upgrade/firefly/newer/2-workload/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/firefly/newer/2-workload/blogbench.yaml b/qa/suites/upgrade/firefly/newer/2-workload/blogbench.yaml new file mode 100644 index 00000000000..909f5bc1c75 --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/2-workload/blogbench.yaml @@ -0,0 +1,6 @@ +workload: + sequential: + - workunit: + clients: + client.0: + - suites/blogbench.sh diff --git a/qa/suites/upgrade/firefly/newer/2-workload/rbd.yaml b/qa/suites/upgrade/firefly/newer/2-workload/rbd.yaml new file mode 100644 index 00000000000..0d2f3ad72ef --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/2-workload/rbd.yaml @@ -0,0 +1,12 @@ +workload: + sequential: + - workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - workunit: + clients: + client.0: + - cls/test_cls_rbd.sh diff --git a/qa/suites/upgrade/firefly/newer/2-workload/s3tests.yaml b/qa/suites/upgrade/firefly/newer/2-workload/s3tests.yaml new file mode 100644 index 00000000000..1de84f2de2e --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/2-workload/s3tests.yaml @@ -0,0 +1,7 @@ +workload: + sequential: + - rgw: [client.0] + - s3tests: + client.0: + force-branch: firefly-original + rgw_server: client.0 diff --git a/qa/suites/upgrade/firefly/newer/2-workload/testrados.yaml b/qa/suites/upgrade/firefly/newer/2-workload/testrados.yaml new file mode 100644 index 00000000000..98f426b3737 --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/2-workload/testrados.yaml @@ -0,0 +1,13 @@ +workload: + sequential: + - rados: + clients: [client.0] + ops: 2000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..32f2314f736 --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,35 @@ +upgrade-sequence: + sequential: + - install.upgrade: + mon.a: + branch: firefly + mon.b: + branch: firefly + - ceph.restart: [mon.a] + - 
sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/qa/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..7a3dbe51e3f --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,35 @@ +upgrade-sequence: + sequential: + - install.upgrade: + mon.a: + branch: firefly + mon.b: + branch: firefly + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] diff --git a/qa/suites/upgrade/firefly/newer/4-finish-upgrade.yaml b/qa/suites/upgrade/firefly/newer/4-finish-upgrade.yaml new file mode 100644 index 00000000000..3f55404ccdb --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/4-finish-upgrade.yaml @@ -0,0 +1,3 @@ +tasks: +- install.upgrade: + client.0: diff --git a/qa/suites/upgrade/firefly/newer/5-final/+ b/qa/suites/upgrade/firefly/newer/5-final/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/firefly/newer/5-final/monthrash.yaml b/qa/suites/upgrade/firefly/newer/5-final/monthrash.yaml new file mode 100644 index 00000000000..8e321c47c41 --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/5-final/monthrash.yaml @@ -0,0 +1,11 @@ +tasks: +- sequential: + - mon_thrash: + revive_delay: 20 + thrash_delay: 1 + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/dbench.sh + diff --git a/qa/suites/upgrade/firefly/newer/5-final/osdthrash.yaml b/qa/suites/upgrade/firefly/newer/5-final/osdthrash.yaml new file mode 100644 index 00000000000..1efbf428039 --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/5-final/osdthrash.yaml @@ -0,0 +1,18 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- sequential: + - thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/iogen.sh + diff --git a/qa/suites/upgrade/firefly/newer/5-final/rbd.yaml b/qa/suites/upgrade/firefly/newer/5-final/rbd.yaml new file mode 100644 index 00000000000..ee82941298d --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/5-final/rbd.yaml @@ -0,0 +1,12 @@ +tasks: +- sequential: + - workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - workunit: + clients: + client.0: + - cls/test_cls_rbd.sh diff --git a/qa/suites/upgrade/firefly/newer/5-final/testrgw.yaml b/qa/suites/upgrade/firefly/newer/5-final/testrgw.yaml new file mode 100644 index 00000000000..9dfc14ea677 --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/5-final/testrgw.yaml @@ 
-0,0 +1,15 @@ +tasks: +- sequential: + - rgw: [client.1] + - s3readwrite: + client.0: + rgw_server: client.1 + readwrite: + bucket: rwtest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/upgrade/firefly/newer/distros b/qa/suites/upgrade/firefly/newer/distros new file mode 120000 index 00000000000..79010c36a59 --- /dev/null +++ b/qa/suites/upgrade/firefly/newer/distros @@ -0,0 +1 @@ +../../../../distros/supported \ No newline at end of file diff --git a/qa/suites/upgrade/firefly/older/% b/qa/suites/upgrade/firefly/older/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/firefly/older/0-cluster/start.yaml b/qa/suites/upgrade/firefly/older/0-cluster/start.yaml new file mode 100644 index 00000000000..5dab05f107f --- /dev/null +++ b/qa/suites/upgrade/firefly/older/0-cluster/start.yaml @@ -0,0 +1,22 @@ +overrides: + ceph: + log-whitelist: + - scrub + - osd_map_max_advance + fs: xfs + conf: + osd: + osd map max advance: 1000 +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 +- - client.1 + - client.0 diff --git a/qa/suites/upgrade/firefly/older/1-install/dumpling.v0.80.1.yaml b/qa/suites/upgrade/firefly/older/1-install/dumpling.v0.80.1.yaml new file mode 100644 index 00000000000..518dfa8c500 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/1-install/dumpling.v0.80.1.yaml @@ -0,0 +1,20 @@ +tasks: +- install: + branch: dumpling +- ceph: + conf: + mon: + mon warn on legacy crush tunables: false + log-whitelist: + - scrub mismatch + - ScrubResult +- install.upgrade: + all: + tag: v0.80.1 +- ceph.restart: +- exec: + client.0: + - ceph osd crush tunables firefly +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/older/1-install/emperor.v.80.1.yaml b/qa/suites/upgrade/firefly/older/1-install/emperor.v.80.1.yaml new file mode 100644 index 00000000000..e41e50c1f65 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/1-install/emperor.v.80.1.yaml @@ -0,0 +1,20 @@ +tasks: +- install: + branch: emperor +- ceph: + conf: + mon: + mon warn on legacy crush tunables: false + log-whitelist: + - scrub mismatch + - ScrubResult +- install.upgrade: + all: + tag: v0.80.1 +- ceph.restart: +- exec: + client.0: + - ceph osd crush tunables firefly +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/older/1-install/latest_dumpling_release.yaml b/qa/suites/upgrade/firefly/older/1-install/latest_dumpling_release.yaml new file mode 100644 index 00000000000..090ff7f1020 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/1-install/latest_dumpling_release.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + conf: + mon: + mon warn on legacy crush tunables: false + thrashosds: + thrash_primary_affinity: false +tasks: +- install: + tag: v0.67.11 +- ceph: + log-whitelist: + - scrub mismatch + - ScrubResult +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/older/1-install/v0.80.1.yaml b/qa/suites/upgrade/firefly/older/1-install/v0.80.1.yaml new file mode 100644 index 00000000000..8c6d1fda3c4 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/1-install/v0.80.1.yaml @@ -0,0 +1,10 @@ +tasks: +- install: + tag: v0.80.1 +- ceph: + log-whitelist: + - scrub mismatch + - ScrubResult +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/older/1-install/v0.80.2.yaml b/qa/suites/upgrade/firefly/older/1-install/v0.80.2.yaml new file mode 100644 index 
00000000000..d03e4f777c3 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/1-install/v0.80.2.yaml @@ -0,0 +1,10 @@ +tasks: +- install: + tag: v0.80.2 +- ceph: + log-whitelist: + - scrub mismatch + - ScrubResult +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/older/1-install/v0.80.3.yaml b/qa/suites/upgrade/firefly/older/1-install/v0.80.3.yaml new file mode 100644 index 00000000000..6396ab617f8 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/1-install/v0.80.3.yaml @@ -0,0 +1,10 @@ +tasks: +- install: + tag: v0.80.3 +- ceph: + log-whitelist: + - scrub mismatch + - ScrubResult +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/older/1-install/v0.80.yaml b/qa/suites/upgrade/firefly/older/1-install/v0.80.yaml new file mode 100644 index 00000000000..eb945e62eb7 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/1-install/v0.80.yaml @@ -0,0 +1,10 @@ +tasks: +- install: + tag: v0.80 +- ceph: + log-whitelist: + - scrub mismatch + - ScrubResult +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/firefly/older/2-workload/+ b/qa/suites/upgrade/firefly/older/2-workload/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/firefly/older/2-workload/blogbench.yaml b/qa/suites/upgrade/firefly/older/2-workload/blogbench.yaml new file mode 100644 index 00000000000..909f5bc1c75 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/2-workload/blogbench.yaml @@ -0,0 +1,6 @@ +workload: + sequential: + - workunit: + clients: + client.0: + - suites/blogbench.sh diff --git a/qa/suites/upgrade/firefly/older/2-workload/radosloadgen.yaml b/qa/suites/upgrade/firefly/older/2-workload/radosloadgen.yaml new file mode 100644 index 00000000000..572f4755e66 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/2-workload/radosloadgen.yaml @@ -0,0 +1,6 @@ +workload: + sequential: + - workunit: + clients: + client.0: + - rados/load-gen-big.sh diff --git a/qa/suites/upgrade/firefly/older/2-workload/rbd.yaml b/qa/suites/upgrade/firefly/older/2-workload/rbd.yaml new file mode 100644 index 00000000000..0d2f3ad72ef --- /dev/null +++ b/qa/suites/upgrade/firefly/older/2-workload/rbd.yaml @@ -0,0 +1,12 @@ +workload: + sequential: + - workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - workunit: + clients: + client.0: + - cls/test_cls_rbd.sh diff --git a/qa/suites/upgrade/firefly/older/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/firefly/older/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..120de5cca8e --- /dev/null +++ b/qa/suites/upgrade/firefly/older/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,37 @@ +upgrade-sequence: + sequential: + - install.upgrade: + mon.a: + branch: firefly + mon.b: + branch: firefly + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 diff --git a/qa/suites/upgrade/firefly/older/4-finish-upgrade.yaml b/qa/suites/upgrade/firefly/older/4-finish-upgrade.yaml new file mode 100644 index 00000000000..3f55404ccdb 
--- /dev/null +++ b/qa/suites/upgrade/firefly/older/4-finish-upgrade.yaml @@ -0,0 +1,3 @@ +tasks: +- install.upgrade: + client.0: diff --git a/qa/suites/upgrade/firefly/older/5-final/+ b/qa/suites/upgrade/firefly/older/5-final/+ new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/firefly/older/5-final/monthrash.yaml b/qa/suites/upgrade/firefly/older/5-final/monthrash.yaml new file mode 100644 index 00000000000..8e321c47c41 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/5-final/monthrash.yaml @@ -0,0 +1,11 @@ +tasks: +- sequential: + - mon_thrash: + revive_delay: 20 + thrash_delay: 1 + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/dbench.sh + diff --git a/qa/suites/upgrade/firefly/older/5-final/osdthrash.yaml b/qa/suites/upgrade/firefly/older/5-final/osdthrash.yaml new file mode 100644 index 00000000000..1efbf428039 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/5-final/osdthrash.yaml @@ -0,0 +1,18 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- sequential: + - thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/iogen.sh + diff --git a/qa/suites/upgrade/firefly/older/5-final/rbd.yaml b/qa/suites/upgrade/firefly/older/5-final/rbd.yaml new file mode 100644 index 00000000000..ee82941298d --- /dev/null +++ b/qa/suites/upgrade/firefly/older/5-final/rbd.yaml @@ -0,0 +1,12 @@ +tasks: +- sequential: + - workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - workunit: + clients: + client.0: + - cls/test_cls_rbd.sh diff --git a/qa/suites/upgrade/firefly/older/5-final/testrgw.yaml b/qa/suites/upgrade/firefly/older/5-final/testrgw.yaml new file mode 100644 index 00000000000..5d388e86200 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/5-final/testrgw.yaml @@ -0,0 +1,7 @@ +tasks: +- sequential: + - rgw: [client.1] + - s3tests: + client.1: + force-branch: firefly-original + rgw_server: client.1 diff --git a/qa/suites/upgrade/firefly/older/distros b/qa/suites/upgrade/firefly/older/distros new file mode 120000 index 00000000000..79010c36a59 --- /dev/null +++ b/qa/suites/upgrade/firefly/older/distros @@ -0,0 +1 @@ +../../../../distros/supported \ No newline at end of file diff --git a/qa/suites/upgrade/firefly/singleton/upgrade_client/% b/qa/suites/upgrade/firefly/singleton/upgrade_client/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/firefly/singleton/upgrade_client/distros b/qa/suites/upgrade/firefly/singleton/upgrade_client/distros new file mode 120000 index 00000000000..ea78f6570e5 --- /dev/null +++ b/qa/suites/upgrade/firefly/singleton/upgrade_client/distros @@ -0,0 +1 @@ +../../../../../distros/supported/ \ No newline at end of file diff --git a/qa/suites/upgrade/firefly/singleton/upgrade_client/upgrade_client_first.yaml b/qa/suites/upgrade/firefly/singleton/upgrade_client/upgrade_client_first.yaml new file mode 100644 index 00000000000..882b69350ef --- /dev/null +++ b/qa/suites/upgrade/firefly/singleton/upgrade_client/upgrade_client_first.yaml @@ -0,0 +1,69 @@ +# this case tests issue #9419 "dumpling->firefly upgrade, sending setallochint?" 
+overrides: + ceph: + conf: + mon: + mon warn on legacy crush tunables: false + log-whitelist: + - scrub mismatch + - ScrubResult +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 +tasks: +- install: + branch: dumpling +- print: "**** done install dumpling" +- ceph: + fs: xfs +- print: "**** done ceph" +- install.upgrade: + client.0: +- print: "**** done install.upgrade on client.0" +- install.upgrade: + mon.a: + mon.b: +- print: "**** done install.upgrade" +- ceph.restart: + #osd.2 is not upgraded + daemons: [mon.a, mon.b, mon.c, osd.0, osd.1] +- print: "**** done restart all" +- workunit: + branch: firefly + clients: + client.0: + - rbd/test_librbd_python.sh +- print: "**** done rbd/test_librbd_python.sh" +- workunit: + branch: dumpling + clients: + client.0: + - rados/load-gen-big.sh +- print: "**** done rados/load-gen-big.sh" +- workunit: + branch: firefly + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format +- print: "**** done rbd/import_export.sh" +- workunit: + branch: firefly + clients: + client.0: + - cls/test_cls_rbd.sh +- print: "**** done cls/test_cls_rbd.sh" +- rgw: [client.0] +- s3tests: + client.0: + force-branch: firefly + rgw_server: client.0 +- print: "**** done s3tests" diff --git a/qa/suites/upgrade/firefly/singleton/versions-steps/% b/qa/suites/upgrade/firefly/singleton/versions-steps/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/firefly/singleton/versions-steps/distros b/qa/suites/upgrade/firefly/singleton/versions-steps/distros new file mode 120000 index 00000000000..ea78f6570e5 --- /dev/null +++ b/qa/suites/upgrade/firefly/singleton/versions-steps/distros @@ -0,0 +1 @@ +../../../../../distros/supported/ \ No newline at end of file diff --git a/qa/suites/upgrade/firefly/singleton/versions-steps/versions-steps.yaml b/qa/suites/upgrade/firefly/singleton/versions-steps/versions-steps.yaml new file mode 100644 index 00000000000..116811c12ea --- /dev/null +++ b/qa/suites/upgrade/firefly/singleton/versions-steps/versions-steps.yaml @@ -0,0 +1,342 @@ +overrides: + ceph: + log-whitelist: + - scrub + - osd_map_max_advance + fs: xfs + conf: + osd: + osd map max advance: 1000 +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 +- - client.1 +tasks: +- install: + tag: v0.80.4 +- print: "**** done v0.80.4 install" +- ceph: + fs: xfs +- print: "**** done ceph xfs" +- sequential: + - workload +- print: "**** done workload v0.80.4" +- parallel: + - workload1 + - upgrade-sequence1 +- print: "**** done parallel v0.80.5" +- parallel: + - workload2 + - upgrade-sequence2 +- print: "**** done parallel v0.80.7" +- parallel: + - workload3 + - upgrade-sequence3 +- print: "**** done parallel v0.80.8" +- parallel: + - workload4 + - upgrade-sequence4 +- print: "**** done parallel v0.80.9" +- parallel: + - workload_firefly + - upgrade-sequence_firefly +- print: "**** done parallel firefly branch" +####################### +workload: + sequential: + - workunit: + clients: + client.0: + - suites/blogbench.sh + - print: "**** done suites/blogbench.sh workload" +workload1: + sequential: + - workunit: + clients: + client.0: + - rados/load-gen-big.sh + - print: "**** done rados/load-gen-big.sh workload1" + - workunit: + clients: + client.0: + - rados/test.sh + - cls + - print: "**** done rados/test.sh & cls workload1" + - workunit: + clients: + client.0: + - rbd/test_librbd.sh + - print: "**** done 
rbd/test_librbd.sh workload1" +upgrade-sequence1: + sequential: + - install.upgrade: + mon.a: + tag: v0.80.5 + mon.b: + tag: v0.80.5 + client.1: + tag: v0.80.5 + - print: "**** done v0.80.5 install.upgrade" + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - print: "**** done ceph.restart all 1 mon/mds/osd" +workload2: + sequential: +# removed to fix #10176 +# - workunit: +# clients: +# client.0: +# - rbd/import_export.sh +# env: +# RBD_CREATE_ARGS: --new-format + - workunit: + clients: + client.0: + - cls/test_cls_rbd.sh + - print: "**** done cls/test_cls_rbd.sh workload2" +upgrade-sequence2: + sequential: + - install.upgrade: + mon.a: + tag: v0.80.7 + mon.b: + tag: v0.80.7 + client.1: + tag: v0.80.7 + - print: "**** done v0.80.7 install.upgrade" + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - print: "**** done ceph.restart all 2 osd/mon/mds" +workload3: + sequential: + - workunit: + clients: + client.0: + - rados/load-gen-big.sh + - print: "**** done rados/load-gen-big.sh workload3" + - workunit: + clients: + client.0: + - rados/test.sh + - cls + - print: "**** done rados/test.sh & cls workload3" + - workunit: + clients: + client.0: + - rbd/test_librbd.sh + - print: "**** done rbd/test_librbd.sh workload3" +upgrade-sequence3: + sequential: + - install.upgrade: + mon.a: + tag: v0.80.8 + mon.b: + tag: v0.80.8 + client.1: + tag: v0.80.8 + - print: "**** done v0.80.8 install.upgrade" + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - print: "**** done ceph.restart all mon/mds/osd upgrade-sequence3" +workload4: + sequential: + - workunit: + clients: + client.0: + - rados/load-gen-big.sh + - print: "**** done rados/load-gen-big.sh workload4" + - workunit: + clients: + client.0: + - rados/test.sh + - cls + - print: "**** done rados/test.sh & cls workload4" + - workunit: + clients: + client.0: + - rbd/test_librbd.sh + - print: "**** done rbd/test_librbd.sh workload4" +upgrade-sequence4: + sequential: + - install.upgrade: + mon.a: + tag: v0.80.9 + mon.b: + tag: v0.80.9 + client.1: + tag: v0.80.9 + - print: "**** done v0.80.9 install.upgrade" + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - 
ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 30 + - print: "**** done ceph.restart all 1 mon/mds/osd upgrade-sequence4" +workload_firefly: + sequential: + - rgw: [client.0] + - print: "**** done rgw workload_firefly" + - s3tests: + client.0: + force-branch: firefly + rgw_server: client.0 + - print: "**** done s3tests workload_firefly" +upgrade-sequence_firefly: + sequential: + - install.upgrade: + mon.a: + branch: firefly + mon.b: + branch: firefly + client.1: + branch: firefly + - print: "**** done branch: firefly install.upgrade" + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - print: "**** done ceph.restart all firefly current branch mds/osd/mon" diff --git a/qa/suites/upgrade/old/fs/fs/% b/qa/suites/upgrade/old/fs/fs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/fs/fs/0-cluster/start.yaml b/qa/suites/upgrade/old/fs/fs/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git a/qa/suites/upgrade/old/fs/fs/1-cuttlefish-install/cuttlefish.yaml b/qa/suites/upgrade/old/fs/fs/1-cuttlefish-install/cuttlefish.yaml new file mode 100644 index 00000000000..e427343d8c3 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/1-cuttlefish-install/cuttlefish.yaml @@ -0,0 +1,6 @@ +tasks: +- install: + branch: cuttlefish +- ceph: + fs: xfs +- ceph-fuse: diff --git a/qa/suites/upgrade/old/fs/fs/2-cuttlefish-workload/blogbench.yaml b/qa/suites/upgrade/old/fs/fs/2-cuttlefish-workload/blogbench.yaml new file mode 100644 index 00000000000..50161b08114 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/2-cuttlefish-workload/blogbench.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: cuttlefish + clients: + client.0: + - suites/blogbench.sh diff --git a/qa/suites/upgrade/old/fs/fs/2-cuttlefish-workload/dbench.yaml b/qa/suites/upgrade/old/fs/fs/2-cuttlefish-workload/dbench.yaml new file mode 100644 index 00000000000..3bb9040e251 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/2-cuttlefish-workload/dbench.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: cuttlefish + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/upgrade/old/fs/fs/2-cuttlefish-workload/iogen.yaml b/qa/suites/upgrade/old/fs/fs/2-cuttlefish-workload/iogen.yaml new file mode 100644 index 00000000000..c832d2f5bb1 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/2-cuttlefish-workload/iogen.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: cuttlefish + clients: + client.0: + - suites/iogen.sh diff --git 
a/qa/suites/upgrade/old/fs/fs/3-upgrade/dumpling.yaml b/qa/suites/upgrade/old/fs/fs/3-upgrade/dumpling.yaml new file mode 100644 index 00000000000..e3e332c4f22 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/3-upgrade/dumpling.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: dumpling diff --git a/qa/suites/upgrade/old/fs/fs/4-restart/mds-mon-osd.yaml b/qa/suites/upgrade/old/fs/fs/4-restart/mds-mon-osd.yaml new file mode 100644 index 00000000000..d21800684d3 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/4-restart/mds-mon-osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/fs/fs/4-restart/mon-mds-osd.yaml b/qa/suites/upgrade/old/fs/fs/4-restart/mon-mds-osd.yaml new file mode 100644 index 00000000000..78e14e9472a --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/4-restart/mon-mds-osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/fs/fs/4-restart/osd-mds-mon.yaml b/qa/suites/upgrade/old/fs/fs/4-restart/osd-mds-mon.yaml new file mode 100644 index 00000000000..dbcd013b3f0 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/4-restart/osd-mds-mon.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/fsstress.yaml b/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/fsstress.yaml new file mode 100644 index 00000000000..ae6f7936989 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/fsstress.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - suites/fsstress.sh diff --git a/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/iogen.yaml b/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/iogen.yaml new file mode 100644 index 00000000000..5aa4d3e091d --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/iogen.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/iogen.sh diff --git a/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/kernel-untar-build.yaml b/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/kernel-untar-build.yaml new file mode 100644 index 00000000000..7fc7de979ed --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/kernel-untar-build.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/tiobench.yaml b/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/tiobench.yaml new file mode 100644 index 00000000000..58ee040af77 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/5-dumpling-workload/tiobench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/tiobench.sh diff --git a/qa/suites/upgrade/old/fs/fs/6-upgrade-to-emperor/emperor.yaml b/qa/suites/upgrade/old/fs/fs/6-upgrade-to-emperor/emperor.yaml new file mode 100644 index 00000000000..e473f31862d --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/6-upgrade-to-emperor/emperor.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: emperor diff --git a/qa/suites/upgrade/old/fs/fs/7-restart/mds-mon-osd.yaml b/qa/suites/upgrade/old/fs/fs/7-restart/mds-mon-osd.yaml new file mode 100644 index 00000000000..d21800684d3 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/7-restart/mds-mon-osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/fs/fs/7-restart/mon-mds-osd.yaml 
b/qa/suites/upgrade/old/fs/fs/7-restart/mon-mds-osd.yaml new file mode 100644 index 00000000000..78e14e9472a --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/7-restart/mon-mds-osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/fs/fs/7-restart/osd-mds-mon.yaml b/qa/suites/upgrade/old/fs/fs/7-restart/osd-mds-mon.yaml new file mode 100644 index 00000000000..dbcd013b3f0 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/7-restart/osd-mds-mon.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/qa/suites/upgrade/old/fs/fs/8-emperor-workload/blogbench.yaml b/qa/suites/upgrade/old/fs/fs/8-emperor-workload/blogbench.yaml new file mode 100644 index 00000000000..4e54068f314 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/8-emperor-workload/blogbench.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: emperor + clients: + client.0: + - suites/blogbench.sh diff --git a/qa/suites/upgrade/old/fs/fs/8-emperor-workload/dbench.yaml b/qa/suites/upgrade/old/fs/fs/8-emperor-workload/dbench.yaml new file mode 100644 index 00000000000..365ba9ac8f4 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/8-emperor-workload/dbench.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: emperor + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/upgrade/old/fs/fs/8-emperor-workload/iogen.yaml b/qa/suites/upgrade/old/fs/fs/8-emperor-workload/iogen.yaml new file mode 100644 index 00000000000..994fa8716d5 --- /dev/null +++ b/qa/suites/upgrade/old/fs/fs/8-emperor-workload/iogen.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: emperor + clients: + client.0: + - suites/iogen.sh diff --git a/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/% b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/0-cluster/start.yaml b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/0-cluster/start.yaml new file mode 100644 index 00000000000..0a85eacad7f --- /dev/null +++ b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/0-cluster/start.yaml @@ -0,0 +1,11 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 + diff --git a/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/1-cuttlefish-install/cuttlefish.yaml b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/1-cuttlefish-install/cuttlefish.yaml new file mode 100644 index 00000000000..50b65f72bca --- /dev/null +++ b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/1-cuttlefish-install/cuttlefish.yaml @@ -0,0 +1,5 @@ +tasks: +- install: + branch: cuttlefish +- ceph: + diff --git a/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/api.yaml b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/api.yaml new file mode 100644 index 00000000000..ad36bddb48c --- /dev/null +++ b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/api.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + branch: cuttlefish + clients: + client.0: + - rados/test.sh + - cls + diff --git a/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/load-gen-mix.yaml b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/load-gen-mix.yaml new file mode 100644 index 00000000000..7ec655c8ccf --- /dev/null +++ b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/load-gen-mix.yaml @@ -0,0 +1,6 @@ +tasks: 
+- workunit: + branch: cuttlefish + clients: + client.0: + - rados/load-gen-mix.sh diff --git a/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/3-partial-osds-upgrade/dumpling.yaml b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/3-partial-osds-upgrade/dumpling.yaml new file mode 100644 index 00000000000..a9b9bf8418c --- /dev/null +++ b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/3-partial-osds-upgrade/dumpling.yaml @@ -0,0 +1,7 @@ +tasks: +- install.upgrade: + osd.0: + branch: dumpling + osd.2: + branch: dumpling + diff --git a/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/4-osds-restart/restart.yaml b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/4-osds-restart/restart.yaml new file mode 100644 index 00000000000..3a84bbb4074 --- /dev/null +++ b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/4-osds-restart/restart.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.2] diff --git a/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/api.yaml b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/api.yaml new file mode 100644 index 00000000000..ad36bddb48c --- /dev/null +++ b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/api.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + branch: cuttlefish + clients: + client.0: + - rados/test.sh + - cls + diff --git a/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/load-gen-big.yaml b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/load-gen-big.yaml new file mode 100644 index 00000000000..0f6e616a286 --- /dev/null +++ b/qa/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/load-gen-big.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: cuttlefish + clients: + client.0: + - rados/load-gen-big.sh diff --git a/qa/suites/upgrade/old/mixed-mons/mixed-mons/% b/qa/suites/upgrade/old/mixed-mons/mixed-mons/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/mixed-mons/mixed-mons/0-cluster/start.yaml b/qa/suites/upgrade/old/mixed-mons/mixed-mons/0-cluster/start.yaml new file mode 100644 index 00000000000..0a85eacad7f --- /dev/null +++ b/qa/suites/upgrade/old/mixed-mons/mixed-mons/0-cluster/start.yaml @@ -0,0 +1,11 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 + diff --git a/qa/suites/upgrade/old/mixed-mons/mixed-mons/1-cuttlefish-install/cuttlefish.yaml b/qa/suites/upgrade/old/mixed-mons/mixed-mons/1-cuttlefish-install/cuttlefish.yaml new file mode 100644 index 00000000000..50b65f72bca --- /dev/null +++ b/qa/suites/upgrade/old/mixed-mons/mixed-mons/1-cuttlefish-install/cuttlefish.yaml @@ -0,0 +1,5 @@ +tasks: +- install: + branch: cuttlefish +- ceph: + diff --git a/qa/suites/upgrade/old/mixed-mons/mixed-mons/2-cuttlefish-workload/cephtool.yaml b/qa/suites/upgrade/old/mixed-mons/mixed-mons/2-cuttlefish-workload/cephtool.yaml new file mode 100644 index 00000000000..8648784fd16 --- /dev/null +++ b/qa/suites/upgrade/old/mixed-mons/mixed-mons/2-cuttlefish-workload/cephtool.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + branch: cuttlefish + clients: + all: + - cephtool/test.sh + - mon/pool_ops.sh diff --git a/qa/suites/upgrade/old/mixed-mons/mixed-mons/3-partial-mon-upgrade/dumpling.yaml b/qa/suites/upgrade/old/mixed-mons/mixed-mons/3-partial-mon-upgrade/dumpling.yaml new file mode 100644 index 00000000000..6c9d3206f16 --- /dev/null +++ b/qa/suites/upgrade/old/mixed-mons/mixed-mons/3-partial-mon-upgrade/dumpling.yaml @@ -0,0 +1,4 @@ +tasks: +- 
install.upgrade: + mon.a: + branch: dumpling diff --git a/qa/suites/upgrade/old/mixed-mons/mixed-mons/4-mon-restart/restart.yaml b/qa/suites/upgrade/old/mixed-mons/mixed-mons/4-mon-restart/restart.yaml new file mode 100644 index 00000000000..b6ffb3323d1 --- /dev/null +++ b/qa/suites/upgrade/old/mixed-mons/mixed-mons/4-mon-restart/restart.yaml @@ -0,0 +1,5 @@ +tasks: +- ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true diff --git a/qa/suites/upgrade/old/mixed-mons/mixed-mons/5-mixed-workload/cephtool.yaml b/qa/suites/upgrade/old/mixed-mons/mixed-mons/5-mixed-workload/cephtool.yaml new file mode 100644 index 00000000000..8648784fd16 --- /dev/null +++ b/qa/suites/upgrade/old/mixed-mons/mixed-mons/5-mixed-workload/cephtool.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + branch: cuttlefish + clients: + all: + - cephtool/test.sh + - mon/pool_ops.sh diff --git a/qa/suites/upgrade/old/mixed-mons/mixed-mons/6-rest/rest.yaml b/qa/suites/upgrade/old/mixed-mons/mixed-mons/6-rest/rest.yaml new file mode 100644 index 00000000000..18ae735e663 --- /dev/null +++ b/qa/suites/upgrade/old/mixed-mons/mixed-mons/6-rest/rest.yaml @@ -0,0 +1,26 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - had wrong client addr + - had wrong cluster addr +tasks: +- install.upgrade: + mon.b: + branch: dumpling + client.0: + branch: dumpling +- ceph.restart: + daemons: + - mon.b + - mon.c + - osd.0 + - osd.1 + - osd.2 + - osd.3 +- workunit: + branch: dumpling + clients: + all: + - cephtool/test.sh + - mon/pool_ops.sh diff --git a/qa/suites/upgrade/old/parallel/fs/% b/qa/suites/upgrade/old/parallel/fs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/parallel/fs/0-cluster/start.yaml b/qa/suites/upgrade/old/parallel/fs/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/fs/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git a/qa/suites/upgrade/old/parallel/fs/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/old/parallel/fs/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..6d3947abd90 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/fs/1-dumpling-install/dumpling.yaml @@ -0,0 +1,8 @@ +tasks: +- install: + branch: dumpling +- ceph: + fs: xfs +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/old/parallel/fs/2-workload/blogbench.yaml b/qa/suites/upgrade/old/parallel/fs/2-workload/blogbench.yaml new file mode 100644 index 00000000000..0cd59eaafde --- /dev/null +++ b/qa/suites/upgrade/old/parallel/fs/2-workload/blogbench.yaml @@ -0,0 +1,5 @@ +workload: + workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/upgrade/old/parallel/fs/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/old/parallel/fs/3-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 00000000000..4cb05ce8777 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/fs/3-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,6 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/parallel/fs/distro b/qa/suites/upgrade/old/parallel/fs/distro new file mode 120000 index 00000000000..3a0ac71c8af --- /dev/null +++ b/qa/suites/upgrade/old/parallel/fs/distro @@ -0,0 +1 @@ +../rados/distro \ No newline at end of 
file diff --git a/qa/suites/upgrade/old/parallel/rados/% b/qa/suites/upgrade/old/parallel/rados/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/parallel/rados/0-cluster/start.yaml b/qa/suites/upgrade/old/parallel/rados/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git a/qa/suites/upgrade/old/parallel/rados/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/old/parallel/rados/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..f1a09304712 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/1-dumpling-install/dumpling.yaml @@ -0,0 +1,8 @@ +tasks: +- install: + branch: dumpling +- ceph: + fs: xfs +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/old/parallel/rados/2-workload/loadgenbig.yaml b/qa/suites/upgrade/old/parallel/rados/2-workload/loadgenbig.yaml new file mode 100644 index 00000000000..b118459ce90 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/2-workload/loadgenbig.yaml @@ -0,0 +1,6 @@ +workload: + workunit: + branch: dumpling + clients: + all: + - rados/load-gen-big.sh diff --git a/qa/suites/upgrade/old/parallel/rados/2-workload/loadgenmix.yaml b/qa/suites/upgrade/old/parallel/rados/2-workload/loadgenmix.yaml new file mode 100644 index 00000000000..8c7f4c5653a --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/2-workload/loadgenmix.yaml @@ -0,0 +1,6 @@ +workload: + workunit: + branch: dumpling + clients: + client.0: + - rados/load-gen-mix.sh diff --git a/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 00000000000..4cb05ce8777 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,6 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml new file mode 100644 index 00000000000..717f778e458 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 60 + - ceph.restart: [osd.1] + - sleep: + duration: 60 + - ceph.restart: [osd.2] + - sleep: + duration: 60 + - ceph.restart: [osd.3] diff --git a/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000000..8ad7503bb63 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,33 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: 
emperor + - ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 60 + - ceph.restart: [osd.1] + - sleep: + duration: 60 + - ceph.restart: [osd.2] + - sleep: + duration: 60 + - ceph.restart: [osd.3] diff --git a/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml new file mode 100644 index 00000000000..a3607dbcb5c --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml @@ -0,0 +1,35 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [osd.0] + - sleep: + duration: 60 + - ceph.restart: [osd.1] + - sleep: + duration: 60 + - ceph.restart: [osd.2] + - sleep: + duration: 60 + - ceph.restart: [osd.3] + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 diff --git a/qa/suites/upgrade/old/parallel/rados/distro/centos_6.4.yaml b/qa/suites/upgrade/old/parallel/rados/distro/centos_6.4.yaml new file mode 100644 index 00000000000..02383cd5f8c --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/distro/centos_6.4.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "6.4" diff --git a/qa/suites/upgrade/old/parallel/rados/distro/debian_7.0.yaml b/qa/suites/upgrade/old/parallel/rados/distro/debian_7.0.yaml new file mode 100644 index 00000000000..8100dc41e3d --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/distro/debian_7.0.yaml @@ -0,0 +1,2 @@ +os_type: debian +os_version: "7.0" diff --git a/qa/suites/upgrade/old/parallel/rados/distro/fedora_18.yaml b/qa/suites/upgrade/old/parallel/rados/distro/fedora_18.yaml new file mode 100644 index 00000000000..07872aa7edf --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/distro/fedora_18.yaml @@ -0,0 +1,2 @@ +os_type: fedora +os_version: "18" diff --git a/qa/suites/upgrade/old/parallel/rados/distro/rhel_6.3.yaml b/qa/suites/upgrade/old/parallel/rados/distro/rhel_6.3.yaml new file mode 100644 index 00000000000..6a8edcd5626 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/distro/rhel_6.3.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "6.3" diff --git a/qa/suites/upgrade/old/parallel/rados/distro/ubuntu_12.04.yaml b/qa/suites/upgrade/old/parallel/rados/distro/ubuntu_12.04.yaml new file mode 100644 index 00000000000..dbc3a8d9c58 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rados/distro/ubuntu_12.04.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "12.04" diff --git a/qa/suites/upgrade/old/parallel/rgw/% b/qa/suites/upgrade/old/parallel/rgw/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/parallel/rgw/0-cluster/start.yaml b/qa/suites/upgrade/old/parallel/rgw/0-cluster/start.yaml new file mode 100644 index 00000000000..5b6d8978cdc --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rgw/0-cluster/start.yaml @@ -0,0 +1,11 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + 
- mon.c + - osd.2 + - osd.3 +- - client.0 + - client.1 diff --git a/qa/suites/upgrade/old/parallel/rgw/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/old/parallel/rgw/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..f1a09304712 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rgw/1-dumpling-install/dumpling.yaml @@ -0,0 +1,8 @@ +tasks: +- install: + branch: dumpling +- ceph: + fs: xfs +- parallel: + - workload + - upgrade-sequence diff --git a/qa/suites/upgrade/old/parallel/rgw/2-workload/s3tests.yaml b/qa/suites/upgrade/old/parallel/rgw/2-workload/s3tests.yaml new file mode 100644 index 00000000000..bd91e2c9f65 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rgw/2-workload/s3tests.yaml @@ -0,0 +1,8 @@ +workload: + sequential: + - rgw: [client.0] + - s3tests: + # use older tests when we are running a mix + client.0: + force-branch: dumpling + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/parallel/rgw/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/old/parallel/rgw/3-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 00000000000..23740967edf --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rgw/3-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,6 @@ +upgrade-sequence: + sequential: + - install.upgrade: + all: + branch: emperor + - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0] diff --git a/qa/suites/upgrade/old/parallel/rgw/4-final-workload/final.yaml b/qa/suites/upgrade/old/parallel/rgw/4-final-workload/final.yaml new file mode 100644 index 00000000000..fb754ed0ce7 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rgw/4-final-workload/final.yaml @@ -0,0 +1,5 @@ +tasks: +- rgw: [client.1] +- swift: + client.1: + rgw_server: client.1 diff --git a/qa/suites/upgrade/old/parallel/rgw/distro b/qa/suites/upgrade/old/parallel/rgw/distro new file mode 120000 index 00000000000..3a0ac71c8af --- /dev/null +++ b/qa/suites/upgrade/old/parallel/rgw/distro @@ -0,0 +1 @@ +../rados/distro \ No newline at end of file diff --git a/qa/suites/upgrade/old/parallel/stress-split/% b/qa/suites/upgrade/old/parallel/stress-split/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/parallel/stress-split/0-cluster/start.yaml b/qa/suites/upgrade/old/parallel/stress-split/0-cluster/start.yaml new file mode 100644 index 00000000000..89d4b3681a9 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/0-cluster/start.yaml @@ -0,0 +1,12 @@ +roles: +- - mon.a + - mon.b + - mds.a + - osd.0 + - osd.1 + - osd.2 +- - osd.3 + - osd.4 + - osd.5 + - mon.c +- - client.0 diff --git a/qa/suites/upgrade/old/parallel/stress-split/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/old/parallel/stress-split/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..c98631e2bbd --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/1-dumpling-install/dumpling.yaml @@ -0,0 +1,5 @@ +tasks: +- install: + branch: dumpling +- ceph: + fs: xfs diff --git a/qa/suites/upgrade/old/parallel/stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/old/parallel/stress-split/2-partial-upgrade/firsthalf.yaml new file mode 100644 index 00000000000..68c9d44b7c3 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/2-partial-upgrade/firsthalf.yaml @@ -0,0 +1,5 @@ +tasks: +- install.upgrade: + osd.0: +- ceph.restart: + daemons: [osd.0, osd.1, osd.2] diff --git a/qa/suites/upgrade/old/parallel/stress-split/3-thrash/default.yaml 
b/qa/suites/upgrade/old/parallel/stress-split/3-thrash/default.yaml new file mode 100644 index 00000000000..21d4c752075 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/3-thrash/default.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 diff --git a/qa/suites/upgrade/old/parallel/stress-split/4-mon/mona.yaml b/qa/suites/upgrade/old/parallel/stress-split/4-mon/mona.yaml new file mode 100644 index 00000000000..b6ffb3323d1 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/4-mon/mona.yaml @@ -0,0 +1,5 @@ +tasks: +- ceph.restart: + daemons: [mon.a] + wait-for-healthy: false + wait-for-osds-up: true diff --git a/qa/suites/upgrade/old/parallel/stress-split/5-workload/rados_api_tests.yaml b/qa/suites/upgrade/old/parallel/stress-split/5-workload/rados_api_tests.yaml new file mode 100644 index 00000000000..5797c2f292a --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/5-workload/rados_api_tests.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/upgrade/old/parallel/stress-split/5-workload/radosbench.yaml b/qa/suites/upgrade/old/parallel/stress-split/5-workload/radosbench.yaml new file mode 100644 index 00000000000..3940870fce0 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/5-workload/radosbench.yaml @@ -0,0 +1,4 @@ +tasks: +- radosbench: + clients: [client.0] + time: 1800 diff --git a/qa/suites/upgrade/old/parallel/stress-split/5-workload/readwrite.yaml b/qa/suites/upgrade/old/parallel/stress-split/5-workload/readwrite.yaml new file mode 100644 index 00000000000..c53e52b0872 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/5-workload/readwrite.yaml @@ -0,0 +1,9 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 45 + write: 45 + delete: 10 diff --git a/qa/suites/upgrade/old/parallel/stress-split/5-workload/snaps-few-objects.yaml b/qa/suites/upgrade/old/parallel/stress-split/5-workload/snaps-few-objects.yaml new file mode 100644 index 00000000000..c54039766c0 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/5-workload/snaps-few-objects.yaml @@ -0,0 +1,12 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/old/parallel/stress-split/5-workload/snaps-many-objects.yaml b/qa/suites/upgrade/old/parallel/stress-split/5-workload/snaps-many-objects.yaml new file mode 100644 index 00000000000..9e311c946e1 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/5-workload/snaps-many-objects.yaml @@ -0,0 +1,12 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/old/parallel/stress-split/6-next-mon/monb.yaml b/qa/suites/upgrade/old/parallel/stress-split/6-next-mon/monb.yaml new file mode 100644 index 00000000000..513890c41c0 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/6-next-mon/monb.yaml @@ -0,0 +1,5 @@ +tasks: +- ceph.restart: + daemons: [mon.b] + wait-for-healthy: false + wait-for-osds-up: true diff --git a/qa/suites/upgrade/old/parallel/stress-split/7-workload/rados_api_tests.yaml 
b/qa/suites/upgrade/old/parallel/stress-split/7-workload/rados_api_tests.yaml new file mode 100644 index 00000000000..5797c2f292a --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/7-workload/rados_api_tests.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/upgrade/old/parallel/stress-split/8-next-mon/monc.yaml b/qa/suites/upgrade/old/parallel/stress-split/8-next-mon/monc.yaml new file mode 100644 index 00000000000..28acc466907 --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/8-next-mon/monc.yaml @@ -0,0 +1,8 @@ +tasks: +- install.upgrade: + mon.c: null +- ceph.restart: + daemons: [mon.c] + wait-for-healthy: false + wait-for-osds-up: true +- ceph.wait_for_mon_quorum: [a, b, c] diff --git a/qa/suites/upgrade/old/parallel/stress-split/9-workload/rados_api_tests.yaml b/qa/suites/upgrade/old/parallel/stress-split/9-workload/rados_api_tests.yaml new file mode 100644 index 00000000000..5797c2f292a --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/9-workload/rados_api_tests.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/upgrade/old/parallel/stress-split/distro b/qa/suites/upgrade/old/parallel/stress-split/distro new file mode 120000 index 00000000000..3a0ac71c8af --- /dev/null +++ b/qa/suites/upgrade/old/parallel/stress-split/distro @@ -0,0 +1 @@ +../rados/distro \ No newline at end of file diff --git a/qa/suites/upgrade/old/rados-double/rados-double/% b/qa/suites/upgrade/old/rados-double/rados-double/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/rados-double/rados-double/0-cluster/start.yaml b/qa/suites/upgrade/old/rados-double/rados-double/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git a/qa/suites/upgrade/old/rados-double/rados-double/1-bobtail-install/bobtail.yaml b/qa/suites/upgrade/old/rados-double/rados-double/1-bobtail-install/bobtail.yaml new file mode 100644 index 00000000000..c676a5582d5 --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/1-bobtail-install/bobtail.yaml @@ -0,0 +1,4 @@ +tasks: +- install: + branch: bobtail +- ceph: diff --git a/qa/suites/upgrade/old/rados-double/rados-double/2-bobtail-workload/api.yaml b/qa/suites/upgrade/old/rados-double/rados-double/2-bobtail-workload/api.yaml new file mode 100644 index 00000000000..637b7a8be97 --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/2-bobtail-workload/api.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: bobtail + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/upgrade/old/rados-double/rados-double/3-upgrade/dumpling.yaml b/qa/suites/upgrade/old/rados-double/rados-double/3-upgrade/dumpling.yaml new file mode 100644 index 00000000000..e3e332c4f22 --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/3-upgrade/dumpling.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: dumpling diff --git a/qa/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_mon_mds_osd.yaml b/qa/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_mon_mds_osd.yaml new file mode 100644 index 00000000000..78e14e9472a --- /dev/null +++ 
b/qa/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_mon_mds_osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_osd_mds_mon.yaml b/qa/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_osd_mds_mon.yaml new file mode 100644 index 00000000000..dbcd013b3f0 --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_osd_mds_mon.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/qa/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/api.yaml b/qa/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/api.yaml new file mode 100644 index 00000000000..b091ecc2090 --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/api.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rados/test.sh + - cls diff --git a/qa/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/load-gen-mix.yaml b/qa/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/load-gen-mix.yaml new file mode 100644 index 00000000000..e89d1f5534c --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/load-gen-mix.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rados/load-gen-mix.sh diff --git a/qa/suites/upgrade/old/rados-double/rados-double/6-upgrade-next/next.yaml b/qa/suites/upgrade/old/rados-double/rados-double/6-upgrade-next/next.yaml new file mode 100644 index 00000000000..bb34346801c --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/6-upgrade-next/next.yaml @@ -0,0 +1,3 @@ +tasks: +- install.upgrade: + all: diff --git a/qa/suites/upgrade/old/rados-double/rados-double/7-restart/mon-mds-osd.yaml b/qa/suites/upgrade/old/rados-double/rados-double/7-restart/mon-mds-osd.yaml new file mode 100644 index 00000000000..78e14e9472a --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/7-restart/mon-mds-osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/rados-double/rados-double/7-restart/osd-mds-mon.yaml b/qa/suites/upgrade/old/rados-double/rados-double/7-restart/osd-mds-mon.yaml new file mode 100644 index 00000000000..dbcd013b3f0 --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/7-restart/osd-mds-mon.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/qa/suites/upgrade/old/rados-double/rados-double/8-next-workload/api.yaml b/qa/suites/upgrade/old/rados-double/rados-double/8-next-workload/api.yaml new file mode 100644 index 00000000000..9b9f1f2e675 --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/8-next-workload/api.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + client.0: + - rados/test.sh + - cls diff --git a/qa/suites/upgrade/old/rados-double/rados-double/8-next-workload/snaps-few-objects.yaml b/qa/suites/upgrade/old/rados-double/rados-double/8-next-workload/snaps-few-objects.yaml new file mode 100644 index 00000000000..aa82d973ae1 --- /dev/null +++ b/qa/suites/upgrade/old/rados-double/rados-double/8-next-workload/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + 
snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/upgrade/old/rados/rados/% b/qa/suites/upgrade/old/rados/rados/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/rados/rados/0-cluster/start.yaml b/qa/suites/upgrade/old/rados/rados/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git a/qa/suites/upgrade/old/rados/rados/1-cuttlefish-install/cuttlefish.yaml b/qa/suites/upgrade/old/rados/rados/1-cuttlefish-install/cuttlefish.yaml new file mode 100644 index 00000000000..b259af97269 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/1-cuttlefish-install/cuttlefish.yaml @@ -0,0 +1,4 @@ +tasks: +- install: + branch: cuttlefish +- ceph: diff --git a/qa/suites/upgrade/old/rados/rados/2-cuttlefish-workload/api.yaml b/qa/suites/upgrade/old/rados/rados/2-cuttlefish-workload/api.yaml new file mode 100644 index 00000000000..66526582579 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/2-cuttlefish-workload/api.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + branch: cuttlefish + clients: + client.0: + - rados/test.sh + - cls diff --git a/qa/suites/upgrade/old/rados/rados/2-cuttlefish-workload/load-gen-mix.yaml b/qa/suites/upgrade/old/rados/rados/2-cuttlefish-workload/load-gen-mix.yaml new file mode 100644 index 00000000000..7ec655c8ccf --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/2-cuttlefish-workload/load-gen-mix.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: cuttlefish + clients: + client.0: + - rados/load-gen-mix.sh diff --git a/qa/suites/upgrade/old/rados/rados/3-upgrade/dumpling.yaml b/qa/suites/upgrade/old/rados/rados/3-upgrade/dumpling.yaml new file mode 100644 index 00000000000..e3e332c4f22 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/3-upgrade/dumpling.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: dumpling diff --git a/qa/suites/upgrade/old/rados/rados/4-restart/upgrade_mds_mon_osd.yaml b/qa/suites/upgrade/old/rados/rados/4-restart/upgrade_mds_mon_osd.yaml new file mode 100644 index 00000000000..d21800684d3 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/4-restart/upgrade_mds_mon_osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/rados/rados/4-restart/upgrade_mon_mds_osd.yaml b/qa/suites/upgrade/old/rados/rados/4-restart/upgrade_mon_mds_osd.yaml new file mode 100644 index 00000000000..78e14e9472a --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/4-restart/upgrade_mon_mds_osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/rados/rados/4-restart/upgrade_osd_mon_mds.yaml b/qa/suites/upgrade/old/rados/rados/4-restart/upgrade_osd_mon_mds.yaml new file mode 100644 index 00000000000..e8fe288f657 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/4-restart/upgrade_osd_mon_mds.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a] diff --git a/qa/suites/upgrade/old/rados/rados/5-dumpling-workload/api.yaml b/qa/suites/upgrade/old/rados/rados/5-dumpling-workload/api.yaml new file mode 100644 index 00000000000..b091ecc2090 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/5-dumpling-workload/api.yaml @@ -0,0 +1,7 @@ +tasks: 
+- workunit: + branch: dumpling + clients: + client.0: + - rados/test.sh + - cls diff --git a/qa/suites/upgrade/old/rados/rados/5-dumpling-workload/snaps-few-objects.yaml b/qa/suites/upgrade/old/rados/rados/5-dumpling-workload/snaps-few-objects.yaml new file mode 100644 index 00000000000..c54039766c0 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/5-dumpling-workload/snaps-few-objects.yaml @@ -0,0 +1,12 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/old/rados/rados/6-upgrade-emp/emperor.yaml b/qa/suites/upgrade/old/rados/rados/6-upgrade-emp/emperor.yaml new file mode 100644 index 00000000000..e473f31862d --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/6-upgrade-emp/emperor.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: emperor diff --git a/qa/suites/upgrade/old/rados/rados/7-restart/upgrade_mds_mon_osd.yaml b/qa/suites/upgrade/old/rados/rados/7-restart/upgrade_mds_mon_osd.yaml new file mode 100644 index 00000000000..d21800684d3 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/7-restart/upgrade_mds_mon_osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/rados/rados/7-restart/upgrade_mon_mds_osd.yaml b/qa/suites/upgrade/old/rados/rados/7-restart/upgrade_mon_mds_osd.yaml new file mode 100644 index 00000000000..78e14e9472a --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/7-restart/upgrade_mon_mds_osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/rados/rados/7-restart/upgrade_osd_mds_mon.yaml b/qa/suites/upgrade/old/rados/rados/7-restart/upgrade_osd_mds_mon.yaml new file mode 100644 index 00000000000..dbcd013b3f0 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/7-restart/upgrade_osd_mds_mon.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/qa/suites/upgrade/old/rados/rados/8-emperor-workload/api.yaml b/qa/suites/upgrade/old/rados/rados/8-emperor-workload/api.yaml new file mode 100644 index 00000000000..29a4be13fb6 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/8-emperor-workload/api.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + branch: emperor + clients: + client.0: + - rados/test.sh + - cls diff --git a/qa/suites/upgrade/old/rados/rados/8-emperor-workload/snaps-few-objects.yaml b/qa/suites/upgrade/old/rados/rados/8-emperor-workload/snaps-few-objects.yaml new file mode 100644 index 00000000000..c54039766c0 --- /dev/null +++ b/qa/suites/upgrade/old/rados/rados/8-emperor-workload/snaps-few-objects.yaml @@ -0,0 +1,12 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/% b/qa/suites/upgrade/old/rbd-double/rbd-double/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/0-cluster/start.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git 
a/qa/suites/upgrade/old/rbd-double/rbd-double/1-bobtail-install/bobtail.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/1-bobtail-install/bobtail.yaml new file mode 100644 index 00000000000..c676a5582d5 --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/1-bobtail-install/bobtail.yaml @@ -0,0 +1,4 @@ +tasks: +- install: + branch: bobtail +- ceph: diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/2-bobtail-workload/import_export.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/2-bobtail-workload/import_export.yaml new file mode 100644 index 00000000000..9123db83bbb --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/2-bobtail-workload/import_export.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + branch: bobtail + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/3-upgrade/dumpling.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/3-upgrade/dumpling.yaml new file mode 100644 index 00000000000..e3e332c4f22 --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/3-upgrade/dumpling.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: dumpling diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_mon_mds_osd.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_mon_mds_osd.yaml new file mode 100644 index 00000000000..78e14e9472a --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_mon_mds_osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_osd_mds_mon.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_osd_mds_mon.yaml new file mode 100644 index 00000000000..dbcd013b3f0 --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_osd_mds_mon.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/api.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/api.yaml new file mode 100644 index 00000000000..bbcde3e1559 --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/api.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd.sh diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/cls.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/cls.yaml new file mode 100644 index 00000000000..c8079e3dcdb --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/cls.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - cls/test_cls_rbd.sh diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/6-upgrade-next/next.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/6-upgrade-next/next.yaml new file mode 100644 index 00000000000..bb34346801c --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/6-upgrade-next/next.yaml @@ -0,0 +1,3 @@ +tasks: +- install.upgrade: + all: diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/7-restart/mon-mds-osd.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/7-restart/mon-mds-osd.yaml new file mode 100644 index 00000000000..78e14e9472a --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/7-restart/mon-mds-osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, 
mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/7-restart/osd-mds-mon.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/7-restart/osd-mds-mon.yaml new file mode 100644 index 00000000000..dbcd013b3f0 --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/7-restart/osd-mds-mon.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/import-export.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/import-export.yaml new file mode 100644 index 00000000000..ae44a873829 --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/import-export.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/python.yaml b/qa/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/python.yaml new file mode 100644 index 00000000000..5c6df6e38dd --- /dev/null +++ b/qa/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/python.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh diff --git a/qa/suites/upgrade/old/rbd/rbd/% b/qa/suites/upgrade/old/rbd/rbd/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/rbd/rbd/0-cluster/start.yaml b/qa/suites/upgrade/old/rbd/rbd/0-cluster/start.yaml new file mode 100644 index 00000000000..cd071f9cff4 --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/0-cluster/start.yaml @@ -0,0 +1,14 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 +tasks: +- install: + branch: bobtail +- ceph: diff --git a/qa/suites/upgrade/old/rbd/rbd/1-bobtail-workload/import_export.yaml b/qa/suites/upgrade/old/rbd/rbd/1-bobtail-workload/import_export.yaml new file mode 100644 index 00000000000..9123db83bbb --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/1-bobtail-workload/import_export.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + branch: bobtail + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/upgrade/old/rbd/rbd/2-upgrade-to-dumpling/upgrade.yaml b/qa/suites/upgrade/old/rbd/rbd/2-upgrade-to-dumpling/upgrade.yaml new file mode 100644 index 00000000000..e3e332c4f22 --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/2-upgrade-to-dumpling/upgrade.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: dumpling diff --git a/qa/suites/upgrade/old/rbd/rbd/3-restart/mon-osd-mds.yaml b/qa/suites/upgrade/old/rbd/rbd/3-restart/mon-osd-mds.yaml new file mode 100644 index 00000000000..31a79e45938 --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/3-restart/mon-osd-mds.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a] diff --git a/qa/suites/upgrade/old/rbd/rbd/3-restart/osd-mon-mds.yaml b/qa/suites/upgrade/old/rbd/rbd/3-restart/osd-mon-mds.yaml new file mode 100644 index 00000000000..e8fe288f657 --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/3-restart/osd-mon-mds.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a] diff --git a/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/api.yaml b/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/api.yaml new file mode 100644 index 00000000000..bbcde3e1559 --- /dev/null +++ 
b/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/api.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd.sh diff --git a/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/cls.yaml b/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/cls.yaml new file mode 100644 index 00000000000..c8079e3dcdb --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/cls.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - cls/test_cls_rbd.sh diff --git a/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/import-export.yaml b/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/import-export.yaml new file mode 100644 index 00000000000..364ef25f31c --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/import-export.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/python.yaml b/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/python.yaml new file mode 100644 index 00000000000..737a821f776 --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/4-dumpling-workload/python.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rbd/test_librbd_python.sh diff --git a/qa/suites/upgrade/old/rbd/rbd/5-upgrade-to-emperor/upgrade.yaml b/qa/suites/upgrade/old/rbd/rbd/5-upgrade-to-emperor/upgrade.yaml new file mode 100644 index 00000000000..e473f31862d --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/5-upgrade-to-emperor/upgrade.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: emperor diff --git a/qa/suites/upgrade/old/rbd/rbd/6-restart/mon-osd-mds.yaml b/qa/suites/upgrade/old/rbd/rbd/6-restart/mon-osd-mds.yaml new file mode 100644 index 00000000000..31a79e45938 --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/6-restart/mon-osd-mds.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a] diff --git a/qa/suites/upgrade/old/rbd/rbd/6-restart/osd-mon-mds.yaml b/qa/suites/upgrade/old/rbd/rbd/6-restart/osd-mon-mds.yaml new file mode 100644 index 00000000000..e8fe288f657 --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/6-restart/osd-mon-mds.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a] diff --git a/qa/suites/upgrade/old/rbd/rbd/7-emperor-workload/import_export.yaml b/qa/suites/upgrade/old/rbd/rbd/7-emperor-workload/import_export.yaml new file mode 100644 index 00000000000..e29788766b6 --- /dev/null +++ b/qa/suites/upgrade/old/rbd/rbd/7-emperor-workload/import_export.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + branch: emperor + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/% b/qa/suites/upgrade/old/rgw-double/rgw-double/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/0-cluster/start.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/1-bobtail-install/bobtail.yaml 
b/qa/suites/upgrade/old/rgw-double/rgw-double/1-bobtail-install/bobtail.yaml new file mode 100644 index 00000000000..ca81c710bd1 --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/1-bobtail-install/bobtail.yaml @@ -0,0 +1,5 @@ +tasks: +- install: + branch: bobtail +- ceph: +- rgw: diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/2-bobtail-workload/s3tests.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/2-bobtail-workload/s3tests.yaml new file mode 100644 index 00000000000..7397ae6873e --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/2-bobtail-workload/s3tests.yaml @@ -0,0 +1,5 @@ +tasks: +- s3tests: + client.0: + force-branch: bobtail + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/3-upgrade/dumpling.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/3-upgrade/dumpling.yaml new file mode 100644 index 00000000000..e3e332c4f22 --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/3-upgrade/dumpling.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: dumpling diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_mon_mds_osd.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_mon_mds_osd.yaml new file mode 100644 index 00000000000..86665905d67 --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_mon_mds_osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0] diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_osd_mds_mon.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_osd_mds_mon.yaml new file mode 100644 index 00000000000..425cf6082a3 --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_osd_mds_mon.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c, rgw.client.0] diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/readwrite.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/readwrite.yaml new file mode 100644 index 00000000000..d3166f117da --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/readwrite.yaml @@ -0,0 +1,13 @@ +tasks: +- s3readwrite: + client.0: + rgw_server: client.0 + readwrite: + bucket: rwtest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/s3tests.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/s3tests.yaml new file mode 100644 index 00000000000..6506960f73a --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/s3tests.yaml @@ -0,0 +1,5 @@ +tasks: +- s3tests: + client.0: + force-branch: dumpling + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/swift.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/swift.yaml new file mode 100644 index 00000000000..45e2fc9cc30 --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/swift.yaml @@ -0,0 +1,4 @@ +tasks: +- swift: + client.0: + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/6-upgrade-next/next.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/6-upgrade-next/next.yaml new file mode 100644 index 00000000000..bb34346801c --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/6-upgrade-next/next.yaml @@ -0,0 
+1,3 @@ +tasks: +- install.upgrade: + all: diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/7-restart/mon-mds-osd.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/7-restart/mon-mds-osd.yaml new file mode 100644 index 00000000000..86665905d67 --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/7-restart/mon-mds-osd.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0] diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/7-restart/osd-mds-mon.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/7-restart/osd-mds-mon.yaml new file mode 100644 index 00000000000..425cf6082a3 --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/7-restart/osd-mds-mon.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c, rgw.client.0] diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/readwrite.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/readwrite.yaml new file mode 100644 index 00000000000..d3166f117da --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/readwrite.yaml @@ -0,0 +1,13 @@ +tasks: +- s3readwrite: + client.0: + rgw_server: client.0 + readwrite: + bucket: rwtest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/s3tests.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/s3tests.yaml new file mode 100644 index 00000000000..573cffbc30a --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/s3tests.yaml @@ -0,0 +1,4 @@ +tasks: +- s3tests: + client.0: + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/swift.yaml b/qa/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/swift.yaml new file mode 100644 index 00000000000..45e2fc9cc30 --- /dev/null +++ b/qa/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/swift.yaml @@ -0,0 +1,4 @@ +tasks: +- swift: + client.0: + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/rgw/rgw/% b/qa/suites/upgrade/old/rgw/rgw/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/rgw/rgw/0-cluster/start.yaml b/qa/suites/upgrade/old/rgw/rgw/0-cluster/start.yaml new file mode 100644 index 00000000000..8b1ebbe2c36 --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/0-cluster/start.yaml @@ -0,0 +1,20 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 + - client.1 +- - client.0 +tasks: +- install: + branch: bobtail +- ceph: + conf: + client: + client mount timeout: 600 + rgw init timeout: 600 +- rgw: [client.0] diff --git a/qa/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3readwrite.yaml b/qa/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3readwrite.yaml new file mode 100644 index 00000000000..d3166f117da --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3readwrite.yaml @@ -0,0 +1,13 @@ +tasks: +- s3readwrite: + client.0: + rgw_server: client.0 + readwrite: + bucket: rwtest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3tests.yaml b/qa/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3tests.yaml new file mode 100644 index 00000000000..8020d793c37 --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3tests.yaml @@ -0,0 +1,5 @@ +tasks: +- s3tests: + 
client.0: + rgw_server: client.0 + force-branch: bobtail diff --git a/qa/suites/upgrade/old/rgw/rgw/1-bobtail-workload/swift.yaml b/qa/suites/upgrade/old/rgw/rgw/1-bobtail-workload/swift.yaml new file mode 100644 index 00000000000..45e2fc9cc30 --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/1-bobtail-workload/swift.yaml @@ -0,0 +1,4 @@ +tasks: +- swift: + client.0: + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/rgw/rgw/2-upgrade-to-dumpling/upgrade.yaml b/qa/suites/upgrade/old/rgw/rgw/2-upgrade-to-dumpling/upgrade.yaml new file mode 100644 index 00000000000..e3e332c4f22 --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/2-upgrade-to-dumpling/upgrade.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: dumpling diff --git a/qa/suites/upgrade/old/rgw/rgw/3-restart/mon-osd-mds.yaml b/qa/suites/upgrade/old/rgw/rgw/3-restart/mon-osd-mds.yaml new file mode 100644 index 00000000000..ea8a58ccdae --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/3-restart/mon-osd-mds.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a, rgw.client.0] diff --git a/qa/suites/upgrade/old/rgw/rgw/3-restart/osd-mon-mds.yaml b/qa/suites/upgrade/old/rgw/rgw/3-restart/osd-mon-mds.yaml new file mode 100644 index 00000000000..f9606ef70cc --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/3-restart/osd-mon-mds.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0] diff --git a/qa/suites/upgrade/old/rgw/rgw/4-dumpling-workload/readwrite.yaml b/qa/suites/upgrade/old/rgw/rgw/4-dumpling-workload/readwrite.yaml new file mode 100644 index 00000000000..d3166f117da --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/4-dumpling-workload/readwrite.yaml @@ -0,0 +1,13 @@ +tasks: +- s3readwrite: + client.0: + rgw_server: client.0 + readwrite: + bucket: rwtest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/upgrade/old/rgw/rgw/4-dumpling-workload/s3tests.yaml b/qa/suites/upgrade/old/rgw/rgw/4-dumpling-workload/s3tests.yaml new file mode 100644 index 00000000000..6506960f73a --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/4-dumpling-workload/s3tests.yaml @@ -0,0 +1,5 @@ +tasks: +- s3tests: + client.0: + force-branch: dumpling + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/rgw/rgw/4-dumpling-workload/swift.yaml b/qa/suites/upgrade/old/rgw/rgw/4-dumpling-workload/swift.yaml new file mode 100644 index 00000000000..45e2fc9cc30 --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/4-dumpling-workload/swift.yaml @@ -0,0 +1,4 @@ +tasks: +- swift: + client.0: + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/rgw/rgw/5-upgrade-to-emperor/upgrade.yaml b/qa/suites/upgrade/old/rgw/rgw/5-upgrade-to-emperor/upgrade.yaml new file mode 100644 index 00000000000..e473f31862d --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/5-upgrade-to-emperor/upgrade.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: emperor diff --git a/qa/suites/upgrade/old/rgw/rgw/6-restart/mon-osd-mds.yaml b/qa/suites/upgrade/old/rgw/rgw/6-restart/mon-osd-mds.yaml new file mode 100644 index 00000000000..ea8a58ccdae --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/6-restart/mon-osd-mds.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a, rgw.client.0] diff --git a/qa/suites/upgrade/old/rgw/rgw/6-restart/osd-mon-mds.yaml b/qa/suites/upgrade/old/rgw/rgw/6-restart/osd-mon-mds.yaml new file mode 100644 index 
00000000000..f9606ef70cc --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/6-restart/osd-mon-mds.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0] diff --git a/qa/suites/upgrade/old/rgw/rgw/7-emperor-workload/readwrite.yaml b/qa/suites/upgrade/old/rgw/rgw/7-emperor-workload/readwrite.yaml new file mode 100644 index 00000000000..d3166f117da --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/7-emperor-workload/readwrite.yaml @@ -0,0 +1,13 @@ +tasks: +- s3readwrite: + client.0: + rgw_server: client.0 + readwrite: + bucket: rwtest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/upgrade/old/rgw/rgw/7-emperor-workload/s3tests.yaml b/qa/suites/upgrade/old/rgw/rgw/7-emperor-workload/s3tests.yaml new file mode 100644 index 00000000000..cc9675c2ade --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/7-emperor-workload/s3tests.yaml @@ -0,0 +1,5 @@ +tasks: +- s3tests: + client.0: + force-branch: emperor + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/rgw/rgw/7-emperor-workload/swift.yaml b/qa/suites/upgrade/old/rgw/rgw/7-emperor-workload/swift.yaml new file mode 100644 index 00000000000..45e2fc9cc30 --- /dev/null +++ b/qa/suites/upgrade/old/rgw/rgw/7-emperor-workload/swift.yaml @@ -0,0 +1,4 @@ +tasks: +- swift: + client.0: + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/small/fs/% b/qa/suites/upgrade/old/small/fs/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/small/fs/0-cluster/start.yaml b/qa/suites/upgrade/old/small/fs/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/small/fs/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git a/qa/suites/upgrade/old/small/fs/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/old/small/fs/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..d99595e47de --- /dev/null +++ b/qa/suites/upgrade/old/small/fs/1-dumpling-install/dumpling.yaml @@ -0,0 +1,6 @@ +tasks: +- install: + branch: dumpling +- ceph: + fs: xfs +- ceph-fuse: diff --git a/qa/suites/upgrade/old/small/fs/2-workload/blogbench.yaml b/qa/suites/upgrade/old/small/fs/2-workload/blogbench.yaml new file mode 100644 index 00000000000..edf71708b6d --- /dev/null +++ b/qa/suites/upgrade/old/small/fs/2-workload/blogbench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/upgrade/old/small/fs/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/old/small/fs/3-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 00000000000..e473f31862d --- /dev/null +++ b/qa/suites/upgrade/old/small/fs/3-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: emperor diff --git a/qa/suites/upgrade/old/small/fs/4-restart/restart.yaml b/qa/suites/upgrade/old/small/fs/4-restart/restart.yaml new file mode 100644 index 00000000000..4290b2b9f98 --- /dev/null +++ b/qa/suites/upgrade/old/small/fs/4-restart/restart.yaml @@ -0,0 +1,3 @@ +tasks: +- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3] + diff --git a/qa/suites/upgrade/old/small/fs/5-emperor-workload/emperor.yaml b/qa/suites/upgrade/old/small/fs/5-emperor-workload/emperor.yaml new file mode 100644 index 00000000000..3f54d542976 --- /dev/null +++ 
b/qa/suites/upgrade/old/small/fs/5-emperor-workload/emperor.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: emperor + clients: + client.0: + - suites/dbench.sh diff --git a/qa/suites/upgrade/old/small/fs/distro b/qa/suites/upgrade/old/small/fs/distro new file mode 120000 index 00000000000..3a0ac71c8af --- /dev/null +++ b/qa/suites/upgrade/old/small/fs/distro @@ -0,0 +1 @@ +../rados/distro \ No newline at end of file diff --git a/qa/suites/upgrade/old/small/rados/% b/qa/suites/upgrade/old/small/rados/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/small/rados/0-cluster/start.yaml b/qa/suites/upgrade/old/small/rados/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git a/qa/suites/upgrade/old/small/rados/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/old/small/rados/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..c98631e2bbd --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/1-dumpling-install/dumpling.yaml @@ -0,0 +1,5 @@ +tasks: +- install: + branch: dumpling +- ceph: + fs: xfs diff --git a/qa/suites/upgrade/old/small/rados/2-workload/loadgenbig.yaml b/qa/suites/upgrade/old/small/rados/2-workload/loadgenbig.yaml new file mode 100644 index 00000000000..9c5c2c71786 --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/2-workload/loadgenbig.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: dumpling + clients: + all: + - rados/load-gen-big.sh diff --git a/qa/suites/upgrade/old/small/rados/3-upgrade-sequence/upgrade.yaml b/qa/suites/upgrade/old/small/rados/3-upgrade-sequence/upgrade.yaml new file mode 100644 index 00000000000..e473f31862d --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/3-upgrade-sequence/upgrade.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: emperor diff --git a/qa/suites/upgrade/old/small/rados/4-restart/restart.yaml b/qa/suites/upgrade/old/small/rados/4-restart/restart.yaml new file mode 100644 index 00000000000..78e14e9472a --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/4-restart/restart.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/qa/suites/upgrade/old/small/rados/5-emperor-workload/emperor.yaml b/qa/suites/upgrade/old/small/rados/5-emperor-workload/emperor.yaml new file mode 100644 index 00000000000..58d439c7da9 --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/5-emperor-workload/emperor.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: emperor + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/upgrade/old/small/rados/distro/centos_6.4.yaml b/qa/suites/upgrade/old/small/rados/distro/centos_6.4.yaml new file mode 100644 index 00000000000..02383cd5f8c --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/distro/centos_6.4.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "6.4" diff --git a/qa/suites/upgrade/old/small/rados/distro/debian_7.0.yaml b/qa/suites/upgrade/old/small/rados/distro/debian_7.0.yaml new file mode 100644 index 00000000000..8100dc41e3d --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/distro/debian_7.0.yaml @@ -0,0 +1,2 @@ +os_type: debian +os_version: "7.0" diff --git a/qa/suites/upgrade/old/small/rados/distro/fedora_18.yaml b/qa/suites/upgrade/old/small/rados/distro/fedora_18.yaml new file mode 100644 index 
00000000000..07872aa7edf --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/distro/fedora_18.yaml @@ -0,0 +1,2 @@ +os_type: fedora +os_version: "18" diff --git a/qa/suites/upgrade/old/small/rados/distro/rhel_6.3.yaml b/qa/suites/upgrade/old/small/rados/distro/rhel_6.3.yaml new file mode 100644 index 00000000000..6a8edcd5626 --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/distro/rhel_6.3.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "6.3" diff --git a/qa/suites/upgrade/old/small/rados/distro/rhel_6.4.yaml b/qa/suites/upgrade/old/small/rados/distro/rhel_6.4.yaml new file mode 100644 index 00000000000..5225495834a --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/distro/rhel_6.4.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "6.4" diff --git a/qa/suites/upgrade/old/small/rados/distro/ubuntu_12.04.yaml b/qa/suites/upgrade/old/small/rados/distro/ubuntu_12.04.yaml new file mode 100644 index 00000000000..dbc3a8d9c58 --- /dev/null +++ b/qa/suites/upgrade/old/small/rados/distro/ubuntu_12.04.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "12.04" diff --git a/qa/suites/upgrade/old/small/rbd/% b/qa/suites/upgrade/old/small/rbd/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/small/rbd/0-cluster/start.yaml b/qa/suites/upgrade/old/small/rbd/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/small/rbd/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git a/qa/suites/upgrade/old/small/rbd/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/old/small/rbd/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..c98631e2bbd --- /dev/null +++ b/qa/suites/upgrade/old/small/rbd/1-dumpling-install/dumpling.yaml @@ -0,0 +1,5 @@ +tasks: +- install: + branch: dumpling +- ceph: + fs: xfs diff --git a/qa/suites/upgrade/old/small/rbd/2-workload/workload.yaml b/qa/suites/upgrade/old/small/rbd/2-workload/workload.yaml new file mode 100644 index 00000000000..364ef25f31c --- /dev/null +++ b/qa/suites/upgrade/old/small/rbd/2-workload/workload.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + branch: dumpling + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/upgrade/old/small/rbd/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/old/small/rbd/3-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 00000000000..e473f31862d --- /dev/null +++ b/qa/suites/upgrade/old/small/rbd/3-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: emperor diff --git a/qa/suites/upgrade/old/small/rbd/4-restart/restart.yaml b/qa/suites/upgrade/old/small/rbd/4-restart/restart.yaml new file mode 100644 index 00000000000..e8fe288f657 --- /dev/null +++ b/qa/suites/upgrade/old/small/rbd/4-restart/restart.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a] diff --git a/qa/suites/upgrade/old/small/rbd/5-emperor-workload/final.yaml b/qa/suites/upgrade/old/small/rbd/5-emperor-workload/final.yaml new file mode 100644 index 00000000000..19b5bd3b557 --- /dev/null +++ b/qa/suites/upgrade/old/small/rbd/5-emperor-workload/final.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + branch: emperor + clients: + client.0: + - cls/test_cls_rbd.sh diff --git a/qa/suites/upgrade/old/small/rbd/distro b/qa/suites/upgrade/old/small/rbd/distro new file mode 120000 index 
00000000000..3a0ac71c8af --- /dev/null +++ b/qa/suites/upgrade/old/small/rbd/distro @@ -0,0 +1 @@ +../rados/distro \ No newline at end of file diff --git a/qa/suites/upgrade/old/small/rgw/% b/qa/suites/upgrade/old/small/rgw/% new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/suites/upgrade/old/small/rgw/0-cluster/start.yaml b/qa/suites/upgrade/old/small/rgw/0-cluster/start.yaml new file mode 100644 index 00000000000..01747e42056 --- /dev/null +++ b/qa/suites/upgrade/old/small/rgw/0-cluster/start.yaml @@ -0,0 +1,10 @@ +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mon.c + - osd.2 + - osd.3 +- - client.0 diff --git a/qa/suites/upgrade/old/small/rgw/1-dumpling-install/dumpling.yaml b/qa/suites/upgrade/old/small/rgw/1-dumpling-install/dumpling.yaml new file mode 100644 index 00000000000..fe05a61ff31 --- /dev/null +++ b/qa/suites/upgrade/old/small/rgw/1-dumpling-install/dumpling.yaml @@ -0,0 +1,6 @@ +tasks: +- install: + branch: dumpling +- ceph: + fs: xfs +- rgw: [client.0] diff --git a/qa/suites/upgrade/old/small/rgw/2-workload/s3tests.yaml b/qa/suites/upgrade/old/small/rgw/2-workload/s3tests.yaml new file mode 100644 index 00000000000..6e7449ebcd8 --- /dev/null +++ b/qa/suites/upgrade/old/small/rgw/2-workload/s3tests.yaml @@ -0,0 +1,5 @@ +tasks: +- s3tests: + client.0: + rgw_server: client.0 + force-branch: dumpling diff --git a/qa/suites/upgrade/old/small/rgw/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/old/small/rgw/3-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 00000000000..e473f31862d --- /dev/null +++ b/qa/suites/upgrade/old/small/rgw/3-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,4 @@ +tasks: +- install.upgrade: + all: + branch: emperor diff --git a/qa/suites/upgrade/old/small/rgw/4-restart/restart.yaml b/qa/suites/upgrade/old/small/rgw/4-restart/restart.yaml new file mode 100644 index 00000000000..f9606ef70cc --- /dev/null +++ b/qa/suites/upgrade/old/small/rgw/4-restart/restart.yaml @@ -0,0 +1,2 @@ +tasks: +- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0] diff --git a/qa/suites/upgrade/old/small/rgw/5-emperor-workload/final.yaml b/qa/suites/upgrade/old/small/rgw/5-emperor-workload/final.yaml new file mode 100644 index 00000000000..573cffbc30a --- /dev/null +++ b/qa/suites/upgrade/old/small/rgw/5-emperor-workload/final.yaml @@ -0,0 +1,4 @@ +tasks: +- s3tests: + client.0: + rgw_server: client.0 diff --git a/qa/suites/upgrade/old/small/rgw/distro b/qa/suites/upgrade/old/small/rgw/distro new file mode 120000 index 00000000000..3a0ac71c8af --- /dev/null +++ b/qa/suites/upgrade/old/small/rgw/distro @@ -0,0 +1 @@ +../rados/distro \ No newline at end of file diff --git a/qa/suites/workload/cifs-dbench.yaml b/qa/suites/workload/cifs-dbench.yaml new file mode 100644 index 00000000000..c13c1c099e5 --- /dev/null +++ b/qa/suites/workload/cifs-dbench.yaml @@ -0,0 +1,8 @@ +tasks: +- cifs-mount: + client.1: + share: ceph +- workunit: + clients: + client.1: + - suites/dbench.sh diff --git a/qa/suites/workload/cifs-fsstress.yaml b/qa/suites/workload/cifs-fsstress.yaml new file mode 100644 index 00000000000..ff003af3433 --- /dev/null +++ b/qa/suites/workload/cifs-fsstress.yaml @@ -0,0 +1,8 @@ +tasks: +- cifs-mount: + client.1: + share: ceph +- workunit: + clients: + client.1: + - suites/fsstress.sh diff --git a/qa/suites/workload/cifs-kernel-build.yaml.disabled b/qa/suites/workload/cifs-kernel-build.yaml.disabled new file mode 100644 index 00000000000..ab9ff8ac731 --- /dev/null +++ 
b/qa/suites/workload/cifs-kernel-build.yaml.disabled @@ -0,0 +1,9 @@ +tasks: +- cifs-mount: + client.1: + share: ceph +- workunit: + clients: + client.1: + - kernel_untar_build.sh + diff --git a/qa/suites/workload/smbtorture.yaml b/qa/suites/workload/smbtorture.yaml new file mode 100644 index 00000000000..823489a2082 --- /dev/null +++ b/qa/suites/workload/smbtorture.yaml @@ -0,0 +1,39 @@ +tasks: +- pexec: + client.1: + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1 + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb +# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon +# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number + - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl +# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid diff --git a/qa/tasks/__init__.py b/qa/tasks/__init__.py new file mode 100644 index 
00000000000..9a7949a001e --- /dev/null +++ b/qa/tasks/__init__.py @@ -0,0 +1,6 @@ +import logging + +# Inherit teuthology's log level +teuthology_log = logging.getLogger('teuthology') +log = logging.getLogger(__name__) +log.setLevel(teuthology_log.level) diff --git a/qa/tasks/admin_socket.py b/qa/tasks/admin_socket.py new file mode 100644 index 00000000000..71f631ad7e5 --- /dev/null +++ b/qa/tasks/admin_socket.py @@ -0,0 +1,192 @@ +""" +Admin Socket task -- used in rados, powercycle, and smoke testing +""" +from cStringIO import StringIO + +import json +import logging +import os +import time + +from teuthology.orchestra import run +from teuthology import misc as teuthology +from teuthology.parallel import parallel + +log = logging.getLogger(__name__) + + +def task(ctx, config): + """ + Run an admin socket command, make sure the output is json, and run + a test program on it. The test program should read json from + stdin. This task succeeds if the test program exits with status 0. + + To run the same test on all clients:: + + tasks: + - ceph: + - rados: + - admin_socket: + all: + dump_requests: + test: http://example.com/script + + To restrict it to certain clients:: + + tasks: + - ceph: + - rados: [client.1] + - admin_socket: + client.1: + dump_requests: + test: http://example.com/script + + If an admin socket command has arguments, they can be specified as + a list:: + + tasks: + - ceph: + - rados: [client.0] + - admin_socket: + client.0: + dump_requests: + test: http://example.com/script + help: + test: http://example.com/test_help_version + args: [version] + + Note that there must be a ceph client with an admin socket running + before this task is run. The tests are parallelized at the client + level. Tests for a single client are run serially. + + :param ctx: Context + :param config: Configuration + """ + assert isinstance(config, dict), \ + 'admin_socket task requires a dict for configuration' + teuthology.replace_all_with_clients(ctx.cluster, config) + + with parallel() as ptask: + for client, tests in config.iteritems(): + ptask.spawn(_run_tests, ctx, client, tests) + + +def _socket_command(ctx, remote, socket_path, command, args): + """ + Run an admin socket command and return the result as a string. + + :param ctx: Context + :param remote: Remote site + :param socket_path: path to socket + :param command: command to be run remotely + :param args: command arguments + + :returns: output of command in json format + """ + json_fp = StringIO() + testdir = teuthology.get_testdir(ctx) + max_tries = 60 + while True: + proc = remote.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph', + '--admin-daemon', socket_path, + ] + command.split(' ') + args, + stdout=json_fp, + check_status=False, + ) + if proc.exitstatus == 0: + break + assert max_tries > 0 + max_tries -= 1 + log.info('ceph cli returned an error, command not registered yet?') + log.info('sleeping and retrying ...') + time.sleep(1) + out = json_fp.getvalue() + json_fp.close() + log.debug('admin socket command %s returned %s', command, out) + return json.loads(out) + +def _run_tests(ctx, client, tests): + """ + Create a temp directory and wait for a client socket to be created. + For each test, copy the executable locally and run the test. + Remove temp directory when finished. 
+ + :param ctx: Context + :param client: client machine to run the test + :param tests: list of tests to run + """ + testdir = teuthology.get_testdir(ctx) + log.debug('Running admin socket tests on %s', client) + (remote,) = ctx.cluster.only(client).remotes.iterkeys() + socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client) + overrides = ctx.config.get('overrides', {}).get('admin_socket', {}) + + try: + tmp_dir = os.path.join( + testdir, + 'admin_socket_{client}'.format(client=client), + ) + remote.run( + args=[ + 'mkdir', + '--', + tmp_dir, + run.Raw('&&'), + # wait for client process to create the socket + 'while', 'test', '!', '-e', socket_path, run.Raw(';'), + 'do', 'sleep', '1', run.Raw(';'), 'done', + ], + ) + + for command, config in tests.iteritems(): + if config is None: + config = {} + teuthology.deep_merge(config, overrides) + log.debug('Testing %s with config %s', command, str(config)) + + test_path = None + if 'test' in config: + url = config['test'].format( + branch=config.get('branch', 'master') + ) + test_path = os.path.join(tmp_dir, command) + remote.run( + args=[ + 'wget', + '-q', + '-O', + test_path, + '--', + url, + run.Raw('&&'), + 'chmod', + 'u=rx', + '--', + test_path, + ], + ) + + args = config.get('args', []) + assert isinstance(args, list), \ + 'admin socket command args must be a list' + sock_out = _socket_command(ctx, remote, socket_path, command, args) + if test_path is not None: + remote.run( + args=[ + test_path, + ], + stdin=json.dumps(sock_out), + ) + + finally: + remote.run( + args=[ + 'rm', '-rf', '--', tmp_dir, + ], + ) diff --git a/qa/tasks/apache.conf.template b/qa/tasks/apache.conf.template new file mode 100644 index 00000000000..ed61bfc5f90 --- /dev/null +++ b/qa/tasks/apache.conf.template @@ -0,0 +1,59 @@ +<IfModule !version_module> + LoadModule version_module {mod_path}/mod_version.so +</IfModule> +<IfModule !env_module> + LoadModule env_module {mod_path}/mod_env.so +</IfModule> +<IfModule !rewrite_module> + LoadModule rewrite_module {mod_path}/mod_rewrite.so +</IfModule> +<IfModule !fastcgi_module> + LoadModule fastcgi_module {mod_path}/mod_fastcgi.so +</IfModule> +<IfModule !log_config_module> + LoadModule log_config_module {mod_path}/mod_log_config.so +</IfModule> + +Listen {port} +ServerName {host} + +<IfVersion >= 2.4> +<IfModule !unixd_module> + LoadModule unixd_module {mod_path}/mod_unixd.so +</IfModule> +<IfModule !authz_core_module> + LoadModule authz_core_module {mod_path}/mod_authz_core.so +</IfModule> +<IfModule !mpm_worker_module> + LoadModule mpm_worker_module {mod_path}/mod_mpm_worker.so +</IfModule> + User {user} + Group {group} +</IfVersion> + +ServerRoot {testdir}/apache +ErrorLog {testdir}/archive/apache.{client}/error.log +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{{Referer}}i\" \"%{{User-agent}}i\"" combined +CustomLog {testdir}/archive/apache.{client}/access.log combined +PidFile {testdir}/apache/tmp.{client}/apache.pid +DocumentRoot {testdir}/apache/htdocs.{client} +FastCgiIPCDir {testdir}/apache/tmp.{client}/fastcgi_sock +FastCgiExternalServer {testdir}/apache/htdocs.{client}/rgw.fcgi -socket rgw_sock -idle-timeout {idle_timeout} +RewriteEngine On + +RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /rgw.fcgi?page=$1&params=$2&%{{QUERY_STRING}} [E=HTTP_AUTHORIZATION:%{{HTTP:Authorization}},L] + +# Set fastcgi environment variables. +# Note that this is separate from Unix environment variables! +SetEnv RGW_LOG_LEVEL 20 +SetEnv RGW_SHOULD_LOG yes +SetEnv RGW_PRINT_CONTINUE {print_continue} + +<Directory {testdir}/apache/htdocs.{client}> + Options +ExecCGI + AllowOverride All + SetHandler fastcgi-script +</Directory> + +AllowEncodedSlashes On +ServerSignature Off diff --git a/qa/tasks/autotest.py b/qa/tasks/autotest.py new file mode 100644 index 00000000000..efa972123d2 --- /dev/null +++ b/qa/tasks/autotest.py @@ -0,0 +1,166 @@ +""" +Run an autotest test on the ceph cluster.
+""" +import json +import logging +import os + +from teuthology import misc as teuthology +from teuthology.parallel import parallel +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Run an autotest test on the ceph cluster. + + Only autotest client tests are supported. + + The config is a mapping from role name to list of tests to run on + that client. + + For example:: + + tasks: + - ceph: + - ceph-fuse: [client.0, client.1] + - autotest: + client.0: [dbench] + client.1: [bonnie] + + You can also specify a list of tests to run on all clients:: + + tasks: + - ceph: + - ceph-fuse: + - autotest: + all: [dbench] + """ + assert isinstance(config, dict) + config = teuthology.replace_all_with_clients(ctx.cluster, config) + log.info('Setting up autotest...') + testdir = teuthology.get_testdir(ctx) + with parallel() as p: + for role in config.iterkeys(): + (remote,) = ctx.cluster.only(role).remotes.keys() + p.spawn(_download, testdir, remote) + + log.info('Making a separate scratch dir for every client...') + for role in config.iterkeys(): + assert isinstance(role, basestring) + PREFIX = 'client.' + assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) + remote.run( + args=[ + 'sudo', + 'install', + '-d', + '-m', '0755', + '--owner={user}'.format(user='ubuntu'), #TODO + '--', + scratch, + ], + ) + + with parallel() as p: + for role, tests in config.iteritems(): + (remote,) = ctx.cluster.only(role).remotes.keys() + p.spawn(_run_tests, testdir, remote, role, tests) + +def _download(testdir, remote): + """ + Download. Does not explicitly support muliple tasks in a single run. + """ + remote.run( + args=[ + # explicitly does not support multiple autotest tasks + # in a single run; the result archival would conflict + 'mkdir', '{tdir}/archive/autotest'.format(tdir=testdir), + run.Raw('&&'), + 'mkdir', '{tdir}/autotest'.format(tdir=testdir), + run.Raw('&&'), + 'wget', + '-nv', + '--no-check-certificate', + 'https://github.com/ceph/autotest/tarball/ceph', + '-O-', + run.Raw('|'), + 'tar', + '-C', '{tdir}/autotest'.format(tdir=testdir), + '-x', + '-z', + '-f-', + '--strip-components=1', + ], + ) + +def _run_tests(testdir, remote, role, tests): + """ + Spawned to run test on remote site + """ + assert isinstance(role, basestring) + PREFIX = 'client.' 
+ assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) + + assert isinstance(tests, list) + for idx, testname in enumerate(tests): + log.info('Running autotest client test #%d: %s...', idx, testname) + + tag = 'client.{id}.num{idx}.{testname}'.format( + idx=idx, + testname=testname, + id=id_, + ) + control = '{tdir}/control.{tag}'.format(tdir=testdir, tag=tag) + teuthology.write_file( + remote=remote, + path=control, + data='import json; data=json.loads({data!r}); job.run_test(**data)'.format( + data=json.dumps(dict( + url=testname, + dir=scratch, + # TODO perhaps tag + # results will be in {testdir}/autotest/client/results/dbench + # or {testdir}/autotest/client/results/dbench.{tag} + )), + ), + ) + remote.run( + args=[ + '{tdir}/autotest/client/bin/autotest'.format(tdir=testdir), + '--verbose', + '--harness=simple', + '--tag={tag}'.format(tag=tag), + control, + run.Raw('3>&1'), + ], + ) + + remote.run( + args=[ + 'rm', '-rf', '--', control, + ], + ) + + remote.run( + args=[ + 'mv', + '--', + '{tdir}/autotest/client/results/{tag}'.format(tdir=testdir, tag=tag), + '{tdir}/archive/autotest/{tag}'.format(tdir=testdir, tag=tag), + ], + ) + + remote.run( + args=[ + 'rm', '-rf', '--', '{tdir}/autotest'.format(tdir=testdir), + ], + ) diff --git a/qa/tasks/blktrace.py b/qa/tasks/blktrace.py new file mode 100644 index 00000000000..401f9e39f64 --- /dev/null +++ b/qa/tasks/blktrace.py @@ -0,0 +1,93 @@ +""" +Run blktrace program through teuthology +""" +import contextlib +import logging + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.orchestra import run + +log = logging.getLogger(__name__) +blktrace = '/usr/sbin/blktrace' +daemon_signal = 'term' + +@contextlib.contextmanager +def setup(ctx, config): + """ + Setup all the remotes + """ + osds = ctx.cluster.only(teuthology.is_type('osd')) + log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx)) + + for remote, roles_for_host in osds.remotes.iteritems(): + log.info('Creating %s on %s' % (log_dir, remote.name)) + remote.run( + args=['mkdir', '-p', '-m0755', '--', log_dir], + wait=False, + ) + yield + +@contextlib.contextmanager +def execute(ctx, config): + """ + Run the blktrace program on remote machines. + """ + procs = [] + testdir = teuthology.get_testdir(ctx) + log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir) + + osds = ctx.cluster.only(teuthology.is_type('osd')) + for remote, roles_for_host in osds.remotes.iteritems(): + roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote] + for id_ in teuthology.roles_of_type(roles_for_host, 'osd'): + if roles_to_devs.get(id_): + dev = roles_to_devs[id_] + log.info("running blktrace on %s: %s" % (remote.name, dev)) + + proc = remote.run( + args=[ + 'cd', + log_dir, + run.Raw(';'), + 'daemon-helper', + daemon_signal, + 'sudo', + blktrace, + '-o', + dev.rsplit("/", 1)[1], + '-d', + dev, + ], + wait=False, + stdin=run.PIPE, + ) + procs.append(proc) + try: + yield + finally: + osds = ctx.cluster.only(teuthology.is_type('osd')) + log.info('stopping blktrace processs') + for proc in procs: + proc.stdin.close() + +@contextlib.contextmanager +def task(ctx, config): + """ + Usage: + blktrace: + + Runs blktrace on all clients. 
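+
+ The traces are taken on the OSD hosts: blktrace is attached to each OSD
+ data device recorded in ctx.disk_config, and its output is written to
+ {testdir}/archive/performance/blktrace on those hosts.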
+ """ + if config is None: + config = dict(('client.{id}'.format(id=id_), None) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')) + elif isinstance(config, list): + config = dict.fromkeys(config) + + with contextutil.nested( + lambda: setup(ctx=ctx, config=config), + lambda: execute(ctx=ctx, config=config), + ): + yield + diff --git a/qa/tasks/boto.cfg.template b/qa/tasks/boto.cfg.template new file mode 100644 index 00000000000..cdfe8873b42 --- /dev/null +++ b/qa/tasks/boto.cfg.template @@ -0,0 +1,2 @@ +[Boto] +http_socket_timeout = {idle_timeout} diff --git a/qa/tasks/calamari/http_client.py b/qa/tasks/calamari/http_client.py new file mode 100755 index 00000000000..84a03c7bfa0 --- /dev/null +++ b/qa/tasks/calamari/http_client.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python + +import json +import logging +import requests + +log = logging.getLogger(__name__) + + +class AuthenticatedHttpClient(requests.Session): + """ + Client for the calamari REST API, principally exists to do + authentication, but also helpfully prefixes + URLs in requests with the API base URL and JSONizes + POST data. + """ + def __init__(self, api_url, username, password): + super(AuthenticatedHttpClient, self).__init__() + self._username = username + self._password = password + self._api_url = api_url + self.headers = { + 'Content-type': "application/json; charset=UTF-8" + } + + def request(self, method, url, **kwargs): + if not url.startswith('/'): + url = self._api_url + url + response = super(AuthenticatedHttpClient, self).request(method, url, **kwargs) + if response.status_code >= 400: + # For the benefit of test logs + print "%s: %s" % (response.status_code, response.content) + return response + + def post(self, url, data=None, **kwargs): + if isinstance(data, dict): + data = json.dumps(data) + return super(AuthenticatedHttpClient, self).post(url, data, **kwargs) + + def patch(self, url, data=None, **kwargs): + if isinstance(data, dict): + data = json.dumps(data) + return super(AuthenticatedHttpClient, self).patch(url, data, **kwargs) + + def login(self): + """ + Authenticate with the Django auth system as + it is exposed in the Calamari REST API. + """ + log.info("Logging in as %s" % self._username) + response = self.get("auth/login/") + response.raise_for_status() + self.headers['X-XSRF-TOKEN'] = response.cookies['XSRF-TOKEN'] + + self.post("auth/login/", { + 'next': "/", + 'username': self._username, + 'password': self._password + }) + response.raise_for_status() + + # Check we're allowed in now. 
+ response = self.get("cluster") + response.raise_for_status() + +if __name__ == "__main__": + + import argparse + + p = argparse.ArgumentParser() + p.add_argument('-u', '--uri', default='http://mira035/api/v1/') + p.add_argument('--user', default='admin') + p.add_argument('--pass', dest='password', default='admin') + args, remainder = p.parse_known_args() + + c = AuthenticatedHttpClient(args.uri, args.user, args.password) + c.login() + response = c.request('GET', ''.join(remainder)).json() + print json.dumps(response, indent=2) diff --git a/qa/tasks/calamari/servertest_1_0.py b/qa/tasks/calamari/servertest_1_0.py new file mode 100755 index 00000000000..b9b07a39052 --- /dev/null +++ b/qa/tasks/calamari/servertest_1_0.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python + +import datetime +import os +import logging +import logging.handlers +import requests +import uuid +import unittest +from http_client import AuthenticatedHttpClient + +log = logging.getLogger(__name__) +log.addHandler(logging.StreamHandler()) +log.setLevel(logging.INFO) + +global base_uri +global client +base_uri = None +server_uri = None +client = None + +def setUpModule(): + global base_uri + global server_uri + global client + try: + base_uri = os.environ['CALAMARI_BASE_URI'] + except KeyError: + log.error('Must define CALAMARI_BASE_URI') + os._exit(1) + if not base_uri.endswith('/'): + base_uri += '/' + if not base_uri.endswith('api/v1/'): + base_uri += 'api/v1/' + client = AuthenticatedHttpClient(base_uri, 'admin', 'admin') + server_uri = base_uri.replace('api/v1/', '') + client.login() + +class RestTest(unittest.TestCase): + 'Base class for all tests here; get class\'s data' + + def setUp(self): + # Called once for each test_* case. A bit wasteful, but we + # really like using the simple class variable self.uri + # to customize each derived TestCase + method = getattr(self, 'method', 'GET') + raw = self.uri.startswith('/') + self.response = self.get_object(method, self.uri, raw=raw) + + def get_object(self, method, url, raw=False): + global server_uri + 'Return Python object decoded from JSON response to method/url' + if not raw: + return client.request(method, url).json() + else: + return requests.request(method, server_uri + url).json() + +class TestUserMe(RestTest): + + uri = 'user/me' + + def test_me(self): + self.assertEqual(self.response['username'], 'admin') + +class TestCluster(RestTest): + + uri = 'cluster' + + def test_id(self): + self.assertEqual(self.response[0]['id'], 1) + + def test_times(self): + for time in ( + self.response[0]['cluster_update_time'], + self.response[0]['cluster_update_attempt_time'], + ): + self.assertTrue(is_datetime(time)) + + def test_api_base_url(self): + api_base_url = self.response[0]['api_base_url'] + self.assertTrue(api_base_url.startswith('http')) + self.assertIn('api/v0.1', api_base_url) + +class TestHealth(RestTest): + + uri = 'cluster/1/health' + + def test_cluster(self): + self.assertEqual(self.response['cluster'], 1) + + def test_times(self): + for time in ( + self.response['cluster_update_time'], + self.response['added'], + ): + self.assertTrue(is_datetime(time)) + + def test_report_and_overall_status(self): + self.assertIn('report', self.response) + self.assertIn('overall_status', self.response['report']) + +class TestHealthCounters(RestTest): + + uri = 'cluster/1/health_counters' + + def test_cluster(self): + self.assertEqual(self.response['cluster'], 1) + + def test_time(self): + self.assertTrue(is_datetime(self.response['cluster_update_time'])) + + def test_existence(self): 
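+ # pg, mon and osd each expose warn/critical/ok counts; pools only
+ # report a total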
+ for section in ('pg', 'mon', 'osd'): + for counter in ('warn', 'critical', 'ok'): + count = self.response[section][counter]['count'] + self.assertIsInstance(count, int) + self.assertIsInstance(self.response['pool']['total'], int) + + def test_mds_sum(self): + count = self.response['mds'] + self.assertEqual( + count['up_not_in'] + count['not_up_not_in'] + count['up_in'], + count['total'] + ) + +class TestSpace(RestTest): + + uri = 'cluster/1/space' + + def test_cluster(self): + self.assertEqual(self.response['cluster'], 1) + + def test_times(self): + for time in ( + self.response['cluster_update_time'], + self.response['added'], + ): + self.assertTrue(is_datetime(time)) + + def test_space(self): + for size in ('free_bytes', 'used_bytes', 'capacity_bytes'): + self.assertIsInstance(self.response['space'][size], int) + self.assertGreater(self.response['space'][size], 0) + + def test_report(self): + for size in ('total_used', 'total_space', 'total_avail'): + self.assertIsInstance(self.response['report'][size], int) + self.assertGreater(self.response['report'][size], 0) + +class TestOSD(RestTest): + + uri = 'cluster/1/osd' + + def test_cluster(self): + self.assertEqual(self.response['cluster'], 1) + + def test_times(self): + for time in ( + self.response['cluster_update_time'], + self.response['added'], + ): + self.assertTrue(is_datetime(time)) + + def test_osd_uuid(self): + for osd in self.response['osds']: + uuidobj = uuid.UUID(osd['uuid']) + self.assertEqual(str(uuidobj), osd['uuid']) + + def test_osd_pools(self): + for osd in self.response['osds']: + if osd['up'] != 1: + continue + self.assertIsInstance(osd['pools'], list) + self.assertIsInstance(osd['pools'][0], basestring) + + def test_osd_up_in(self): + for osd in self.response['osds']: + for flag in ('up', 'in'): + self.assertIn(osd[flag], (0, 1)) + + def test_osd_0(self): + osd0 = self.get_object('GET', 'cluster/1/osd/0')['osd'] + for field in osd0.keys(): + if not field.startswith('cluster_update_time'): + self.assertEqual(self.response['osds'][0][field], osd0[field]) + +class TestPool(RestTest): + + uri = 'cluster/1/pool' + + def test_cluster(self): + for pool in self.response: + self.assertEqual(pool['cluster'], 1) + + def test_fields_are_ints(self): + for pool in self.response: + for field in ('id', 'used_objects', 'used_bytes'): + self.assertIsInstance(pool[field], int) + + def test_name_is_str(self): + for pool in self.response: + self.assertIsInstance(pool['name'], basestring) + + def test_pool_0(self): + poolid = self.response[0]['id'] + pool = self.get_object('GET', 'cluster/1/pool/{id}'.format(id=poolid)) + self.assertEqual(self.response[0], pool) + +class TestServer(RestTest): + + uri = 'cluster/1/server' + + def test_ipaddr(self): + for server in self.response: + octets = server['addr'].split('.') + self.assertEqual(len(octets), 4) + for octetstr in octets: + octet = int(octetstr) + self.assertIsInstance(octet, int) + self.assertGreaterEqual(octet, 0) + self.assertLessEqual(octet, 255) + + def test_hostname_name_strings(self): + for server in self.response: + for field in ('name', 'hostname'): + self.assertIsInstance(server[field], basestring) + + def test_services(self): + for server in self.response: + self.assertIsInstance(server['services'], list) + for service in server['services']: + self.assertIn(service['type'], ('osd', 'mon', 'mds')) + +class TestGraphitePoolIOPS(RestTest): + + uri = '/graphite/render?format=json-array&' \ + 'target=ceph.cluster.ceph.pool.0.num_read&' \ + 
'target=ceph.cluster.ceph.pool.0.num_write' + + def test_targets_contain_request(self): + self.assertIn('targets', self.response) + self.assertIn('ceph.cluster.ceph.pool.0.num_read', + self.response['targets']) + self.assertIn('ceph.cluster.ceph.pool.0.num_write', + self.response['targets']) + + def test_datapoints(self): + self.assertIn('datapoints', self.response) + self.assertGreater(len(self.response['datapoints']), 0) + data = self.response['datapoints'][0] + self.assertEqual(len(data), 3) + self.assertIsInstance(data[0], int) + if data[1]: + self.assertIsInstance(data[1], float) + if data[2]: + self.assertIsInstance(data[2], float) + +# +# Utility functions +# + +DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' + +def is_datetime(time): + datetime.datetime.strptime(time, DATETIME_FORMAT) + return True + +if __name__ == '__main__': + unittest.main() diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py new file mode 100644 index 00000000000..2abc11019d8 --- /dev/null +++ b/qa/tasks/ceph.py @@ -0,0 +1,1197 @@ +""" +Ceph cluster task. + +Handle the setup, starting, and clean-up of a Ceph cluster. +""" +from cStringIO import StringIO + +import argparse +import contextlib +import logging +import os +import json +import time + +from ceph_manager import CephManager +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.orchestra import run +from teuthology.orchestra.daemon import DaemonGroup +import ceph_client as cclient + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def ceph_log(ctx, config): + """ + Create /var/log/ceph log directory that is open to everyone. + Add valgrind and profiling-logger directories. + + :param ctx: Context + :param config: Configuration + """ + log.info('Making ceph log dir writeable by non-root...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'chmod', + '777', + '/var/log/ceph', + ], + wait=False, + ) + ) + log.info('Disabling ceph logrotate...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'rm', '-f', '--', + '/etc/logrotate.d/ceph', + ], + wait=False, + ) + ) + log.info('Creating extra log directories...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'install', '-d', '-m0755', '--', + '/var/log/ceph/valgrind', + '/var/log/ceph/profiling-logger', + ], + wait=False, + ) + ) + + try: + yield + + finally: + pass + + +def assign_devs(roles, devs): + """ + Create a dictionary of devs indexed by roles + + :param roles: List of roles + :param devs: Corresponding list of devices. + :returns: Dictionary of devs indexed by roles. + """ + return dict(zip(roles, devs)) + +@contextlib.contextmanager +def valgrind_post(ctx, config): + """ + After the tests run, look throught all the valgrind logs. Exceptions are raised + if textual errors occured in the logs, or if valgrind exceptions were detected in + the logs. 
+ + :param ctx: Context + :param config: Configuration + """ + try: + yield + finally: + lookup_procs = list() + log.info('Checking for errors in any valgrind logs...'); + for remote in ctx.cluster.remotes.iterkeys(): + #look at valgrind logs for each node + proc = remote.run( + args=[ + 'sudo', + 'zgrep', + '', + run.Raw('/var/log/ceph/valgrind/*'), + '/dev/null', # include a second file so that we always get a filename prefix on the output + run.Raw('|'), + 'sort', + run.Raw('|'), + 'uniq', + ], + wait=False, + check_status=False, + stdout=StringIO(), + ) + lookup_procs.append((proc, remote)) + + valgrind_exception = None + for (proc, remote) in lookup_procs: + proc.wait() + out = proc.stdout.getvalue() + for line in out.split('\n'): + if line == '': + continue + try: + (file, kind) = line.split(':') + except Exception: + log.error('failed to split line %s', line) + raise + log.debug('file %s kind %s', file, kind) + if (file.find('mds') >= 0) and kind.find('Lost') > 0: + continue + log.error('saw valgrind issue %s in %s', kind, file) + valgrind_exception = Exception('saw valgrind issues') + + if valgrind_exception is not None: + raise valgrind_exception + + + +@contextlib.contextmanager +def cluster(ctx, config): + """ + Handle the creation and removal of a ceph cluster. + + On startup: + Create directories needed for the cluster. + Create remote journals for all osds. + Create and set keyring. + Copy the monmap to tht test systems. + Setup mon nodes. + Setup mds nodes. + Mkfs osd nodes. + Add keyring information to monmaps + Mkfs mon nodes. + + On exit: + If errors occured, extract a failure message and store in ctx.summary. + Unmount all test files and temporary journaling files. + Save the monitor information and archive all ceph logs. + Cleanup the keyring setup, and remove all monitor map and data files left over. 
+ + :param ctx: Context + :param config: Configuration + """ + if ctx.config.get('use_existing_cluster', False) is True: + log.info("'use_existing_cluster' is true; skipping cluster creation") + yield + + testdir = teuthology.get_testdir(ctx) + log.info('Creating ceph cluster...') + run.wait( + ctx.cluster.run( + args=[ + 'install', '-d', '-m0755', '--', + '{tdir}/data'.format(tdir=testdir), + ], + wait=False, + ) + ) + + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'install', '-d', '-m0777', '--', '/var/run/ceph', + ], + wait=False, + ) + ) + + + devs_to_clean = {} + remote_to_roles_to_devs = {} + remote_to_roles_to_journals = {} + osds = ctx.cluster.only(teuthology.is_type('osd')) + for remote, roles_for_host in osds.remotes.iteritems(): + devs = teuthology.get_scratch_devices(remote) + roles_to_devs = {} + roles_to_journals = {} + if config.get('fs'): + log.info('fs option selected, checking for scratch devs') + log.info('found devs: %s' % (str(devs),)) + devs_id_map = teuthology.get_wwn_id_map(remote, devs) + iddevs = devs_id_map.values() + roles_to_devs = assign_devs( + teuthology.roles_of_type(roles_for_host, 'osd'), iddevs + ) + if len(roles_to_devs) < len(iddevs): + iddevs = iddevs[len(roles_to_devs):] + devs_to_clean[remote] = [] + + if config.get('block_journal'): + log.info('block journal enabled') + roles_to_journals = assign_devs( + teuthology.roles_of_type(roles_for_host, 'osd'), iddevs + ) + log.info('journal map: %s', roles_to_journals) + + if config.get('tmpfs_journal'): + log.info('tmpfs journal enabled') + roles_to_journals = {} + remote.run( args=[ 'sudo', 'mount', '-t', 'tmpfs', 'tmpfs', '/mnt' ] ) + for osd in teuthology.roles_of_type(roles_for_host, 'osd'): + tmpfs = '/mnt/osd.%s' % osd + roles_to_journals[osd] = tmpfs + remote.run( args=[ 'truncate', '-s', '1500M', tmpfs ] ) + log.info('journal map: %s', roles_to_journals) + + log.info('dev map: %s' % (str(roles_to_devs),)) + remote_to_roles_to_devs[remote] = roles_to_devs + remote_to_roles_to_journals[remote] = roles_to_journals + + + log.info('Generating config...') + remotes_and_roles = ctx.cluster.remotes.items() + roles = [role_list for (remote, role_list) in remotes_and_roles] + ips = [host for (host, port) in (remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)] + conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips) + for remote, roles_to_journals in remote_to_roles_to_journals.iteritems(): + for role, journal in roles_to_journals.iteritems(): + key = "osd." + str(role) + if key not in conf: + conf[key] = {} + conf[key]['osd journal'] = journal + for section, keys in config['conf'].iteritems(): + for key, value in keys.iteritems(): + log.info("[%s] %s = %s" % (section, key, value)) + if section not in conf: + conf[section] = {} + conf[section][key] = value + + if config.get('tmpfs_journal'): + conf['journal dio'] = False + + ctx.ceph = argparse.Namespace() + ctx.ceph.conf = conf + + keyring_path = config.get('keyring_path', '/etc/ceph/ceph.keyring') + + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) + + firstmon = teuthology.get_first_mon(ctx, config) + + log.info('Setting up %s...' 
% firstmon) + ctx.cluster.only(firstmon).run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--create-keyring', + keyring_path, + ], + ) + ctx.cluster.only(firstmon).run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--gen-key', + '--name=mon.', + keyring_path, + ], + ) + ctx.cluster.only(firstmon).run( + args=[ + 'sudo', + 'chmod', + '0644', + keyring_path, + ], + ) + (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + fsid = teuthology.create_simple_monmap( + ctx, + remote=mon0_remote, + conf=conf, + ) + if not 'global' in conf: + conf['global'] = {} + conf['global']['fsid'] = fsid + + log.info('Writing ceph.conf for FSID %s...' % fsid) + conf_path = config.get('conf_path', '/etc/ceph/ceph.conf') + conf_fp = StringIO() + conf.write(conf_fp) + conf_fp.seek(0) + writes = ctx.cluster.run( + args=[ + 'sudo', 'mkdir', '-p', '/etc/ceph', run.Raw('&&'), + 'sudo', 'chmod', '0755', '/etc/ceph', run.Raw('&&'), + 'sudo', 'python', + '-c', + 'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))', + conf_path, + run.Raw('&&'), + 'sudo', 'chmod', '0644', conf_path, + ], + stdin=run.PIPE, + wait=False, + ) + teuthology.feed_many_stdins_and_close(conf_fp, writes) + run.wait(writes) + + log.info('Creating admin key on %s...' % firstmon) + ctx.cluster.only(firstmon).run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--gen-key', + '--name=client.admin', + '--set-uid=0', + '--cap', 'mon', 'allow *', + '--cap', 'osd', 'allow *', + '--cap', 'mds', 'allow', + keyring_path, + ], + ) + + log.info('Copying monmap to all nodes...') + keyring = teuthology.get_file( + remote=mon0_remote, + path=keyring_path, + ) + monmap = teuthology.get_file( + remote=mon0_remote, + path='{tdir}/monmap'.format(tdir=testdir), + ) + + for rem in ctx.cluster.remotes.iterkeys(): + # copy mon key and initial monmap + log.info('Sending monmap to node {remote}'.format(remote=rem)) + teuthology.sudo_write_file( + remote=rem, + path=keyring_path, + data=keyring, + perms='0644' + ) + teuthology.write_file( + remote=rem, + path='{tdir}/monmap'.format(tdir=testdir), + data=monmap, + ) + + log.info('Setting up mon nodes...') + mons = ctx.cluster.only(teuthology.is_type('mon')) + run.wait( + mons.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'osdmaptool', + '-c', conf_path, + '--clobber', + '--createsimple', '{num:d}'.format( + num=teuthology.num_instances_of_type(ctx.cluster, 'osd'), + ), + '{tdir}/osdmap'.format(tdir=testdir), + '--pg_bits', '2', + '--pgp_bits', '4', + ], + wait=False, + ), + ) + + log.info('Setting up mds nodes...') + mdss = ctx.cluster.only(teuthology.is_type('mds')) + for remote, roles_for_host in mdss.remotes.iteritems(): + for id_ in teuthology.roles_of_type(roles_for_host, 'mds'): + remote.run( + args=[ + 'sudo', + 'mkdir', + '-p', + '/var/lib/ceph/mds/ceph-{id}'.format(id=id_), + run.Raw('&&'), + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--create-keyring', + '--gen-key', + '--name=mds.{id}'.format(id=id_), + '/var/lib/ceph/mds/ceph-{id}/keyring'.format(id=id_), + ], + ) + + cclient.create_keyring(ctx) + log.info('Running mkfs on osd nodes...') + + ctx.disk_config = argparse.Namespace() + ctx.disk_config.remote_to_roles_to_dev = remote_to_roles_to_devs + ctx.disk_config.remote_to_roles_to_journals = remote_to_roles_to_journals + 
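+ # also record the mount options and fs type chosen below, so that later
+ # code (e.g. ceph_manager.mount_osd_data) can remount an OSD data device
+ # with the same parameters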
ctx.disk_config.remote_to_roles_to_dev_mount_options = {} + ctx.disk_config.remote_to_roles_to_dev_fstype = {} + + log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(r=str(ctx.disk_config.remote_to_roles_to_dev))) + for remote, roles_for_host in osds.remotes.iteritems(): + roles_to_devs = remote_to_roles_to_devs[remote] + roles_to_journals = remote_to_roles_to_journals[remote] + + + for id_ in teuthology.roles_of_type(roles_for_host, 'osd'): + remote.run( + args=[ + 'sudo', + 'mkdir', + '-p', + '/var/lib/ceph/osd/ceph-{id}'.format(id=id_), + ]) + log.info(str(roles_to_journals)) + log.info(id_) + if roles_to_devs.get(id_): + dev = roles_to_devs[id_] + fs = config.get('fs') + package = None + mkfs_options = config.get('mkfs_options') + mount_options = config.get('mount_options') + if fs == 'btrfs': + #package = 'btrfs-tools' + if mount_options is None: + mount_options = ['noatime','user_subvol_rm_allowed'] + if mkfs_options is None: + mkfs_options = ['-m', 'single', + '-l', '32768', + '-n', '32768'] + if fs == 'xfs': + #package = 'xfsprogs' + if mount_options is None: + mount_options = ['noatime'] + if mkfs_options is None: + mkfs_options = ['-f', '-i', 'size=2048'] + if fs == 'ext4' or fs == 'ext3': + if mount_options is None: + mount_options = ['noatime','user_xattr'] + + if mount_options is None: + mount_options = [] + if mkfs_options is None: + mkfs_options = [] + mkfs = ['mkfs.%s' % fs] + mkfs_options + log.info('%s on %s on %s' % (mkfs, dev, remote)) + if package is not None: + remote.run( + args=[ + 'sudo', + 'apt-get', 'install', '-y', package + ], + stdout=StringIO(), + ) + + try: + remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev]) + except run.CommandFailedError: + # Newer btfs-tools doesn't prompt for overwrite, use -f + if '-f' not in mount_options: + mkfs_options.append('-f') + mkfs = ['mkfs.%s' % fs] + mkfs_options + log.info('%s on %s on %s' % (mkfs, dev, remote)) + remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev]) + + log.info('mount %s on %s -o %s' % (dev, remote, + ','.join(mount_options))) + remote.run( + args=[ + 'sudo', + 'mount', + '-t', fs, + '-o', ','.join(mount_options), + dev, + os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)), + ] + ) + if not remote in ctx.disk_config.remote_to_roles_to_dev_mount_options: + ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {} + ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][id_] = mount_options + if not remote in ctx.disk_config.remote_to_roles_to_dev_fstype: + ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {} + ctx.disk_config.remote_to_roles_to_dev_fstype[remote][id_] = fs + devs_to_clean[remote].append( + os.path.join( + os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)), + ) + ) + + for id_ in teuthology.roles_of_type(roles_for_host, 'osd'): + remote.run( + args=[ + 'sudo', + 'MALLOC_CHECK_=3', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-osd', + '--mkfs', + '--mkkey', + '-i', id_, + '--monmap', '{tdir}/monmap'.format(tdir=testdir), + ], + ) + + + log.info('Reading keys from all nodes...') + keys_fp = StringIO() + keys = [] + for remote, roles_for_host in ctx.cluster.remotes.iteritems(): + for type_ in ['mds','osd']: + for id_ in teuthology.roles_of_type(roles_for_host, type_): + data = teuthology.get_file( + remote=remote, + path='/var/lib/ceph/{type}/ceph-{id}/keyring'.format( + type=type_, + id=id_, + ), + sudo=True, + ) + keys.append((type_, id_, data)) + keys_fp.write(data) + for remote, roles_for_host 
in ctx.cluster.remotes.iteritems(): + for type_ in ['client']: + for id_ in teuthology.roles_of_type(roles_for_host, type_): + data = teuthology.get_file( + remote=remote, + path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) + ) + keys.append((type_, id_, data)) + keys_fp.write(data) + + log.info('Adding keys to all mons...') + writes = mons.run( + args=[ + 'sudo', 'tee', '-a', + keyring_path, + ], + stdin=run.PIPE, + wait=False, + stdout=StringIO(), + ) + keys_fp.seek(0) + teuthology.feed_many_stdins_and_close(keys_fp, writes) + run.wait(writes) + for type_, id_, data in keys: + run.wait( + mons.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + keyring_path, + '--name={type}.{id}'.format( + type=type_, + id=id_, + ), + ] + list(teuthology.generate_caps(type_)), + wait=False, + ), + ) + + log.info('Running mkfs on mon nodes...') + for remote, roles_for_host in mons.remotes.iteritems(): + for id_ in teuthology.roles_of_type(roles_for_host, 'mon'): + remote.run( + args=[ + 'sudo', + 'mkdir', + '-p', + '/var/lib/ceph/mon/ceph-{id}'.format(id=id_), + ], + ) + remote.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-mon', + '--mkfs', + '-i', id_, + '--monmap={tdir}/monmap'.format(tdir=testdir), + '--osdmap={tdir}/osdmap'.format(tdir=testdir), + '--keyring={kpath}'.format(kpath=keyring_path), + ], + ) + + + run.wait( + mons.run( + args=[ + 'rm', + '--', + '{tdir}/monmap'.format(tdir=testdir), + '{tdir}/osdmap'.format(tdir=testdir), + ], + wait=False, + ), + ) + + try: + yield + except Exception: + # we need to know this below + ctx.summary['success'] = False + raise + finally: + (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + + log.info('Checking cluster log for badness...') + def first_in_ceph_log(pattern, excludes): + """ + Find the first occurence of the pattern specified in the Ceph log, + Returns None if none found. + + :param pattern: Pattern scanned for. + :param excludes: Patterns to ignore. 
+ :return: First line of text (or None if not found) + """ + args = [ + 'sudo', + 'egrep', pattern, + '/var/log/ceph/ceph.log', + ] + for exclude in excludes: + args.extend([run.Raw('|'), 'egrep', '-v', exclude]) + args.extend([ + run.Raw('|'), 'head', '-n', '1', + ]) + r = mon0_remote.run( + stdout=StringIO(), + args=args, + ) + stdout = r.stdout.getvalue() + if stdout != '': + return stdout + return None + + if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]', + config['log_whitelist']) is not None: + log.warning('Found errors (ERR|WRN|SEC) in cluster log') + ctx.summary['success'] = False + # use the most severe problem as the failure reason + if 'failure_reason' not in ctx.summary: + for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']: + match = first_in_ceph_log(pattern, config['log_whitelist']) + if match is not None: + ctx.summary['failure_reason'] = \ + '"{match}" in cluster log'.format( + match=match.rstrip('\n'), + ) + break + + for remote, dirs in devs_to_clean.iteritems(): + for dir_ in dirs: + log.info('Unmounting %s on %s' % (dir_, remote)) + remote.run( + args=[ + 'sync', + run.Raw('&&'), + 'sudo', + 'umount', + '-f', + dir_ + ] + ) + + if config.get('tmpfs_journal'): + log.info('tmpfs journal enabled - unmounting tmpfs at /mnt') + for remote, roles_for_host in osds.remotes.iteritems(): + remote.run( + args=[ 'sudo', 'umount', '-f', '/mnt' ], + check_status=False, + ) + + if ctx.archive is not None and \ + not (ctx.config.get('archive-on-error') and ctx.summary['success']): + # archive mon data, too + log.info('Archiving mon data...') + path = os.path.join(ctx.archive, 'data') + os.makedirs(path) + for remote, roles in mons.remotes.iteritems(): + for role in roles: + if role.startswith('mon.'): + teuthology.pull_directory_tarball( + remote, + '/var/lib/ceph/mon', + path + '/' + role + '.tgz') + + # and logs + log.info('Compressing logs...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'find', + '/var/log/ceph', + '-name', + '*.log', + '-print0', + run.Raw('|'), + 'sudo', + 'xargs', + '-0', + '--no-run-if-empty', + '--', + 'gzip', + '--', + ], + wait=False, + ), + ) + + log.info('Archiving logs...') + path = os.path.join(ctx.archive, 'remote') + os.makedirs(path) + for remote in ctx.cluster.remotes.iterkeys(): + sub = os.path.join(path, remote.shortname) + os.makedirs(sub) + teuthology.pull_directory(remote, '/var/log/ceph', + os.path.join(sub, 'log')) + + + log.info('Cleaning ceph cluster...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'rm', + '-rf', + '--', + conf_path, + keyring_path, + '{tdir}/data'.format(tdir=testdir), + '{tdir}/monmap'.format(tdir=testdir), + ], + wait=False, + ), + ) + +def get_all_pg_info(rem_site, testdir): + """ + Get the results of a ceph pg dump + """ + info = rem_site.run(args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph', 'pg', 'dump', + '--format', 'json'], stdout=StringIO()) + all_info = json.loads(info.stdout.getvalue()) + return all_info['pg_stats'] + +def osd_scrub_pgs(ctx, config): + """ + Scrub pgs when we exit. + + First make sure all pgs are active and clean. + Next scrub all osds. + Then periodically check until all pgs have scrub time stamps that + indicate the last scrub completed. Time out if no progess is made + here after two minutes. 
+ """ + retries = 12 + delays = 10 + vlist = ctx.cluster.remotes.values() + testdir = teuthology.get_testdir(ctx) + rem_site = ctx.cluster.remotes.keys()[0] + all_clean = False + for _ in range(0, retries): + stats = get_all_pg_info(rem_site, testdir) + states = [stat['state'] for stat in stats] + if len(set(states)) == 1 and states[0] == 'active+clean': + all_clean = True + break + log.info("Waiting for all osds to be active and clean.") + time.sleep(delays) + if not all_clean: + log.info("Scrubbing terminated -- not all pgs were active and clean.") + return + check_time_now = time.localtime() + time.sleep(1) + for slists in vlist: + for role in slists: + if role.startswith('osd.'): + log.info("Scrubbing osd {osd}".format(osd=role)) + rem_site.run(args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph', 'osd', 'scrub', role]) + prev_good = 0 + gap_cnt = 0 + loop = True + while loop: + stats = get_all_pg_info(rem_site, testdir) + timez = [stat['last_scrub_stamp'] for stat in stats] + loop = False + thiscnt = 0 + for tmval in timez: + pgtm = time.strptime(tmval[0:tmval.find('.')], '%Y-%m-%d %H:%M:%S') + if pgtm > check_time_now: + thiscnt += 1 + else: + loop = True + if thiscnt > prev_good: + prev_good = thiscnt + gap_cnt = 0 + else: + gap_cnt += 1 + if gap_cnt > retries: + log.info('Exiting scrub checking -- not all pgs scrubbed.') + return + if loop: + log.info('Still waiting for all pgs to be scrubbed.') + time.sleep(delays) + +@contextlib.contextmanager +def run_daemon(ctx, config, type_): + """ + Run daemons for a role type. Handle the startup and termination of a a daemon. + On startup -- set coverages, cpu_profile, valgrind values for all remotes, + and a max_mds value for one mds. + On cleanup -- Stop all existing daemons of this type. + + :param ctx: Context + :param config: Configuration + :paran type_: Role type + """ + log.info('Starting %s daemons...' 
% type_) + testdir = teuthology.get_testdir(ctx) + daemons = ctx.cluster.only(teuthology.is_type(type_)) + + # check whether any daemons if this type are configured + if daemons is None: + return + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) + + daemon_signal = 'kill' + if config.get('coverage') or config.get('valgrind') is not None: + daemon_signal = 'term' + + num_active = 0 + for remote, roles_for_host in daemons.remotes.iteritems(): + for id_ in teuthology.roles_of_type(roles_for_host, type_): + name = '%s.%s' % (type_, id_) + + if not (id_.endswith('-s')) and (id_.find('-s-') == -1): + num_active += 1 + + run_cmd = [ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'daemon-helper', + daemon_signal, + ] + run_cmd_tail = [ + 'ceph-%s' % (type_), + '-f', + '-i', id_] + + if type_ in config.get('cpu_profile', []): + profile_path = '/var/log/ceph/profiling-logger/%s.%s.prof' % (type_, id_) + run_cmd.extend([ 'env', 'CPUPROFILE=%s' % profile_path ]) + + if config.get('valgrind') is not None: + valgrind_args = None + if type_ in config['valgrind']: + valgrind_args = config['valgrind'][type_] + if name in config['valgrind']: + valgrind_args = config['valgrind'][name] + run_cmd = teuthology.get_valgrind_args(testdir, name, + run_cmd, + valgrind_args) + + run_cmd.extend(run_cmd_tail) + + ctx.daemons.add_daemon(remote, type_, id_, + args=run_cmd, + logger=log.getChild(name), + stdin=run.PIPE, + wait=False, + ) + + if type_ == 'mds': + firstmon = teuthology.get_first_mon(ctx, config) + (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + + mon0_remote.run(args=[ + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph', + 'mds', 'set_max_mds', str(num_active)]) + + try: + yield + finally: + teuthology.stop_daemons_of_type(ctx, type_) + +def healthy(ctx, config): + """ + Wait for all osd's to be up, and for the ceph health monitor to return HEALTH_OK. + + :param ctx: Context + :param config: Configuration + """ + log.info('Waiting until ceph is healthy...') + firstmon = teuthology.get_first_mon(ctx, config) + (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + teuthology.wait_until_osds_up( + ctx, + cluster=ctx.cluster, + remote=mon0_remote + ) + teuthology.wait_until_healthy( + ctx, + remote=mon0_remote, + ) + +def wait_for_osds_up(ctx, config): + """ + Wait for all osd's to come up. + + :param ctx: Context + :param config: Configuration + """ + log.info('Waiting until ceph osds are all up...') + firstmon = teuthology.get_first_mon(ctx, config) + (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + teuthology.wait_until_osds_up( + ctx, + cluster=ctx.cluster, + remote=mon0_remote + ) + +def wait_for_mon_quorum(ctx, config): + """ + Check renote ceph status until all monitors are up. 
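+
+ The config is the list of mon ids expected to form the quorum. For
+ example::
+
+ tasks:
+ - ceph.wait_for_mon_quorum: [a, b]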
+ + :param ctx: Context + :param config: Configuration + """ + + assert isinstance(config, list) + firstmon = teuthology.get_first_mon(ctx, config) + (remote,) = ctx.cluster.only(firstmon).remotes.keys() + while True: + r = remote.run( + args=[ + 'ceph', + 'quorum_status', + ], + stdout=StringIO(), + logger=log.getChild('quorum_status'), + ) + j = json.loads(r.stdout.getvalue()) + q = j.get('quorum_names', []) + log.debug('Quorum: %s', q) + if sorted(q) == sorted(config): + break + time.sleep(1) + + +@contextlib.contextmanager +def restart(ctx, config): + """ + restart ceph daemons + + For example:: + tasks: + - ceph.restart: [all] + + For example:: + tasks: + - ceph.restart: [osd.0, mon.1] + + or:: + + tasks: + - ceph.restart: + daemons: [osd.0, mon.1] + wait-for-healthy: false + wait-for-osds-up: true + + :param ctx: Context + :param config: Configuration + """ + if config is None: + config = {} + if isinstance(config, list): + config = { 'daemons': config } + if 'daemons' not in config: + config['daemons'] = [] + type_daemon = ['mon', 'osd', 'mds', 'rgw'] + for d in type_daemon: + type_ = d + for daemon in ctx.daemons.iter_daemons_of_role(type_): + config['daemons'].append(type_ + '.' + daemon.id_) + + assert isinstance(config['daemons'], list) + daemons = dict.fromkeys(config['daemons']) + for i in daemons.keys(): + type_ = i.split('.', 1)[0] + id_ = i.split('.', 1)[1] + ctx.daemons.get_daemon(type_, id_).stop() + ctx.daemons.get_daemon(type_, id_).restart() + + if config.get('wait-for-healthy', True): + healthy(ctx=ctx, config=None) + if config.get('wait-for-osds-up', False): + wait_for_osds_up(ctx=ctx, config=None) + yield + +@contextlib.contextmanager +def task(ctx, config): + """ + Set up and tear down a Ceph cluster. + + For example:: + + tasks: + - ceph: + - interactive: + + You can also specify what branch to run:: + + tasks: + - ceph: + branch: foo + + Or a tag:: + + tasks: + - ceph: + tag: v0.42.13 + + Or a sha1:: + + tasks: + - ceph: + sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed + + Or a local source dir:: + + tasks: + - ceph: + path: /home/sage/ceph + + To capture code coverage data, use:: + + tasks: + - ceph: + coverage: true + + To use btrfs, ext4, or xfs on the target's scratch disks, use:: + + tasks: + - ceph: + fs: xfs + mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1] + mount_options: [nobarrier, inode64] + + Note, this will cause the task to check the /scratch_devs file on each node + for available devices. If no such file is found, /dev/sdb will be used. + + To run some daemons under valgrind, include their names + and the tool/args to use in a valgrind section:: + + tasks: + - ceph: + valgrind: + mds.1: --tool=memcheck + osd.1: [--tool=memcheck, --leak-check=no] + + Those nodes which are using memcheck or valgrind will get + checked for bad results. + + To adjust or modify config options, use:: + + tasks: + - ceph: + conf: + section: + key: value + + For example:: + + tasks: + - ceph: + conf: + mds.0: + some option: value + other key: other value + client.0: + debug client: 10 + debug ms: 1 + + By default, the cluster log is checked for errors and warnings, + and the run marked failed if any appear. 
You can ignore log + entries by giving a list of egrep compatible regexes, i.e.: + + tasks: + - ceph: + log-whitelist: ['foo.*bar', 'bad message'] + + :param ctx: Context + :param config: Configuration + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + "task ceph only supports a dictionary for configuration" + + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('ceph', {})) + + ctx.daemons = DaemonGroup() + + testdir = teuthology.get_testdir(ctx) + if config.get('coverage'): + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) + log.info('Creating coverage directory...') + run.wait( + ctx.cluster.run( + args=[ + 'install', '-d', '-m0755', '--', + coverage_dir, + ], + wait=False, + ) + ) + + with contextutil.nested( + lambda: ceph_log(ctx=ctx, config=None), + lambda: valgrind_post(ctx=ctx, config=config), + lambda: cluster(ctx=ctx, config=dict( + conf=config.get('conf', {}), + fs=config.get('fs', None), + mkfs_options=config.get('mkfs_options', None), + mount_options=config.get('mount_options',None), + block_journal=config.get('block_journal', None), + tmpfs_journal=config.get('tmpfs_journal', None), + log_whitelist=config.get('log-whitelist', []), + cpu_profile=set(config.get('cpu_profile', [])), + )), + lambda: run_daemon(ctx=ctx, config=config, type_='mon'), + lambda: run_daemon(ctx=ctx, config=config, type_='osd'), + lambda: run_daemon(ctx=ctx, config=config, type_='mds'), + ): + try: + if config.get('wait-for-healthy', True): + healthy(ctx=ctx, config=None) + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + ctx.manager = CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + yield + finally: + osd_scrub_pgs(ctx, config) diff --git a/qa/tasks/ceph_client.py b/qa/tasks/ceph_client.py new file mode 100644 index 00000000000..d7cfd00be3e --- /dev/null +++ b/qa/tasks/ceph_client.py @@ -0,0 +1,40 @@ +""" +Set up client keyring +""" +import logging + +from teuthology import misc as teuthology +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def create_keyring(ctx): + """ + Set up key ring on remote sites + """ + log.info('Setting up client nodes...') + clients = ctx.cluster.only(teuthology.is_type('client')) + testdir = teuthology.get_testdir(ctx) + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) + for remote, roles_for_host in clients.remotes.iteritems(): + for id_ in teuthology.roles_of_type(roles_for_host, 'client'): + client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) + remote.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--create-keyring', + '--gen-key', + # TODO this --name= is not really obeyed, all unknown "types" are munged to "client" + '--name=client.{id}'.format(id=id_), + client_keyring, + run.Raw('&&'), + 'sudo', + 'chmod', + '0644', + client_keyring, + ], + ) diff --git a/qa/tasks/ceph_deploy.py b/qa/tasks/ceph_deploy.py new file mode 100644 index 00000000000..058a798e052 --- /dev/null +++ b/qa/tasks/ceph_deploy.py @@ -0,0 +1,464 @@ +""" +Execute ceph-deploy as a task +""" +from cStringIO import StringIO + +import contextlib +import os +import time +import logging + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.config import config as teuth_config +from teuthology.task import install as install_fn +from teuthology.orchestra import run + +log = 
logging.getLogger(__name__) + + +@contextlib.contextmanager +def download_ceph_deploy(ctx, config): + """ + Downloads ceph-deploy from the ceph.com git mirror and (by default) + switches to the master branch. If the `ceph-deploy-branch` is specified, it + will use that instead. + """ + log.info('Downloading ceph-deploy...') + testdir = teuthology.get_testdir(ctx) + ceph_admin = teuthology.get_first_mon(ctx, config) + default_cd_branch = {'ceph-deploy-branch': 'master'} + ceph_deploy_branch = config.get( + 'ceph-deploy', + default_cd_branch).get('ceph-deploy-branch') + + ctx.cluster.only(ceph_admin).run( + args=[ + 'git', 'clone', '-b', ceph_deploy_branch, + teuth_config.ceph_git_base_url + 'ceph-deploy.git', + '{tdir}/ceph-deploy'.format(tdir=testdir), + ], + ) + ctx.cluster.only(ceph_admin).run( + args=[ + 'cd', + '{tdir}/ceph-deploy'.format(tdir=testdir), + run.Raw('&&'), + './bootstrap', + ], + ) + + try: + yield + finally: + log.info('Removing ceph-deploy ...') + ctx.cluster.only(ceph_admin).run( + args=[ + 'rm', + '-rf', + '{tdir}/ceph-deploy'.format(tdir=testdir), + ], + ) + + +def is_healthy(ctx, config): + """Wait until a Ceph cluster is healthy.""" + testdir = teuthology.get_testdir(ctx) + ceph_admin = teuthology.get_first_mon(ctx, config) + (remote,) = ctx.cluster.only(ceph_admin).remotes.keys() + max_tries = 90 # 90 tries * 10 secs --> 15 minutes + tries = 0 + while True: + tries += 1 + if tries >= max_tries: + msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes" + raise RuntimeError(msg) + + r = remote.run( + args=[ + 'cd', + '{tdir}'.format(tdir=testdir), + run.Raw('&&'), + 'sudo', 'ceph', + 'health', + ], + stdout=StringIO(), + logger=log.getChild('health'), + ) + out = r.stdout.getvalue() + log.debug('Ceph health: %s', out.rstrip('\n')) + if out.split(None, 1)[0] == 'HEALTH_OK': + break + time.sleep(10) + +def get_nodes_using_roles(ctx, config, role): + """Extract the names of nodes that match a given role from a cluster""" + newl = [] + for _remote, roles_for_host in ctx.cluster.remotes.iteritems(): + for id_ in teuthology.roles_of_type(roles_for_host, role): + rem = _remote + if role == 'mon': + req1 = str(rem).split('@')[-1] + else: + req = str(rem).split('.')[0] + req1 = str(req).split('@')[1] + newl.append(req1) + return newl + +def get_dev_for_osd(ctx, config): + """Get a list of all osd device names.""" + osd_devs = [] + for remote, roles_for_host in ctx.cluster.remotes.iteritems(): + host = remote.name.split('@')[-1] + shortname = host.split('.')[0] + devs = teuthology.get_scratch_devices(remote) + num_osd_per_host = list(teuthology.roles_of_type(roles_for_host, 'osd')) + num_osds = len(num_osd_per_host) + assert num_osds <= len(devs), 'fewer disks than osds on ' + shortname + for dev in devs[:num_osds]: + dev_short = dev.split('/')[-1] + osd_devs.append('{host}:{dev}'.format(host=shortname, dev=dev_short)) + return osd_devs + +def get_all_nodes(ctx, config): + """Return a string of node names separated by blanks""" + nodelist = [] + for t, k in ctx.config['targets'].iteritems(): + host = t.split('@')[-1] + simple_host = host.split('.')[0] + nodelist.append(simple_host) + nodelist = " ".join(nodelist) + return nodelist + +def execute_ceph_deploy(ctx, config, cmd): + """Remotely execute a ceph_deploy command""" + testdir = teuthology.get_testdir(ctx) + ceph_admin = teuthology.get_first_mon(ctx, config) + exec_cmd = cmd + (remote,) = ctx.cluster.only(ceph_admin).remotes.iterkeys() + proc = remote.run( + args = [ + 'cd', + 
'{tdir}/ceph-deploy'.format(tdir=testdir), + run.Raw('&&'), + run.Raw(exec_cmd), + ], + check_status=False, + ) + exitstatus = proc.exitstatus + return exitstatus + + +@contextlib.contextmanager +def build_ceph_cluster(ctx, config): + """Build a ceph cluster""" + + try: + log.info('Building ceph cluster using ceph-deploy...') + testdir = teuthology.get_testdir(ctx) + ceph_branch = None + if config.get('branch') is not None: + cbranch = config.get('branch') + for var, val in cbranch.iteritems(): + if var == 'testing': + ceph_branch = '--{var}'.format(var=var) + ceph_branch = '--{var}={val}'.format(var=var, val=val) + node_dev_list = [] + all_nodes = get_all_nodes(ctx, config) + mds_nodes = get_nodes_using_roles(ctx, config, 'mds') + mds_nodes = " ".join(mds_nodes) + mon_node = get_nodes_using_roles(ctx, config, 'mon') + mon_nodes = " ".join(mon_node) + new_mon = './ceph-deploy new'+" "+mon_nodes + install_nodes = './ceph-deploy install '+ceph_branch+" "+all_nodes + purge_nodes = './ceph-deploy purge'+" "+all_nodes + purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes + mon_hostname = mon_nodes.split(' ')[0] + mon_hostname = str(mon_hostname) + gather_keys = './ceph-deploy gatherkeys'+" "+mon_hostname + deploy_mds = './ceph-deploy mds create'+" "+mds_nodes + no_of_osds = 0 + + if mon_nodes is None: + raise RuntimeError("no monitor nodes in the config file") + + estatus_new = execute_ceph_deploy(ctx, config, new_mon) + if estatus_new != 0: + raise RuntimeError("ceph-deploy: new command failed") + + log.info('adding config inputs...') + testdir = teuthology.get_testdir(ctx) + conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir) + first_mon = teuthology.get_first_mon(ctx, config) + (remote,) = ctx.cluster.only(first_mon).remotes.keys() + + lines = None + if config.get('conf') is not None: + confp = config.get('conf') + for section, keys in confp.iteritems(): + lines = '[{section}]\n'.format(section=section) + teuthology.append_lines_to_file(remote, conf_path, lines, + sudo=True) + for key, value in keys.iteritems(): + log.info("[%s] %s = %s" % (section, key, value)) + lines = '{key} = {value}\n'.format(key=key, value=value) + teuthology.append_lines_to_file(remote, conf_path, lines, + sudo=True) + + estatus_install = execute_ceph_deploy(ctx, config, install_nodes) + if estatus_install != 0: + raise RuntimeError("ceph-deploy: Failed to install ceph") + + mon_create_nodes = './ceph-deploy mon create-initial' + # If the following fails, it is OK, it might just be that the monitors + # are taking way more than a minute/monitor to form quorum, so lets + # try the next block which will wait up to 15 minutes to gatherkeys. 
+ execute_ceph_deploy(ctx, config, mon_create_nodes) + + estatus_gather = execute_ceph_deploy(ctx, config, gather_keys) + max_gather_tries = 90 + gather_tries = 0 + while (estatus_gather != 0): + gather_tries += 1 + if gather_tries >= max_gather_tries: + msg = 'ceph-deploy was not able to gatherkeys after 15 minutes' + raise RuntimeError(msg) + estatus_gather = execute_ceph_deploy(ctx, config, gather_keys) + time.sleep(10) + + if mds_nodes: + estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds) + if estatus_mds != 0: + raise RuntimeError("ceph-deploy: Failed to deploy mds") + + if config.get('test_mon_destroy') is not None: + for d in range(1, len(mon_node)): + mon_destroy_nodes = './ceph-deploy mon destroy'+" "+mon_node[d] + estatus_mon_d = execute_ceph_deploy(ctx, config, + mon_destroy_nodes) + if estatus_mon_d != 0: + raise RuntimeError("ceph-deploy: Failed to delete monitor") + + node_dev_list = get_dev_for_osd(ctx, config) + for d in node_dev_list: + osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d + estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds) + if estatus_osd == 0: + log.info('successfully created osd') + no_of_osds += 1 + else: + zap_disk = './ceph-deploy disk zap'+" "+d + execute_ceph_deploy(ctx, config, zap_disk) + estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds) + if estatus_osd == 0: + log.info('successfully created osd') + no_of_osds += 1 + else: + raise RuntimeError("ceph-deploy: Failed to create osds") + + if config.get('wait-for-healthy', True) and no_of_osds >= 2: + is_healthy(ctx=ctx, config=None) + + log.info('Setting up client nodes...') + conf_path = '/etc/ceph/ceph.conf' + admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring' + first_mon = teuthology.get_first_mon(ctx, config) + (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys() + conf_data = teuthology.get_file( + remote=mon0_remote, + path=conf_path, + sudo=True, + ) + admin_keyring = teuthology.get_file( + remote=mon0_remote, + path=admin_keyring_path, + sudo=True, + ) + + clients = ctx.cluster.only(teuthology.is_type('client')) + for remot, roles_for_host in clients.remotes.iteritems(): + for id_ in teuthology.roles_of_type(roles_for_host, 'client'): + client_keyring = \ + '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) + mon0_remote.run( + args=[ + 'cd', + '{tdir}'.format(tdir=testdir), + run.Raw('&&'), + 'sudo', 'bash', '-c', + run.Raw('"'), 'ceph', + 'auth', + 'get-or-create', + 'client.{id}'.format(id=id_), + 'mds', 'allow', + 'mon', 'allow *', + 'osd', 'allow *', + run.Raw('>'), + client_keyring, + run.Raw('"'), + ], + ) + key_data = teuthology.get_file( + remote=mon0_remote, + path=client_keyring, + sudo=True, + ) + teuthology.sudo_write_file( + remote=remot, + path=client_keyring, + data=key_data, + perms='0644' + ) + teuthology.sudo_write_file( + remote=remot, + path=admin_keyring_path, + data=admin_keyring, + perms='0644' + ) + teuthology.sudo_write_file( + remote=remot, + path=conf_path, + data=conf_data, + perms='0644' + ) + else: + raise RuntimeError( + "The cluster is NOT operational due to insufficient OSDs") + yield + + finally: + log.info('Stopping ceph...') + ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'), + 'sudo', 'service', 'ceph', 'stop' ]) + + # Are you really not running anymore? 
+ # try first with the init tooling + # ignoring the status so this becomes informational only + ctx.cluster.run(args=['sudo', 'status', 'ceph-all', run.Raw('||'), + 'sudo', 'service', 'ceph', 'status'], + check_status=False) + + # and now just check for the processes themselves, as if upstart/sysvinit + # is lying to us. Ignore errors if the grep fails + ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'), + 'grep', '-v', 'grep', run.Raw('|'), + 'grep', 'ceph'], check_status=False) + + if ctx.archive is not None: + # archive mon data, too + log.info('Archiving mon data...') + path = os.path.join(ctx.archive, 'data') + os.makedirs(path) + mons = ctx.cluster.only(teuthology.is_type('mon')) + for remote, roles in mons.remotes.iteritems(): + for role in roles: + if role.startswith('mon.'): + teuthology.pull_directory_tarball( + remote, + '/var/lib/ceph/mon', + path + '/' + role + '.tgz') + + log.info('Compressing logs...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'find', + '/var/log/ceph', + '-name', + '*.log', + '-print0', + run.Raw('|'), + 'sudo', + 'xargs', + '-0', + '--no-run-if-empty', + '--', + 'gzip', + '--', + ], + wait=False, + ), + ) + + log.info('Archiving logs...') + path = os.path.join(ctx.archive, 'remote') + os.makedirs(path) + for remote in ctx.cluster.remotes.iterkeys(): + sub = os.path.join(path, remote.shortname) + os.makedirs(sub) + teuthology.pull_directory(remote, '/var/log/ceph', + os.path.join(sub, 'log')) + + # Prevent these from being undefined if the try block fails + all_nodes = get_all_nodes(ctx, config) + purge_nodes = './ceph-deploy purge'+" "+all_nodes + purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes + + log.info('Purging package...') + execute_ceph_deploy(ctx, config, purge_nodes) + log.info('Purging data...') + execute_ceph_deploy(ctx, config, purgedata_nodes) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Set up and tear down a Ceph cluster. 
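+
+ The cluster is bootstrapped with ceph-deploy rather than by the manual
+ monitor/OSD setup that the plain ``ceph`` task performs.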
+ + For example:: + + tasks: + - install: + extras: yes + - ssh_keys: + - ceph-deploy: + branch: + stable: bobtail + mon_initial_members: 1 + + tasks: + - install: + extras: yes + - ssh_keys: + - ceph-deploy: + branch: + dev: master + conf: + mon: + debug mon = 20 + + tasks: + - install: + extras: yes + - ssh_keys: + - ceph-deploy: + branch: + testing: + """ + if config is None: + config = {} + + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('ceph-deploy', {})) + + assert isinstance(config, dict), \ + "task ceph-deploy only supports a dictionary for configuration" + + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('ceph-deploy', {})) + + if config.get('branch') is not None: + assert isinstance(config['branch'], dict), 'branch must be a dictionary' + + with contextutil.nested( + lambda: install_fn.ship_utilities(ctx=ctx, config=None), + lambda: download_ceph_deploy(ctx=ctx, config=config), + lambda: build_ceph_cluster(ctx=ctx, config=dict( + conf=config.get('conf', {}), + branch=config.get('branch',{}), + mon_initial_members=config.get('mon_initial_members', None), + test_mon_destroy=config.get('test_mon_destroy', None), + )), + ): + yield diff --git a/qa/tasks/ceph_fuse.py b/qa/tasks/ceph_fuse.py new file mode 100644 index 00000000000..454473759ad --- /dev/null +++ b/qa/tasks/ceph_fuse.py @@ -0,0 +1,207 @@ +""" +Ceph FUSE client task +""" +import contextlib +import logging +import os +import time +from cStringIO import StringIO + +from teuthology import misc +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Mount/unmount a ``ceph-fuse`` client. + + The config is optional and defaults to mounting on all clients. If + a config is given, it is expected to be a list of clients to do + this operation on. This lets you e.g. set up one client with + ``ceph-fuse`` and another with ``kclient``. 
+ + Example that mounts all clients:: + + tasks: + - ceph: + - ceph-fuse: + - interactive: + + Example that uses both ``kclient` and ``ceph-fuse``:: + + tasks: + - ceph: + - ceph-fuse: [client.0] + - kclient: [client.1] + - interactive: + + Example that enables valgrind: + + tasks: + - ceph: + - ceph-fuse: + client.0: + valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes] + - interactive: + + :param ctx: Context + :param config: Configuration + """ + log.info('Mounting ceph-fuse clients...') + fuse_daemons = {} + + testdir = misc.get_testdir(ctx) + + if config is None: + config = dict(('client.{id}'.format(id=id_), None) + for id_ in misc.all_roles_of_type(ctx.cluster, 'client')) + elif isinstance(config, list): + config = dict((name, None) for name in config) + + overrides = ctx.config.get('overrides', {}) + misc.deep_merge(config, overrides.get('ceph-fuse', {})) + + clients = list(misc.get_clients(ctx=ctx, roles=config.keys())) + + for id_, remote in clients: + client_config = config.get("client.%s" % id_) + if client_config is None: + client_config = {} + log.info("Client client.%s config is %s" % (id_, client_config)) + + daemon_signal = 'kill' + if client_config.get('coverage') or client_config.get('valgrind') is not None: + daemon_signal = 'term' + + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format( + id=id_, remote=remote,mnt=mnt)) + + remote.run( + args=[ + 'mkdir', + '--', + mnt, + ], + ) + + run_cmd=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'daemon-helper', + daemon_signal, + ] + run_cmd_tail=[ + 'ceph-fuse', + '-f', + '--name', 'client.{id}'.format(id=id_), + # TODO ceph-fuse doesn't understand dash dash '--', + mnt, + ] + + if client_config.get('valgrind') is not None: + run_cmd = misc.get_valgrind_args( + testdir, + 'client.{id}'.format(id=id_), + run_cmd, + client_config.get('valgrind'), + ) + + run_cmd.extend(run_cmd_tail) + + proc = remote.run( + args=run_cmd, + logger=log.getChild('ceph-fuse.{id}'.format(id=id_)), + stdin=run.PIPE, + wait=False, + ) + fuse_daemons[id_] = proc + + for id_, remote in clients: + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + wait_until_fuse_mounted( + remote=remote, + fuse=fuse_daemons[id_], + mountpoint=mnt, + ) + remote.run(args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],) + + try: + yield + finally: + log.info('Unmounting ceph-fuse clients...') + for id_, remote in clients: + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + try: + remote.run( + args=[ + 'sudo', + 'fusermount', + '-u', + mnt, + ], + ) + except run.CommandFailedError: + log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=remote.name)) + # abort the fuse mount, killing all hung processes + remote.run( + args=[ + 'if', 'test', '-e', '/sys/fs/fuse/connections/*/abort', + run.Raw(';'), 'then', + 'echo', + '1', + run.Raw('>'), + run.Raw('/sys/fs/fuse/connections/*/abort'), + run.Raw(';'), 'fi', + ], + ) + # make sure its unmounted + remote.run( + args=[ + 'sudo', + 'umount', + '-l', + '-f', + mnt, + ], + ) + + run.wait(fuse_daemons.itervalues()) + + for id_, remote in clients: + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + remote.run( + args=[ + 'rmdir', + '--', + mnt, + ], + ) + + +def wait_until_fuse_mounted(remote, fuse, mountpoint): + while True: + proc = remote.run( + args=[ + 'stat', + '--file-system', + '--printf=%T\n', + '--', + 
mountpoint, + ], + stdout=StringIO(), + ) + fstype = proc.stdout.getvalue().rstrip('\n') + if fstype == 'fuseblk': + break + log.debug('ceph-fuse not yet mounted, got fs type {fstype!r}'.format(fstype=fstype)) + + # it shouldn't have exited yet; exposes some trivial problems + assert not fuse.poll() + + time.sleep(5) + log.info('ceph-fuse is mounted on %s', mountpoint) diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py new file mode 100644 index 00000000000..8256c2220c9 --- /dev/null +++ b/qa/tasks/ceph_manager.py @@ -0,0 +1,1675 @@ +""" +ceph manager -- Thrasher and CephManager objects +""" +from cStringIO import StringIO +import random +import time +import gevent +import json +import logging +import threading +import os +from teuthology import misc as teuthology +from tasks.scrub import Scrubber +from teuthology.orchestra.remote import Remote + +log = logging.getLogger(__name__) + +def make_admin_daemon_dir(ctx, remote): + """ + Create /var/run/ceph directory on remote site. + + :param ctx: Context + :param remote: Remote site + """ + remote.run( + args=[ + 'sudo', + 'install', '-d', '-m0777', '--', '/var/run/ceph', + ], + ) + + +def mount_osd_data(ctx, remote, osd): + """ + Mount a remote OSD + + :param ctx: Context + :param remote: Remote site + :param ods: Osd name + """ + log.debug('Mounting data for osd.{o} on {r}'.format(o=osd, r=remote)) + if remote in ctx.disk_config.remote_to_roles_to_dev and osd in ctx.disk_config.remote_to_roles_to_dev[remote]: + dev = ctx.disk_config.remote_to_roles_to_dev[remote][osd] + mount_options = ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][osd] + fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][osd] + mnt = os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd)) + + log.info('Mounting osd.{o}: dev: {n}, mountpoint: {p}, type: {t}, options: {v}'.format( + o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options)) + + remote.run( + args=[ + 'sudo', + 'mount', + '-t', fstype, + '-o', ','.join(mount_options), + dev, + mnt, + ] + ) + + +class Thrasher: + """ + Object used to thrash Ceph + """ + def __init__(self, manager, config, logger=None): + self.ceph_manager = manager + self.ceph_manager.wait_for_clean() + osd_status = self.ceph_manager.get_osd_status() + self.in_osds = osd_status['in'] + self.live_osds = osd_status['live'] + self.out_osds = osd_status['out'] + self.dead_osds = osd_status['dead'] + self.stopping = False + self.logger = logger + self.config = config + self.revive_timeout = self.config.get("revive_timeout", 150) + if self.config.get('powercycle'): + self.revive_timeout += 120 + self.clean_wait = self.config.get('clean_wait', 0) + self.minin = self.config.get("min_in", 3) + self.chance_move_pg = self.config.get('chance_move_pg', 1.0) + + num_osds = self.in_osds + self.out_osds + self.max_pgs = self.config.get("max_pgs_per_pool_osd", 1200) * num_osds + if self.logger is not None: + self.log = lambda x: self.logger.info(x) + else: + def tmp(x): + """ + Implement log behavior + """ + print x + self.log = tmp + if self.config is None: + self.config = dict() + # prevent monitor from auto-marking things out while thrasher runs + # try both old and new tell syntax, in case we are testing old code + try: + manager.raw_cluster_cmd('--', 'tell', 'mon.*', 'injectargs', + '--mon-osd-down-out-interval 0') + except Exception: + manager.raw_cluster_cmd('--', 'mon', 'tell', '*', 'injectargs', + '--mon-osd-down-out-interval 0') + self.thread = gevent.spawn(self.do_thrash) + if self.config.get('powercycle') 
or not self.cmd_exists_on_osds("ceph-objectstore-tool"): + self.ceph_objectstore_tool = False + self.test_rm_past_intervals = False + if self.config.get('powercycle'): + self.log("Unable to test ceph-objectstore-tool, " + "powercycle testing") + else: + self.log("Unable to test ceph-objectstore-tool, " + "not available on all OSD nodes") + else: + self.ceph_objectstore_tool = \ + self.config.get('ceph_objectstore_tool', True) + self.test_rm_past_intervals = \ + self.config.get('test_rm_past_intervals', True) + + def cmd_exists_on_osds(self, cmd): + allremotes = self.ceph_manager.ctx.cluster.only(\ + teuthology.is_type('osd')).remotes.keys() + allremotes = list(set(allremotes)) + for remote in allremotes: + proc = remote.run(args=['type', cmd], wait=True, + check_status=False, stdout=StringIO(), + stderr=StringIO()) + if proc.exitstatus != 0: + return False; + return True; + + def kill_osd(self, osd=None, mark_down=False, mark_out=False): + """ + :param osd: Osd to be killed. + :mark_down: Mark down if true. + :mark_out: Mark out if true. + """ + if osd is None: + osd = random.choice(self.live_osds) + self.log("Killing osd %s, live_osds are %s" % (str(osd), str(self.live_osds))) + self.live_osds.remove(osd) + self.dead_osds.append(osd) + self.ceph_manager.kill_osd(osd) + if mark_down: + self.ceph_manager.mark_down_osd(osd) + if mark_out and osd in self.in_osds: + self.out_osd(osd) + if self.ceph_objectstore_tool: + self.log("Testing ceph-objectstore-tool on down osd") + (remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys() + FSPATH = self.ceph_manager.get_filepath() + JPATH = os.path.join(FSPATH, "journal") + exp_osd = imp_osd = osd + exp_remote = imp_remote = remote + # If an older osd is available we'll move a pg from there + if len(self.dead_osds) > 1 and random.random() < self.chance_move_pg: + exp_osd = random.choice(self.dead_osds[:-1]) + (exp_remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=exp_osd)).remotes.iterkeys() + if 'keyvaluestore_backend' in self.ceph_manager.ctx.ceph.conf['osd']: + prefix = "sudo ceph-objectstore-tool --data-path {fpath} --journal-path {jpath} --type keyvaluestore-dev --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH) + else: + prefix = "sudo ceph-objectstore-tool --data-path {fpath} --journal-path {jpath} --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH) + cmd = (prefix + "--op list-pgs").format(id=exp_osd) + proc = exp_remote.run(args=cmd, wait=True, + check_status=False, stdout=StringIO()) + if proc.exitstatus: + raise Exception("ceph-objectstore-tool: exp list-pgs failure with status {ret}".format(ret=proc.exitstatus)) + pgs = proc.stdout.getvalue().split('\n')[:-1] + if len(pgs) == 0: + self.log("No PGs found for osd.{osd}".format(osd=exp_osd)) + return + pg = random.choice(pgs) + exp_path = os.path.join(os.path.join(teuthology.get_testdir(self.ceph_manager.ctx), "data"), "exp.{pg}.{id}".format(pg=pg, id=exp_osd)) + # export + cmd = (prefix + "--op export --pgid {pg} --file {file}").format(id=exp_osd, pg=pg, file=exp_path) + proc = exp_remote.run(args=cmd) + if proc.exitstatus: + raise Exception("ceph-objectstore-tool: export failure with status {ret}".format(ret=proc.exitstatus)) + # remove + cmd = (prefix + "--op remove --pgid {pg}").format(id=exp_osd, pg=pg) + proc = exp_remote.run(args=cmd) + if proc.exitstatus: + raise Exception("ceph-objectstore-tool: remove failure with status {ret}".format(ret=proc.exitstatus)) + # 
If there are at least 2 dead osds we might move the pg + if exp_osd != imp_osd: + # If pg isn't already on this osd, then we will move it there + cmd = (prefix + "--op list-pgs").format(id=imp_osd) + proc = imp_remote.run(args=cmd, wait=True, + check_status=False, stdout=StringIO()) + if proc.exitstatus: + raise Exception("ceph-objectstore-tool: imp list-pgs failure with status {ret}".format(ret=proc.exitstatus)) + pgs = proc.stdout.getvalue().split('\n')[:-1] + if pg not in pgs: + self.log("Moving pg {pg} from osd.{fosd} to osd.{tosd}".format(pg=pg, fosd=exp_osd, tosd=imp_osd)) + if imp_remote != exp_remote: + # Copy export file to the other machine + self.log("Transfer export file from {srem} to {trem}".format(srem=exp_remote, trem=imp_remote)) + tmpexport = Remote.get_file(exp_remote, exp_path) + Remote.put_file(imp_remote, tmpexport, exp_path) + os.remove(tmpexport) + else: + # Can't move the pg after all + imp_osd = exp_osd + imp_remote = exp_remote + # import + cmd = (prefix + "--op import --file {file}") + cmd = cmd.format(id=imp_osd, file=exp_path) + proc = imp_remote.run(args=cmd, wait=True, check_status=False) + if proc.exitstatus == 10: + self.log("Pool went away before processing an import" + "...ignored") + elif proc.exitstatus == 11: + self.log("Attempt to import an incompatible export" + "...ignored") + elif proc.exitstatus: + raise Exception("ceph-objectstore-tool: " + "import failure with status {ret}". + format(ret=proc.exitstatus)) + cmd = "rm -f {file}".format(file=exp_path) + exp_remote.run(args=cmd) + if imp_remote != exp_remote: + imp_remote.run(args=cmd) + + def rm_past_intervals(self, osd=None): + """ + :param osd: Osd to find pg to remove past intervals + """ + if self.test_rm_past_intervals: + if osd is None: + osd = random.choice(self.dead_osds) + self.log("Use ceph_objectstore_tool to remove past intervals") + (remote,) = self.ceph_manager.ctx.\ + cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys() + FSPATH = self.ceph_manager.get_filepath() + JPATH = os.path.join(FSPATH, "journal") + if ('keyvaluestore_backend' in + self.ceph_manager.ctx.ceph.conf['osd']): + prefix = ("sudo ceph-objectstore-tool " + "--data-path {fpath} --journal-path {jpath} " + "--type keyvaluestore-dev " + "--log-file=" + "/var/log/ceph/objectstore_tool.\\$pid.log ". + format(fpath=FSPATH, jpath=JPATH)) + else: + prefix = ("sudo ceph-objectstore-tool " + "--data-path {fpath} --journal-path {jpath} " + "--log-file=" + "/var/log/ceph/objectstore_tool.\\$pid.log ". + format(fpath=FSPATH, jpath=JPATH)) + cmd = (prefix + "--op list-pgs").format(id=osd) + proc = remote.run(args=cmd, wait=True, + check_status=False, stdout=StringIO()) + if proc.exitstatus: + raise Exception("ceph_objectstore_tool: " + "exp list-pgs failure with status {ret}". + format(ret=proc.exitstatus)) + pgs = proc.stdout.getvalue().split('\n')[:-1] + if len(pgs) == 0: + self.log("No PGs found for osd.{osd}".format(osd=osd)) + return + pg = random.choice(pgs) + cmd = (prefix + "--op rm-past-intervals --pgid {pg}").\ + format(id=osd, pg=pg) + proc = remote.run(args=cmd) + if proc.exitstatus: + raise Exception("ceph_objectstore_tool: " + "rm-past-intervals failure with status {ret}". + format(ret=proc.exitstatus)) + + def blackhole_kill_osd(self, osd=None): + """ + If all else fails, kill the osd. + :param osd: Osd to be killed. 
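+        The osd is told to blackhole filestore writes before it is stopped
+        (see CephManager.blackhole_kill_osd).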
+ """ + if osd is None: + osd = random.choice(self.live_osds) + self.log("Blackholing and then killing osd %s, live_osds are %s" % (str(osd), str(self.live_osds))) + self.live_osds.remove(osd) + self.dead_osds.append(osd) + self.ceph_manager.blackhole_kill_osd(osd) + + def revive_osd(self, osd=None): + """ + Revive the osd. + :param osd: Osd to be revived. + """ + if osd is None: + osd = random.choice(self.dead_osds) + self.log("Reviving osd %s" % (str(osd),)) + self.live_osds.append(osd) + self.dead_osds.remove(osd) + self.ceph_manager.revive_osd(osd, self.revive_timeout) + + def out_osd(self, osd=None): + """ + Mark the osd out + :param osd: Osd to be marked. + """ + if osd is None: + osd = random.choice(self.in_osds) + self.log("Removing osd %s, in_osds are: %s" % (str(osd), str(self.in_osds))) + self.ceph_manager.mark_out_osd(osd) + self.in_osds.remove(osd) + self.out_osds.append(osd) + + def in_osd(self, osd=None): + """ + Mark the osd out + :param osd: Osd to be marked. + """ + if osd is None: + osd = random.choice(self.out_osds) + if osd in self.dead_osds: + return self.revive_osd(osd) + self.log("Adding osd %s" % (str(osd),)) + self.out_osds.remove(osd) + self.in_osds.append(osd) + self.ceph_manager.mark_in_osd(osd) + self.log("Added osd %s"%(str(osd),)) + + def reweight_osd(self, osd=None): + """ + Reweight an osd that is in + :param osd: Osd to be marked. + """ + if osd is None: + osd = random.choice(self.in_osds) + val = random.uniform(.1, 1.0) + self.log("Reweighting osd %s to %s" % (str(osd), str(val))) + self.ceph_manager.raw_cluster_cmd('osd', 'reweight', str(osd), str(val)) + + def primary_affinity(self, osd=None): + if osd is None: + osd = random.choice(self.in_osds) + if random.random() >= .5: + pa = random.random() + elif random.random() >= .5: + pa = 1 + else: + pa = 0 + self.log('Setting osd %s primary_affinity to %f' % (str(osd), pa)) + self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity', str(osd), str(pa)) + + def all_up(self): + """ + Make sure all osds are up and not out. + """ + while len(self.dead_osds) > 0: + self.log("reviving osd") + self.revive_osd() + while len(self.out_osds) > 0: + self.log("inning osd") + self.in_osd() + + def do_join(self): + """ + Break out of this Ceph loop + """ + self.stopping = True + self.thread.get() + + def grow_pool(self): + """ + Increase the size of the pool + """ + pool = self.ceph_manager.get_pool() + self.log("Growing pool %s"%(pool,)) + self.ceph_manager.expand_pool(pool, self.config.get('pool_grow_by', 10), self.max_pgs) + + def fix_pgp_num(self): + """ + Fix number of pgs in pool. + """ + pool = self.ceph_manager.get_pool() + self.log("fixing pg num pool %s"%(pool,)) + self.ceph_manager.set_pool_pgpnum(pool) + + def test_pool_min_size(self): + """ + Kill and revive all osds except one. 
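+        Every osd except one randomly chosen survivor is killed and marked
+        out, then the survivor is killed as well, the others are revived,
+        and the test waits for recovery to complete.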
+ """ + self.log("test_pool_min_size") + self.all_up() + self.ceph_manager.wait_for_recovery( + timeout=self.config.get('timeout') + ) + the_one = random.choice(self.in_osds) + self.log("Killing everyone but %s", the_one) + to_kill = filter(lambda x: x != the_one, self.in_osds) + [self.kill_osd(i) for i in to_kill] + [self.out_osd(i) for i in to_kill] + time.sleep(self.config.get("test_pool_min_size_time", 10)) + self.log("Killing %s" % (the_one,)) + self.kill_osd(the_one) + self.out_osd(the_one) + self.log("Reviving everyone but %s" % (the_one,)) + [self.revive_osd(i) for i in to_kill] + [self.in_osd(i) for i in to_kill] + self.log("Revived everyone but %s" % (the_one,)) + self.log("Waiting for clean") + self.ceph_manager.wait_for_recovery( + timeout=self.config.get('timeout') + ) + + def inject_pause(self, conf_key, duration, check_after, should_be_down): + """ + Pause injection testing. Check for osd being down when finished. + """ + the_one = random.choice(self.live_osds) + self.log("inject_pause on {osd}".format(osd = the_one)) + self.log( + "Testing {key} pause injection for duration {duration}".format( + key = conf_key, + duration = duration + )) + self.log( + "Checking after {after}, should_be_down={shouldbedown}".format( + after = check_after, + shouldbedown = should_be_down + )) + self.ceph_manager.set_config(the_one, **{conf_key:duration}) + if not should_be_down: + return + time.sleep(check_after) + status = self.ceph_manager.get_osd_status() + assert the_one in status['down'] + time.sleep(duration - check_after + 20) + status = self.ceph_manager.get_osd_status() + assert not the_one in status['down'] + + def test_backfill_full(self): + """ + Test backfills stopping when the replica fills up. + + First, use osd_backfill_full_ratio to simulate a now full + osd by setting it to 0 on all of the OSDs. + + Second, on a random subset, set + osd_debug_skip_full_check_in_backfill_reservation to force + the more complicated check in do_scan to be exercised. + + Then, verify that all backfills stop. + """ + self.log("injecting osd_backfill_full_ratio = 0") + for i in self.live_osds: + self.ceph_manager.set_config( + i, + osd_debug_skip_full_check_in_backfill_reservation = random.choice( + ['false', 'true']), + osd_backfill_full_ratio = 0) + for i in range(30): + status = self.ceph_manager.compile_pg_status() + if 'backfill' not in status.keys(): + break + self.log( + "waiting for {still_going} backfills".format( + still_going=status.get('backfill'))) + time.sleep(1) + assert('backfill' not in self.ceph_manager.compile_pg_status().keys()) + for i in self.live_osds: + self.ceph_manager.set_config( + i, + osd_debug_skip_full_check_in_backfill_reservation = \ + 'false', + osd_backfill_full_ratio = 0.85) + + def test_map_discontinuity(self): + """ + 1) Allows the osds to recover + 2) kills an osd + 3) allows the remaining osds to recover + 4) waits for some time + 5) revives the osd + This sequence should cause the revived osd to have to handle + a map gap since the mons would have trimmed + """ + while len(self.in_osds) < (self.minin + 1): + self.in_osd() + self.log("Waiting for recovery") + self.ceph_manager.wait_for_all_up( + timeout=self.config.get('timeout') + ) + # now we wait 20s for the pg status to change, if it takes longer, + # the test *should* fail! 
+ time.sleep(20) + self.ceph_manager.wait_for_clean( + timeout=self.config.get('timeout') + ) + + # now we wait 20s for the backfill replicas to hear about the clean + time.sleep(20) + self.log("Recovered, killing an osd") + self.kill_osd(mark_down=True, mark_out=True) + self.log("Waiting for clean again") + self.ceph_manager.wait_for_clean( + timeout=self.config.get('timeout') + ) + self.log("Waiting for trim") + time.sleep(int(self.config.get("map_discontinuity_sleep_time", 40))) + self.revive_osd() + + def choose_action(self): + """ + Random action selector. + """ + chance_down = self.config.get('chance_down', 0.4) + chance_test_min_size = self.config.get('chance_test_min_size', 0) + chance_test_backfill_full = self.config.get('chance_test_backfill_full', 0) + if isinstance(chance_down, int): + chance_down = float(chance_down) / 100 + minin = self.minin + minout = self.config.get("min_out", 0) + minlive = self.config.get("min_live", 2) + mindead = self.config.get("min_dead", 0) + + self.log('choose_action: min_in %d min_out %d min_live %d min_dead %d' % + (minin, minout, minlive, mindead)) + actions = [] + if len(self.in_osds) > minin: + actions.append((self.out_osd, 1.0,)) + if len(self.live_osds) > minlive and chance_down > 0: + actions.append((self.kill_osd, chance_down,)) + if len(self.dead_osds) > 1: + actions.append((self.rm_past_intervals, 1.0,)) + if len(self.out_osds) > minout: + actions.append((self.in_osd, 1.7,)) + if len(self.dead_osds) > mindead: + actions.append((self.revive_osd, 1.0,)) + if self.config.get('thrash_primary_affinity', True): + actions.append((self.primary_affinity, 1.0,)) + actions.append((self.reweight_osd, self.config.get('reweight_osd',.5),)) + actions.append((self.grow_pool, self.config.get('chance_pgnum_grow', 0),)) + actions.append((self.fix_pgp_num, self.config.get('chance_pgpnum_fix', 0),)) + actions.append((self.test_pool_min_size, chance_test_min_size,)) + actions.append((self.test_backfill_full, chance_test_backfill_full,)) + for key in ['heartbeat_inject_failure', 'filestore_inject_stall']: + for scenario in [ + (lambda: self.inject_pause(key, + self.config.get('pause_short', 3), + 0, + False), + self.config.get('chance_inject_pause_short', 1),), + (lambda: self.inject_pause(key, + self.config.get('pause_long', 80), + self.config.get('pause_check_after', 70), + True), + self.config.get('chance_inject_pause_long', 0),)]: + actions.append(scenario) + + total = sum([y for (x, y) in actions]) + val = random.uniform(0, total) + for (action, prob) in actions: + if val < prob: + return action + val -= prob + return None + + def do_thrash(self): + """ + Loop to select random actions to thrash ceph manager with. 
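+        Actions are picked by choose_action() with weights taken from the
+        task configuration; osds are periodically reweighted back to 1 and
+        optional scrubbing and map-discontinuity tests are run in between.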
+ """ + cleanint = self.config.get("clean_interval", 60) + scrubint = self.config.get("scrub_interval", -1) + maxdead = self.config.get("max_dead", 0) + delay = self.config.get("op_delay", 5) + self.log("starting do_thrash") + while not self.stopping: + self.log(" ".join([str(x) for x in ["in_osds: ", self.in_osds, " out_osds: ", self.out_osds, + "dead_osds: ", self.dead_osds, "live_osds: ", + self.live_osds]])) + if random.uniform(0, 1) < (float(delay) / cleanint): + while len(self.dead_osds) > maxdead: + self.revive_osd() + for osd in self.in_osds: + self.ceph_manager.raw_cluster_cmd('osd', 'reweight', + str(osd), str(1)) + if random.uniform(0, 1) < float( + self.config.get('chance_test_map_discontinuity', 0)): + self.test_map_discontinuity() + else: + self.ceph_manager.wait_for_recovery( + timeout=self.config.get('timeout') + ) + time.sleep(self.clean_wait) + if scrubint > 0: + if random.uniform(0, 1) < (float(delay) / scrubint): + self.log('Scrubbing while thrashing being performed') + Scrubber(self.ceph_manager, self.config) + self.choose_action()() + time.sleep(delay) + self.all_up() + +class CephManager: + """ + Ceph manager object. + Contains several local functions that form a bulk of this module. + """ + + REPLICATED_POOL = 1 + ERASURE_CODED_POOL = 3 + + def __init__(self, controller, ctx=None, config=None, logger=None): + self.lock = threading.RLock() + self.ctx = ctx + self.config = config + self.controller = controller + self.next_pool_id = 0 + self.created_erasure_pool = False + if (logger): + self.log = lambda x: logger.info(x) + else: + def tmp(x): + """ + implement log behavior. + """ + print x + self.log = tmp + if self.config is None: + self.config = dict() + pools = self.list_pools() + self.pools = {} + for pool in pools: + self.pools[pool] = self.get_pool_property(pool, 'pg_num') + + def raw_cluster_cmd(self, *args): + """ + Start ceph on a raw cluster. Return count + """ + testdir = teuthology.get_testdir(self.ctx) + ceph_args = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph', + ] + ceph_args.extend(args) + proc = self.controller.run( + args=ceph_args, + stdout=StringIO(), + ) + return proc.stdout.getvalue() + + def raw_cluster_cmd_result(self, *args): + """ + Start ceph on a cluster. Return success or failure information. + """ + testdir = teuthology.get_testdir(self.ctx) + ceph_args = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph', + ] + ceph_args.extend(args) + proc = self.controller.run( + args=ceph_args, + check_status=False, + ) + return proc.exitstatus + + def do_rados(self, remote, cmd): + """ + Execute a remote rados command. + """ + testdir = teuthology.get_testdir(self.ctx) + pre = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rados', + ] + pre.extend(cmd) + proc = remote.run( + args=pre, + wait=True, + ) + return proc + + def rados_write_objects( + self, pool, num_objects, size, timelimit, threads, cleanup=False): + """ + Write rados objects + Threads not used yet. 
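+        Thin wrapper around ``rados bench ... write``; unless cleanup is
+        true the benchmark objects are left in place (``--no-cleanup``).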
+ """ + args = [ + '-p', pool, + '--num-objects', num_objects, + '-b', size, + 'bench', timelimit, + 'write' + ] + if not cleanup: args.append('--no-cleanup') + return self.do_rados(self.controller, map(str, args)) + + def do_put(self, pool, obj, fname): + """ + Implement rados put operation + """ + return self.do_rados( + self.controller, + [ + '-p', + pool, + 'put', + obj, + fname + ] + ) + + def do_get(self, pool, obj, fname='/dev/null'): + """ + Implement rados get operation + """ + return self.do_rados( + self.controller, + [ + '-p', + pool, + 'stat', + obj, + fname + ] + ) + + def osd_admin_socket(self, osdnum, command, check_status=True): + """ + Remotely start up ceph specifying the admin socket + :param command a list of words to use as the command to the admin socket + """ + testdir = teuthology.get_testdir(self.ctx) + remote = None + for _remote, roles_for_host in self.ctx.cluster.remotes.iteritems(): + for id_ in teuthology.roles_of_type(roles_for_host, 'osd'): + if int(id_) == int(osdnum): + remote = _remote + assert remote is not None + args = [ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph', + '--admin-daemon', + '/var/run/ceph/ceph-osd.{id}.asok'.format(id=osdnum), + ] + args.extend(command) + return remote.run( + args=args, + stdout=StringIO(), + wait=True, + check_status=check_status + ) + + def get_pgid(self, pool, pgnum): + """ + :param pool: pool name + :param pgnum: pg number + :returns: a string representing this pg. + """ + poolnum = self.get_pool_num(pool) + pg_str = "{poolnum}.{pgnum}".format( + poolnum=poolnum, + pgnum=pgnum) + return pg_str + + def get_pg_replica(self, pool, pgnum): + """ + get replica for pool, pgnum (e.g. (data, 0)->0 + """ + output = self.raw_cluster_cmd("pg", "dump", '--format=json') + j = json.loads('\n'.join(output.split('\n')[1:])) + pg_str = self.get_pgid(pool, pgnum) + for pg in j['pg_stats']: + if pg['pgid'] == pg_str: + return int(pg['acting'][-1]) + assert False + + def get_pg_primary(self, pool, pgnum): + """ + get primary for pool, pgnum (e.g. (data, 0)->0 + """ + output = self.raw_cluster_cmd("pg", "dump", '--format=json') + j = json.loads('\n'.join(output.split('\n')[1:])) + pg_str = self.get_pgid(pool, pgnum) + for pg in j['pg_stats']: + if pg['pgid'] == pg_str: + return int(pg['acting'][0]) + assert False + + def get_pool_num(self, pool): + """ + get number for pool (e.g., data -> 2) + """ + out = self.raw_cluster_cmd('osd', 'dump', '--format=json') + j = json.loads('\n'.join(out.split('\n')[1:])) + for i in j['pools']: + if i['pool_name'] == pool: + return int(i['pool']) + assert False + + def list_pools(self): + """ + list all pool names + """ + out = self.raw_cluster_cmd('osd', 'dump', '--format=json') + j = json.loads('\n'.join(out.split('\n')[1:])) + self.log(j['pools']) + return [str(i['pool_name']) for i in j['pools']] + + def clear_pools(self): + """ + remove all pools + """ + [self.remove_pool(i) for i in self.list_pools()] + + def kick_recovery_wq(self, osdnum): + """ + Run kick_recovery_wq on cluster. + """ + return self.raw_cluster_cmd( + 'tell', "osd.%d" % (int(osdnum),), + 'debug', + 'kick_recovery_wq', + '0') + + def wait_run_admin_socket(self, osdnum, args=['version'], timeout=300): + """ + If osd_admin_socket call suceeds, return. Otherwise wait + five seconds and try again. 
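+        Raises an exception if the admin socket is still unavailable after
+        ``timeout`` seconds.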
+ """ + tries = 0 + while True: + proc = self.osd_admin_socket( + osdnum, args, + check_status=False) + if proc.exitstatus is 0: + break + else: + tries += 1 + if (tries * 5) > timeout: + raise Exception('timed out waiting for admin_socket to appear after osd.{o} restart'.format(o=osdnum)) + self.log( + "waiting on admin_socket for {osdnum}, {command}".format( + osdnum=osdnum, + command=args)) + time.sleep(5) + + def get_pool_dump(self, pool): + """ + get the osd dump part of a pool + """ + osd_dump = self.get_osd_dump_json() + for i in osd_dump['pools']: + if i['pool_name'] == pool: + return i + assert False + + def set_config(self, osdnum, **argdict): + """ + :param osdnum: osd number + :param argdict: dictionary containing values to set. + """ + for k, v in argdict.iteritems(): + self.wait_run_admin_socket( + osdnum, + ['config', 'set', str(k), str(v)]) + + def raw_cluster_status(self): + """ + Get status from cluster + """ + status = self.raw_cluster_cmd('status', '--format=json-pretty') + return json.loads(status) + + def raw_osd_status(self): + """ + Get osd status from cluster + """ + return self.raw_cluster_cmd('osd', 'dump') + + def get_osd_status(self): + """ + Get osd statuses sorted by states that the osds are in. + """ + osd_lines = filter( + lambda x: x.startswith('osd.') and (("up" in x) or ("down" in x)), + self.raw_osd_status().split('\n')) + self.log(osd_lines) + in_osds = [int(i[4:].split()[0]) for i in filter( + lambda x: " in " in x, + osd_lines)] + out_osds = [int(i[4:].split()[0]) for i in filter( + lambda x: " out " in x, + osd_lines)] + up_osds = [int(i[4:].split()[0]) for i in filter( + lambda x: " up " in x, + osd_lines)] + down_osds = [int(i[4:].split()[0]) for i in filter( + lambda x: " down " in x, + osd_lines)] + dead_osds = [int(x.id_) for x in + filter(lambda x: not x.running(), self.ctx.daemons.iter_daemons_of_role('osd'))] + live_osds = [int(x.id_) for x in + filter(lambda x: x.running(), self.ctx.daemons.iter_daemons_of_role('osd'))] + return { 'in' : in_osds, 'out' : out_osds, 'up' : up_osds, + 'down' : down_osds, 'dead' : dead_osds, 'live' : live_osds, + 'raw' : osd_lines} + + def get_num_pgs(self): + """ + Check cluster status for the number of pgs + """ + status = self.raw_cluster_status() + self.log(status) + return status['pgmap']['num_pgs'] + + def create_pool_with_unique_name(self, pg_num=16, ec_pool=False, ec_m=1, ec_k=2): + """ + Create a pool named unique_pool_X where X is unique. + """ + name = "" + with self.lock: + name = "unique_pool_%s" % (str(self.next_pool_id),) + self.next_pool_id += 1 + self.create_pool( + name, + pg_num, + ec_pool=ec_pool, + ec_m=ec_m, + ec_k=ec_k) + return name + + def create_pool(self, pool_name, pg_num=16, ec_pool=False, ec_m=1, ec_k=2): + """ + Create a pool named from the pool_name parameter. + :param pool_name: name of the pool being created. + :param pg_num: initial number of pgs. 
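+        :param ec_pool: create an erasure coded pool when true.
+        :param ec_m: m value of the erasure code profile.
+        :param ec_k: k value of the erasure code profile.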
+ """ + with self.lock: + assert isinstance(pool_name, str) + assert isinstance(pg_num, int) + assert pool_name not in self.pools + self.log("creating pool_name %s"%(pool_name,)) + if ec_pool and not self.created_erasure_pool: + self.created_erasure_pool = True + self.raw_cluster_cmd('osd', 'erasure-code-profile', 'set', 'teuthologyprofile', 'ruleset-failure-domain=osd', 'm='+str(ec_m), 'k='+str(ec_k)) + + if ec_pool: + self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num), str(pg_num), 'erasure', 'teuthologyprofile') + else: + self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num)) + self.pools[pool_name] = pg_num + + def remove_pool(self, pool_name): + """ + Remove the indicated pool + :param pool_name: Pool to be removed + """ + with self.lock: + assert isinstance(pool_name, str) + assert pool_name in self.pools + self.log("removing pool_name %s" % (pool_name,)) + del self.pools[pool_name] + self.do_rados( + self.controller, + ['rmpool', pool_name, pool_name, "--yes-i-really-really-mean-it"] + ) + + def get_pool(self): + """ + Pick a random pool + """ + with self.lock: + return random.choice(self.pools.keys()) + + def get_pool_pg_num(self, pool_name): + """ + Return the number of pgs in the pool specified. + """ + with self.lock: + assert isinstance(pool_name, str) + if pool_name in self.pools: + return self.pools[pool_name] + return 0 + + def get_pool_property(self, pool_name, prop): + """ + :param pool_name: pool + :param prop: property to be checked. + :returns: property as an int value. + """ + with self.lock: + assert isinstance(pool_name, str) + assert isinstance(prop, str) + output = self.raw_cluster_cmd( + 'osd', + 'pool', + 'get', + pool_name, + prop) + return int(output.split()[1]) + + def set_pool_property(self, pool_name, prop, val): + """ + :param pool_name: pool + :param prop: property to be set. + :param val: value to set. + + This routine retries if set operation fails. + """ + with self.lock: + assert isinstance(pool_name, str) + assert isinstance(prop, str) + assert isinstance(val, int) + tries = 0 + while True: + r = self.raw_cluster_cmd_result( + 'osd', + 'pool', + 'set', + pool_name, + prop, + str(val)) + if r != 11: # EAGAIN + break + tries += 1 + if tries > 50: + raise Exception('timed out getting EAGAIN when setting pool property %s %s = %s' % (pool_name, prop, val)) + self.log('got EAGAIN setting pool property, waiting a few seconds...') + time.sleep(2) + + def expand_pool(self, pool_name, by, max_pgs): + """ + Increase the number of pgs in a pool + """ + with self.lock: + assert isinstance(pool_name, str) + assert isinstance(by, int) + assert pool_name in self.pools + if self.get_num_creating() > 0: + return + if (self.pools[pool_name] + by) > max_pgs: + return + self.log("increase pool size by %d"%(by,)) + new_pg_num = self.pools[pool_name] + by + self.set_pool_property(pool_name, "pg_num", new_pg_num) + self.pools[pool_name] = new_pg_num + + def set_pool_pgpnum(self, pool_name): + """ + Set pgpnum property of pool_name pool. 
+ """ + with self.lock: + assert isinstance(pool_name, str) + assert pool_name in self.pools + if self.get_num_creating() > 0: + return + self.set_pool_property(pool_name, 'pgp_num', self.pools[pool_name]) + + def list_pg_missing(self, pgid): + """ + return list of missing pgs with the id specified + """ + r = None + offset = {} + while True: + out = self.raw_cluster_cmd('--', 'pg', pgid, 'list_missing', + json.dumps(offset)) + j = json.loads(out) + if r is None: + r = j + else: + r['objects'].extend(j['objects']) + if not 'more' in j: + break + if j['more'] == 0: + break + offset = j['objects'][-1]['oid'] + if 'more' in r: + del r['more'] + return r + + def get_pg_stats(self): + """ + Dump the cluster and get pg stats + """ + out = self.raw_cluster_cmd('pg', 'dump', '--format=json') + j = json.loads('\n'.join(out.split('\n')[1:])) + return j['pg_stats'] + + def compile_pg_status(self): + """ + Return a histogram of pg state values + """ + ret = {} + j = self.get_pg_stats() + for pg in j: + for status in pg['state'].split('+'): + if status not in ret: + ret[status] = 0 + ret[status] += 1 + return ret + + def pg_scrubbing(self, pool, pgnum): + """ + pg scrubbing wrapper + """ + pgstr = self.get_pgid(pool, pgnum) + stats = self.get_single_pg_stats(pgstr) + return 'scrub' in stats['state'] + + def pg_repairing(self, pool, pgnum): + """ + pg repairing wrapper + """ + pgstr = self.get_pgid(pool, pgnum) + stats = self.get_single_pg_stats(pgstr) + return 'repair' in stats['state'] + + def pg_inconsistent(self, pool, pgnum): + """ + pg inconsistent wrapper + """ + pgstr = self.get_pgid(pool, pgnum) + stats = self.get_single_pg_stats(pgstr) + return 'inconsistent' in stats['state'] + + def get_last_scrub_stamp(self, pool, pgnum): + """ + Get the timestamp of the last scrub. + """ + stats = self.get_single_pg_stats(self.get_pgid(pool, pgnum)) + return stats["last_scrub_stamp"] + + def do_pg_scrub(self, pool, pgnum, stype): + """ + Scrub pg and wait for scrubbing to finish + """ + init = self.get_last_scrub_stamp(pool, pgnum) + self.raw_cluster_cmd('pg', stype, self.get_pgid(pool, pgnum)) + while init == self.get_last_scrub_stamp(pool, pgnum): + self.log("waiting for scrub type %s"%(stype,)) + time.sleep(10) + + def get_single_pg_stats(self, pgid): + """ + Return pg for the pgid specified. + """ + all_stats = self.get_pg_stats() + + for pg in all_stats: + if pg['pgid'] == pgid: + return pg + + return None + + def get_osd_dump_json(self): + """ + osd dump --format=json converted to a python object + :returns: the python object + """ + out = self.raw_cluster_cmd('osd', 'dump', '--format=json') + return json.loads('\n'.join(out.split('\n')[1:])) + + def get_osd_dump(self): + """ + Dump osds + :returns: all osds + """ + out = self.raw_cluster_cmd('osd', 'dump', '--format=json') + j = json.loads('\n'.join(out.split('\n')[1:])) + return j['osds'] + + def get_stuck_pgs(self, type_, threshold): + """ + :returns: stuck pg information from the cluster + """ + out = self.raw_cluster_cmd('pg', 'dump_stuck', type_, str(threshold), + '--format=json') + return json.loads(out) + + def get_num_unfound_objects(self): + """ + Check cluster status to get the number of unfound objects + """ + status = self.raw_cluster_status() + self.log(status) + return status['pgmap'].get('unfound_objects', 0) + + def get_num_creating(self): + """ + Find the number of pgs in creating mode. 
+ """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if 'creating' in pg['state']: + num += 1 + return num + + def get_num_active_clean(self): + """ + Find the number of active and clean pgs. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if pg['state'].count('active') and pg['state'].count('clean') and not pg['state'].count('stale'): + num += 1 + return num + + def get_num_active_recovered(self): + """ + Find the number of active and recovered pgs. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if pg['state'].count('active') and not pg['state'].count('recover') and not pg['state'].count('backfill') and not pg['state'].count('stale'): + num += 1 + return num + + def get_is_making_recovery_progress(self): + """ + Return whether there is recovery progress discernable in the + raw cluster status + """ + status = self.raw_cluster_status() + kps = status['pgmap'].get('recovering_keys_per_sec', 0) + bps = status['pgmap'].get('recovering_bytes_per_sec', 0) + ops = status['pgmap'].get('recovering_objects_per_sec', 0) + return kps > 0 or bps > 0 or ops > 0 + + def get_num_active(self): + """ + Find the number of active pgs. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if pg['state'].count('active') and not pg['state'].count('stale'): + num += 1 + return num + + def get_num_down(self): + """ + Find the number of pgs that are down. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if (pg['state'].count('down') and not pg['state'].count('stale')) or \ + (pg['state'].count('incomplete') and not pg['state'].count('stale')): + num += 1 + return num + + def get_num_active_down(self): + """ + Find the number of pgs that are either active or down. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if (pg['state'].count('active') and not pg['state'].count('stale')) or \ + (pg['state'].count('down') and not pg['state'].count('stale')) or \ + (pg['state'].count('incomplete') and not pg['state'].count('stale')): + num += 1 + return num + + def is_clean(self): + """ + True if all pgs are clean + """ + return self.get_num_active_clean() == self.get_num_pgs() + + def is_recovered(self): + """ + True if all pgs have recovered + """ + return self.get_num_active_recovered() == self.get_num_pgs() + + def is_active_or_down(self): + """ + True if all pgs are active or down + """ + return self.get_num_active_down() == self.get_num_pgs() + + def wait_for_clean(self, timeout=None): + """ + Returns trues when all pgs are clean. + """ + self.log("waiting for clean") + start = time.time() + num_active_clean = self.get_num_active_clean() + while not self.is_clean(): + if timeout is not None: + if self.get_is_making_recovery_progress(): + self.log("making progress, resetting timeout") + start = time.time() + else: + self.log("no progress seen, keeping timeout for now") + assert time.time() - start < timeout, \ + 'failed to become clean before timeout expired' + cur_active_clean = self.get_num_active_clean() + if cur_active_clean != num_active_clean: + start = time.time() + num_active_clean = cur_active_clean + time.sleep(3) + self.log("clean!") + + def are_all_osds_up(self): + """ + Returns true if all osds are up. + """ + x = self.get_osd_dump() + return (len(x) == \ + sum([(y['up'] > 0) for y in x])) + + def wait_for_all_up(self, timeout=None): + """ + When this exits, either the timeout has expired, or all + osds are up. 
+ """ + self.log("waiting for all up") + start = time.time() + while not self.are_all_osds_up(): + if timeout is not None: + assert time.time() - start < timeout, \ + 'timeout expired in wait_for_all_up' + time.sleep(3) + self.log("all up!") + + def wait_for_recovery(self, timeout=None): + """ + Check peering. When this exists, we have recovered. + """ + self.log("waiting for recovery to complete") + start = time.time() + num_active_recovered = self.get_num_active_recovered() + while not self.is_recovered(): + if timeout is not None: + if self.get_is_making_recovery_progress(): + self.log("making progress, resetting timeout") + start = time.time() + else: + self.log("no progress seen, keeping timeout for now") + assert time.time() - start < timeout, \ + 'failed to recover before timeout expired' + cur_active_recovered = self.get_num_active_recovered() + if cur_active_recovered != num_active_recovered: + start = time.time() + num_active_recovered = cur_active_recovered + time.sleep(3) + self.log("recovered!") + + def wait_for_active(self, timeout=None): + """ + Check peering. When this exists, we are definitely active + """ + self.log("waiting for peering to complete") + start = time.time() + num_active = self.get_num_active() + while not self.is_active(): + if timeout is not None: + assert time.time() - start < timeout, \ + 'failed to recover before timeout expired' + cur_active = self.get_num_active() + if cur_active != num_active: + start = time.time() + num_active = cur_active + time.sleep(3) + self.log("active!") + + def wait_for_active_or_down(self, timeout=None): + """ + Check peering. When this exists, we are definitely either + active or down + """ + self.log("waiting for peering to complete or become blocked") + start = time.time() + num_active_down = self.get_num_active_down() + while not self.is_active_or_down(): + if timeout is not None: + assert time.time() - start < timeout, \ + 'failed to recover before timeout expired' + cur_active_down = self.get_num_active_down() + if cur_active_down != num_active_down: + start = time.time() + num_active_down = cur_active_down + time.sleep(3) + self.log("active or down!") + + def osd_is_up(self, osd): + """ + Wrapper for osd check + """ + osds = self.get_osd_dump() + return osds[osd]['up'] > 0 + + def wait_till_osd_is_up(self, osd, timeout=None): + """ + Loop waiting for osd. + """ + self.log('waiting for osd.%d to be up' % osd) + start = time.time() + while not self.osd_is_up(osd): + if timeout is not None: + assert time.time() - start < timeout, \ + 'osd.%d failed to come up before timeout expired' % osd + time.sleep(3) + self.log('osd.%d is up' % osd) + + def is_active(self): + """ + Wrapper to check if active + """ + return self.get_num_active() == self.get_num_pgs() + + def wait_till_active(self, timeout=None): + """ + Wait until osds are active. + """ + self.log("waiting till active") + start = time.time() + while not self.is_active(): + if timeout is not None: + assert time.time() - start < timeout, \ + 'failed to become active before timeout expired' + time.sleep(3) + self.log("active!") + + def mark_out_osd(self, osd): + """ + Wrapper to mark osd out. + """ + self.raw_cluster_cmd('osd', 'out', str(osd)) + + def kill_osd(self, osd): + """ + Kill osds by either power cycling (if indicated by the config) + or by stopping. 
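+        Powercycling uses the remote's IPMI console to power the node off;
+        otherwise the daemon is simply stopped.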
+ """ + if self.config.get('powercycle'): + (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys() + self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name)) + assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." + remote.console.power_off() + else: + self.ctx.daemons.get_daemon('osd', osd).stop() + + def blackhole_kill_osd(self, osd): + """ + Stop osd if nothing else works. + """ + self.raw_cluster_cmd('--', 'tell', 'osd.%d' % osd, + 'injectargs', '--filestore-blackhole') + time.sleep(2) + self.ctx.daemons.get_daemon('osd', osd).stop() + + def revive_osd(self, osd, timeout=150): + """ + Revive osds by either power cycling (if indicated by the config) + or by restarting. + """ + if self.config.get('powercycle'): + (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys() + self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name)) + assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." + remote.console.power_on() + if not remote.console.check_status(300): + raise Exception('Failed to revive osd.{o} via ipmi'.format(o=osd)) + teuthology.reconnect(self.ctx, 60, [remote]) + mount_osd_data(self.ctx, remote, str(osd)) + make_admin_daemon_dir(self.ctx, remote) + self.ctx.daemons.get_daemon('osd', osd).reset() + self.ctx.daemons.get_daemon('osd', osd).restart() + # wait for dump_ops_in_flight; this command doesn't appear + # until after the signal handler is installed and it is safe + # to stop the osd again without making valgrind leak checks + # unhappy. see #5924. + self.wait_run_admin_socket(osd, + args=['dump_ops_in_flight'], + timeout=timeout) + + def mark_down_osd(self, osd): + """ + Cluster command wrapper + """ + self.raw_cluster_cmd('osd', 'down', str(osd)) + + def mark_in_osd(self, osd): + """ + Cluster command wrapper + """ + self.raw_cluster_cmd('osd', 'in', str(osd)) + + + ## monitors + + def signal_mon(self, mon, sig): + """ + Wrapper to local get_deamon call + """ + self.ctx.daemons.get_daemon('mon', mon).signal(sig) + + def kill_mon(self, mon): + """ + Kill the monitor by either power cycling (if the config says so), + or by doing a stop. + """ + if self.config.get('powercycle'): + (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys() + self.log('kill_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name)) + assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." + remote.console.power_off() + else: + self.ctx.daemons.get_daemon('mon', mon).stop() + + def revive_mon(self, mon): + """ + Restart by either power cycling (if the config says so), + or by doing a normal restart. + """ + if self.config.get('powercycle'): + (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys() + self.log('revive_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name)) + assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." 
+ remote.console.power_on() + make_admin_daemon_dir(self.ctx, remote) + self.ctx.daemons.get_daemon('mon', mon).restart() + + def get_mon_status(self, mon): + """ + Extract all the monitor status information from the cluster + """ + addr = self.ctx.ceph.conf['mon.%s' % mon]['mon addr'] + out = self.raw_cluster_cmd('-m', addr, 'mon_status') + return json.loads(out) + + def get_mon_quorum(self): + """ + Extract monitor quorum information from the cluster + """ + out = self.raw_cluster_cmd('quorum_status') + j = json.loads(out) + self.log('quorum_status is %s' % out) + return j['quorum'] + + def wait_for_mon_quorum_size(self, size, timeout=300): + """ + Loop until quorum size is reached. + """ + self.log('waiting for quorum size %d' % size) + start = time.time() + while not len(self.get_mon_quorum()) == size: + if timeout is not None: + assert time.time() - start < timeout, \ + 'failed to reach quorum size %d before timeout expired' % size + time.sleep(3) + self.log("quorum is size %d" % size) + + def get_mon_health(self, debug=False): + """ + Extract all the monitor health information. + """ + out = self.raw_cluster_cmd('health', '--format=json') + if debug: + self.log('health:\n{h}'.format(h=out)) + return json.loads(out) + + ## metadata servers + + def kill_mds(self, mds): + """ + Powercyle if set in config, otherwise just stop. + """ + if self.config.get('powercycle'): + (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys() + self.log('kill_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name)) + assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." + remote.console.power_off() + else: + self.ctx.daemons.get_daemon('mds', mds).stop() + + def kill_mds_by_rank(self, rank): + """ + kill_mds wrapper to kill based on rank passed. + """ + status = self.get_mds_status_by_rank(rank) + self.kill_mds(status['name']) + + def revive_mds(self, mds, standby_for_rank=None): + """ + Revive mds -- do an ipmpi powercycle (if indicated by the config) + and then restart (using --hot-standby if specified. + """ + if self.config.get('powercycle'): + (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys() + self.log('revive_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name)) + assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." + remote.console.power_on() + make_admin_daemon_dir(self.ctx, remote) + args = [] + if standby_for_rank: + args.extend(['--hot-standby', standby_for_rank]) + self.ctx.daemons.get_daemon('mds', mds).restart(*args) + + def revive_mds_by_rank(self, rank, standby_for_rank=None): + """ + revive_mds wrapper to revive based on rank passed. + """ + status = self.get_mds_status_by_rank(rank) + self.revive_mds(status['name'], standby_for_rank) + + def get_mds_status(self, mds): + """ + Run cluster commands for the mds in order to get mds information + """ + out = self.raw_cluster_cmd('mds', 'dump', '--format=json') + j = json.loads(' '.join(out.splitlines()[1:])) + # collate; for dup ids, larger gid wins. + for info in j['info'].itervalues(): + if info['name'] == mds: + return info + return None + + def get_mds_status_by_rank(self, rank): + """ + Run cluster commands for the mds in order to get mds information + check rank. + """ + out = self.raw_cluster_cmd('mds', 'dump', '--format=json') + j = json.loads(' '.join(out.splitlines()[1:])) + # collate; for dup ids, larger gid wins. 
+ for info in j['info'].itervalues(): + if info['rank'] == rank: + return info + return None + + def get_mds_status_all(self): + """ + Run cluster command to extract all the mds status. + """ + out = self.raw_cluster_cmd('mds', 'dump', '--format=json') + j = json.loads(' '.join(out.splitlines()[1:])) + return j + + def get_filepath(self): + """ + Return path to osd data with {id} needing to be replaced + """ + return "/var/lib/ceph/osd/ceph-{id}" + +def utility_task(name): + """ + Generate ceph_manager subtask corresponding to ceph_manager + method name + """ + def task(ctx, config): + if config is None: + config = {} + args = config.get('args', []) + kwargs = config.get('kwargs', {}) + fn = getattr(ctx.manager, name) + fn(*args, **kwargs) + return task + +revive_osd = utility_task("revive_osd") +kill_osd = utility_task("kill_osd") +create_pool = utility_task("create_pool") +remove_pool = utility_task("remove_pool") +wait_for_clean = utility_task("wait_for_clean") +set_pool_property = utility_task("set_pool_property") diff --git a/qa/tasks/ceph_objectstore_tool.py b/qa/tasks/ceph_objectstore_tool.py new file mode 100644 index 00000000000..3b899de33b8 --- /dev/null +++ b/qa/tasks/ceph_objectstore_tool.py @@ -0,0 +1,679 @@ +""" +ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility +""" +from cStringIO import StringIO +import contextlib +import logging +import ceph_manager +from teuthology import misc as teuthology +import time +import os +import string +from teuthology.orchestra import run +import sys +import tempfile +import json +from util.rados import (rados, create_replicated_pool, create_ec_pool) +# from util.rados import (rados, create_ec_pool, +# create_replicated_pool, +# create_cache_pool) + +log = logging.getLogger(__name__) + +# Should get cluster name "ceph" from somewhere +# and normal path from osd_data and osd_journal in conf +FSPATH = "/var/lib/ceph/osd/ceph-{id}" +JPATH = "/var/lib/ceph/osd/ceph-{id}/journal" + + +def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR, + BASE_NAME, DATALINECOUNT): + objects = range(1, NUM_OBJECTS + 1) + for i in objects: + NAME = BASE_NAME + "{num}".format(num=i) + LOCALNAME = os.path.join(DATADIR, NAME) + + dataline = range(DATALINECOUNT) + fd = open(LOCALNAME, "w") + data = "This is the data for " + NAME + "\n" + for _ in dataline: + fd.write(data) + fd.close() + + +def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, + BASE_NAME, DATALINECOUNT): + + objects = range(1, NUM_OBJECTS + 1) + for i in objects: + NAME = BASE_NAME + "{num}".format(num=i) + DDNAME = os.path.join(DATADIR, NAME) + + remote.run(args=['rm', '-f', DDNAME]) + + dataline = range(DATALINECOUNT) + data = "This is the data for " + NAME + "\n" + DATA = "" + for _ in dataline: + DATA += data + teuthology.write_file(remote, DDNAME, DATA) + + +def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, + BASE_NAME, DATALINECOUNT, POOL, db, ec): + ERRORS = 0 + log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS)) + + objects = range(1, NUM_OBJECTS + 1) + for i in objects: + NAME = BASE_NAME + "{num}".format(num=i) + DDNAME = os.path.join(DATADIR, NAME) + + proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME], + wait=False) + # proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME]) + ret = proc.wait() + if ret != 0: + log.critical("Rados put failed with status {ret}". 
+ format(ret=proc.exitstatus)) + sys.exit(1) + + db[NAME] = {} + + keys = range(i) + db[NAME]["xattr"] = {} + for k in keys: + if k == 0: + continue + mykey = "key{i}-{k}".format(i=i, k=k) + myval = "val{i}-{k}".format(i=i, k=k) + proc = remote.run(args=['rados', '-p', POOL, 'setxattr', + NAME, mykey, myval]) + ret = proc.wait() + if ret != 0: + log.error("setxattr failed with {ret}".format(ret=ret)) + ERRORS += 1 + db[NAME]["xattr"][mykey] = myval + + # Erasure coded pools don't support omap + if ec: + continue + + # Create omap header in all objects but REPobject1 + if i != 1: + myhdr = "hdr{i}".format(i=i) + proc = remote.run(args=['rados', '-p', POOL, 'setomapheader', + NAME, myhdr]) + ret = proc.wait() + if ret != 0: + log.critical("setomapheader failed with {ret}".format(ret=ret)) + ERRORS += 1 + db[NAME]["omapheader"] = myhdr + + db[NAME]["omap"] = {} + for k in keys: + if k == 0: + continue + mykey = "okey{i}-{k}".format(i=i, k=k) + myval = "oval{i}-{k}".format(i=i, k=k) + proc = remote.run(args=['rados', '-p', POOL, 'setomapval', + NAME, mykey, myval]) + ret = proc.wait() + if ret != 0: + log.critical("setomapval failed with {ret}".format(ret=ret)) + db[NAME]["omap"][mykey] = myval + + return ERRORS + + +def get_lines(filename): + tmpfd = open(filename, "r") + line = True + lines = [] + while line: + line = tmpfd.readline().rstrip('\n') + if line: + lines += [line] + tmpfd.close() + os.unlink(filename) + return lines + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run ceph_objectstore_tool test + + The config should be as follows:: + + ceph_objectstore_tool: + objects: 20 # + pgnum: 12 + """ + + if config is None: + config = {} + assert isinstance(config, dict), \ + 'ceph_objectstore_tool task only accepts a dict for configuration' + + log.info('Beginning ceph_objectstore_tool...') + + log.debug(config) + log.debug(ctx) + clients = ctx.cluster.only(teuthology.is_type('client')) + assert len(clients.remotes) > 0, 'Must specify at least 1 client' + (cli_remote, _) = clients.remotes.popitem() + log.debug(cli_remote) + + # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys())) + # client = clients.popitem() + # log.info(client) + osds = ctx.cluster.only(teuthology.is_type('osd')) + log.info("OSDS") + log.info(osds) + log.info(osds.remotes) + + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + config=config, + logger=log.getChild('ceph_manager'), + ) + ctx.manager = manager + + while (len(manager.get_osd_status()['up']) != + len(manager.get_osd_status()['raw'])): + time.sleep(10) + while (len(manager.get_osd_status()['in']) != + len(manager.get_osd_status()['up'])): + time.sleep(10) + manager.raw_cluster_cmd('osd', 'set', 'noout') + manager.raw_cluster_cmd('osd', 'set', 'nodown') + + PGNUM = config.get('pgnum', 12) + log.info("pgnum: {num}".format(num=PGNUM)) + + ERRORS = 0 + + REP_POOL = "rep_pool" + REP_NAME = "REPobject" + create_replicated_pool(cli_remote, REP_POOL, PGNUM) + ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME) + + EC_POOL = "ec_pool" + EC_NAME = "ECobject" + create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM) + ERRORS += test_objectstore(ctx, config, cli_remote, + EC_POOL, EC_NAME, ec=True) + + if ERRORS == 0: + log.info("TEST PASSED") + else: + log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS)) + + assert ERRORS == 0 + + try: + yield + finally: + log.info('Ending 
ceph_objectstore_tool') + + +def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): + manager = ctx.manager + + osds = ctx.cluster.only(teuthology.is_type('osd')) + + TEUTHDIR = teuthology.get_testdir(ctx) + DATADIR = os.path.join(TEUTHDIR, "data") + DATALINECOUNT = 10000 + ERRORS = 0 + NUM_OBJECTS = config.get('objects', 10) + log.info("objects: {num}".format(num=NUM_OBJECTS)) + + pool_dump = manager.get_pool_dump(REP_POOL) + REPID = pool_dump['pool'] + + log.debug("repid={num}".format(num=REPID)) + + db = {} + + LOCALDIR = tempfile.mkdtemp("cod") + + cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR, + REP_NAME, DATALINECOUNT) + allremote = [] + allremote.append(cli_remote) + allremote += osds.remotes.keys() + allremote = list(set(allremote)) + for remote in allremote: + cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, + REP_NAME, DATALINECOUNT) + + ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR, + REP_NAME, DATALINECOUNT, REP_POOL, db, ec) + + pgs = {} + for stats in manager.get_pg_stats(): + if stats["pgid"].find(str(REPID) + ".") != 0: + continue + if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL: + for osd in stats["acting"]: + pgs.setdefault(osd, []).append(stats["pgid"]) + elif pool_dump["type"] == ceph_manager.CephManager.ERASURE_CODED_POOL: + shard = 0 + for osd in stats["acting"]: + pgs.setdefault(osd, []).append("{pgid}s{shard}". + format(pgid=stats["pgid"], + shard=shard)) + shard += 1 + else: + raise Exception("{pool} has an unexpected type {type}". + format(pool=REP_POOL, type=pool_dump["type"])) + + log.info(pgs) + log.info(db) + + for osd in manager.get_osd_status()['up']: + manager.kill_osd(osd) + time.sleep(5) + + pgswithobjects = set() + objsinpg = {} + + # Test --op list and generate json for all objects + log.info("Test --op list by generating json for all objects") + prefix = ("sudo ceph-objectstore-tool " + "--data-path {fpath} " + "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH) + for remote in osds.remotes.iterkeys(): + log.debug(remote) + log.debug(osds.remotes[remote]) + for role in osds.remotes[remote]: + if string.find(role, "osd.") != 0: + continue + osdid = int(role.split('.')[1]) + log.info("process osd.{id} on {remote}". + format(id=osdid, remote=remote)) + cmd = (prefix + "--op list").format(id=osdid) + proc = remote.run(args=cmd.split(), check_status=False, + stdout=StringIO()) + if proc.exitstatus != 0: + log.error("Bad exit status {ret} from --op list request". + format(ret=proc.exitstatus)) + ERRORS += 1 + else: + for pgline in proc.stdout.getvalue().splitlines(): + if not pgline: + continue + (pg, obj) = json.loads(pgline) + name = obj['oid'] + if name in db: + pgswithobjects.add(pg) + objsinpg.setdefault(pg, []).append(name) + db[name].setdefault("pg2json", + {})[pg] = json.dumps(obj) + + log.info(db) + log.info(pgswithobjects) + log.info(objsinpg) + + if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL: + # Test get-bytes + log.info("Test get-bytes and set-bytes") + for basename in db.keys(): + file = os.path.join(DATADIR, basename) + GETNAME = os.path.join(DATADIR, "get") + SETNAME = os.path.join(DATADIR, "set") + + for remote in osds.remotes.iterkeys(): + for role in osds.remotes[remote]: + if string.find(role, "osd.") != 0: + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg, JSON in db[basename]["pg2json"].iteritems(): + if pg in pgs[osdid]: + cmd = ((prefix + "--pgid {pg}"). 
+ format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += ("get-bytes {fname}". + format(fname=GETNAME).split()) + proc = remote.run(args=cmd, check_status=False) + if proc.exitstatus != 0: + remote.run(args="rm -f {getfile}". + format(getfile=GETNAME).split()) + log.error("Bad exit status {ret}". + format(ret=proc.exitstatus)) + ERRORS += 1 + continue + cmd = ("diff -q {file} {getfile}". + format(file=file, getfile=GETNAME)) + proc = remote.run(args=cmd.split()) + if proc.exitstatus != 0: + log.error("Data from get-bytes differ") + # log.debug("Got:") + # cat_file(logging.DEBUG, GETNAME) + # log.debug("Expected:") + # cat_file(logging.DEBUG, file) + ERRORS += 1 + remote.run(args="rm -f {getfile}". + format(getfile=GETNAME).split()) + + data = ("put-bytes going into {file}\n". + format(file=file)) + teuthology.write_file(remote, SETNAME, data) + cmd = ((prefix + "--pgid {pg}"). + format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += ("set-bytes {fname}". + format(fname=SETNAME).split()) + proc = remote.run(args=cmd, check_status=False) + proc.wait() + if proc.exitstatus != 0: + log.info("set-bytes failed for object {obj} " + "in pg {pg} osd.{id} ret={ret}". + format(obj=basename, pg=pg, + id=osdid, ret=proc.exitstatus)) + ERRORS += 1 + + cmd = ((prefix + "--pgid {pg}"). + format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += "get-bytes -".split() + proc = remote.run(args=cmd, check_status=False, + stdout=StringIO()) + proc.wait() + if proc.exitstatus != 0: + log.error("get-bytes after " + "set-bytes ret={ret}". + format(ret=proc.exitstatus)) + ERRORS += 1 + else: + if data != proc.stdout.getvalue(): + log.error("Data inconsistent after " + "set-bytes, got:") + log.error(proc.stdout.getvalue()) + ERRORS += 1 + + cmd = ((prefix + "--pgid {pg}"). + format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += ("set-bytes {fname}". + format(fname=file).split()) + proc = remote.run(args=cmd, check_status=False) + proc.wait() + if proc.exitstatus != 0: + log.info("set-bytes failed for object {obj} " + "in pg {pg} osd.{id} ret={ret}". + format(obj=basename, pg=pg, + id=osdid, ret=proc.exitstatus)) + ERRORS += 1 + + log.info("Test list-attrs get-attr") + for basename in db.keys(): + file = os.path.join(DATADIR, basename) + GETNAME = os.path.join(DATADIR, "get") + SETNAME = os.path.join(DATADIR, "set") + + for remote in osds.remotes.iterkeys(): + for role in osds.remotes[remote]: + if string.find(role, "osd.") != 0: + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg, JSON in db[basename]["pg2json"].iteritems(): + if pg in pgs[osdid]: + cmd = ((prefix + "--pgid {pg}"). + format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += ["list-attrs"] + proc = remote.run(args=cmd, check_status=False, + stdout=StringIO(), stderr=StringIO()) + proc.wait() + if proc.exitstatus != 0: + log.error("Bad exit status {ret}". + format(ret=proc.exitstatus)) + ERRORS += 1 + continue + keys = proc.stdout.getvalue().split() + values = dict(db[basename]["xattr"]) + + for key in keys: + if (key == "_" or + key == "snapset" or + key == "hinfo_key"): + continue + key = key.strip("_") + if key not in values: + log.error("The key {key} should be present". + format(key=key)) + ERRORS += 1 + continue + exp = values.pop(key) + cmd = ((prefix + "--pgid {pg}"). 
+ format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += ("get-attr {key}". + format(key="_" + key).split()) + proc = remote.run(args=cmd, check_status=False, + stdout=StringIO()) + proc.wait() + if proc.exitstatus != 0: + log.error("get-attr failed with {ret}". + format(ret=proc.exitstatus)) + ERRORS += 1 + continue + val = proc.stdout.getvalue() + if exp != val: + log.error("For key {key} got value {got} " + "instead of {expected}". + format(key=key, got=val, + expected=exp)) + ERRORS += 1 + if "hinfo_key" in keys: + cmd_prefix = prefix.format(id=osdid) + cmd = """ + expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64) + echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} - + test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder + echo $expected | base64 --decode | \ + {prefix} --pgid {pg} '{json}' set-attr {key} - + test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected + """.format(prefix=cmd_prefix, pg=pg, json=JSON, + key="hinfo_key") + log.debug(cmd) + proc = remote.run(args=['bash', '-e', '-x', + '-c', cmd], + check_status=False, + stdout=StringIO(), + stderr=StringIO()) + proc.wait() + if proc.exitstatus != 0: + log.error("failed with " + + str(proc.exitstatus)) + log.error(proc.stdout.getvalue() + " " + + proc.stderr.getvalue()) + ERRORS += 1 + + if len(values) != 0: + log.error("Not all keys found, remaining keys:") + log.error(values) + + log.info("Test pg info") + for remote in osds.remotes.iterkeys(): + for role in osds.remotes[remote]: + if string.find(role, "osd.") != 0: + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg in pgs[osdid]: + cmd = ((prefix + "--op info --pgid {pg}"). + format(id=osdid, pg=pg).split()) + proc = remote.run(args=cmd, check_status=False, + stdout=StringIO()) + proc.wait() + if proc.exitstatus != 0: + log.error("Failure of --op info command with {ret}". + format(proc.exitstatus)) + ERRORS += 1 + continue + info = proc.stdout.getvalue() + if not str(pg) in info: + log.error("Bad data from info: {info}".format(info=info)) + ERRORS += 1 + + log.info("Test pg logging") + for remote in osds.remotes.iterkeys(): + for role in osds.remotes[remote]: + if string.find(role, "osd.") != 0: + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg in pgs[osdid]: + cmd = ((prefix + "--op log --pgid {pg}"). + format(id=osdid, pg=pg).split()) + proc = remote.run(args=cmd, check_status=False, + stdout=StringIO()) + proc.wait() + if proc.exitstatus != 0: + log.error("Getting log failed for pg {pg} " + "from osd.{id} with {ret}". + format(pg=pg, id=osdid, ret=proc.exitstatus)) + ERRORS += 1 + continue + HASOBJ = pg in pgswithobjects + MODOBJ = "modify" in proc.stdout.getvalue() + if HASOBJ != MODOBJ: + log.error("Bad log for pg {pg} from osd.{id}". + format(pg=pg, id=osdid)) + MSG = (HASOBJ and [""] or ["NOT "])[0] + log.error("Log should {msg}have a modify entry". + format(msg=MSG)) + ERRORS += 1 + + log.info("Test pg export") + EXP_ERRORS = 0 + for remote in osds.remotes.iterkeys(): + for role in osds.remotes[remote]: + if string.find(role, "osd.") != 0: + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg in pgs[osdid]: + fpath = os.path.join(DATADIR, "osd{id}.{pg}". + format(id=osdid, pg=pg)) + + cmd = ((prefix + "--op export --pgid {pg} --file {file}"). 
+ format(id=osdid, pg=pg, file=fpath)) + proc = remote.run(args=cmd, check_status=False, + stdout=StringIO()) + proc.wait() + if proc.exitstatus != 0: + log.error("Exporting failed for pg {pg} " + "on osd.{id} with {ret}". + format(pg=pg, id=osdid, ret=proc.exitstatus)) + EXP_ERRORS += 1 + + ERRORS += EXP_ERRORS + + log.info("Test pg removal") + RM_ERRORS = 0 + for remote in osds.remotes.iterkeys(): + for role in osds.remotes[remote]: + if string.find(role, "osd.") != 0: + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg in pgs[osdid]: + cmd = ((prefix + "--op remove --pgid {pg}"). + format(pg=pg, id=osdid)) + proc = remote.run(args=cmd, check_status=False, + stdout=StringIO()) + proc.wait() + if proc.exitstatus != 0: + log.error("Removing failed for pg {pg} " + "on osd.{id} with {ret}". + format(pg=pg, id=osdid, ret=proc.exitstatus)) + RM_ERRORS += 1 + + ERRORS += RM_ERRORS + + IMP_ERRORS = 0 + if EXP_ERRORS == 0 and RM_ERRORS == 0: + log.info("Test pg import") + + for remote in osds.remotes.iterkeys(): + for role in osds.remotes[remote]: + if string.find(role, "osd.") != 0: + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg in pgs[osdid]: + fpath = os.path.join(DATADIR, "osd{id}.{pg}". + format(id=osdid, pg=pg)) + + cmd = ((prefix + "--op import --file {file}"). + format(id=osdid, file=fpath)) + proc = remote.run(args=cmd, check_status=False, + stdout=StringIO()) + proc.wait() + if proc.exitstatus != 0: + log.error("Import failed from {file} with {ret}". + format(file=fpath, ret=proc.exitstatus)) + IMP_ERRORS += 1 + else: + log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES") + + ERRORS += IMP_ERRORS + + if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0: + log.info("Restarting OSDs....") + # They are still look to be up because of setting nodown + for osd in manager.get_osd_status()['up']: + manager.revive_osd(osd) + # Wait for health? + time.sleep(5) + # Let scrub after test runs verify consistency of all copies + log.info("Verify replicated import data") + objects = range(1, NUM_OBJECTS + 1) + for i in objects: + NAME = REP_NAME + "{num}".format(num=i) + TESTNAME = os.path.join(DATADIR, "gettest") + REFNAME = os.path.join(DATADIR, NAME) + + proc = rados(ctx, cli_remote, + ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False) + + ret = proc.wait() + if ret != 0: + log.error("After import, rados get failed with {ret}". + format(ret=proc.exitstatus)) + ERRORS += 1 + continue + + cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME, + ref=REFNAME) + proc = cli_remote.run(args=cmd, check_status=False) + proc.wait() + if proc.exitstatus != 0: + log.error("Data comparison failed for {obj}".format(obj=NAME)) + ERRORS += 1 + + return ERRORS diff --git a/qa/tasks/chef.py b/qa/tasks/chef.py new file mode 100644 index 00000000000..9a9f1bc2c82 --- /dev/null +++ b/qa/tasks/chef.py @@ -0,0 +1,35 @@ +""" +Chef-solo task +""" +import logging + +from teuthology.orchestra import run +from teuthology import misc + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Run chef-solo on all nodes. 
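+
+ The ``config`` argument is ignored by this task; an illustrative
+ (hypothetical) suite fragment would be::
+
+ tasks:
+ - chef:
+ - ceph: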
+ """ + log.info('Running chef-solo...') + + run.wait( + ctx.cluster.run( + args=[ + 'wget', +# '-q', + '-O-', +# 'https://raw.github.com/ceph/ceph-qa-chef/master/solo/solo-from-scratch', + 'http://git.ceph.com/?p=ceph-qa-chef.git;a=blob_plain;f=solo/solo-from-scratch;hb=HEAD', + run.Raw('|'), + 'sh', + '-x', + ], + wait=False, + ) + ) + + log.info('Reconnecting after ceph-qa-chef run') + misc.reconnect(ctx, 10) #Reconnect for ulimit and other ceph-qa-chef changes + diff --git a/qa/tasks/cifs_mount.py b/qa/tasks/cifs_mount.py new file mode 100644 index 00000000000..b282b0b7dfb --- /dev/null +++ b/qa/tasks/cifs_mount.py @@ -0,0 +1,137 @@ +""" +Mount cifs clients. Unmount when finished. +""" +import contextlib +import logging +import os + +from teuthology import misc as teuthology +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Mount/unmount a cifs client. + + The config is optional and defaults to mounting on all clients. If + a config is given, it is expected to be a list of clients to do + this operation on. + + Example that starts smbd and mounts cifs on all nodes:: + + tasks: + - ceph: + - samba: + - cifs-mount: + - interactive: + + Example that splits smbd and cifs: + + tasks: + - ceph: + - samba: [samba.0] + - cifs-mount: [client.0] + - ceph-fuse: [client.1] + - interactive: + + Example that specifies the share name: + + tasks: + - ceph: + - ceph-fuse: + - samba: + samba.0: + cephfuse: "{testdir}/mnt.0" + - cifs-mount: + client.0: + share: cephfuse + + :param ctx: Context + :param config: Configuration + """ + log.info('Mounting cifs clients...') + + if config is None: + config = dict(('client.{id}'.format(id=id_), None) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')) + elif isinstance(config, list): + config = dict((name, None) for name in config) + + clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys())) + + from .samba import get_sambas + samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')] + sambas = list(get_sambas(ctx=ctx, roles=samba_roles)) + (ip, _) = sambas[0][1].ssh.get_transport().getpeername() + log.info('samba ip: {ip}'.format(ip=ip)) + + for id_, remote in clients: + mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_)) + log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format( + id=id_, remote=remote,mnt=mnt)) + + remote.run( + args=[ + 'mkdir', + '--', + mnt, + ], + ) + + rolestr = 'client.{id_}'.format(id_=id_) + unc = "ceph" + log.info("config: {c}".format(c=config)) + if config[rolestr] is not None and 'share' in config[rolestr]: + unc = config[rolestr]['share'] + + remote.run( + args=[ + 'sudo', + 'mount', + '-t', + 'cifs', + '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc), + '-o', + 'username=ubuntu,password=ubuntu', + mnt, + ], + ) + + remote.run( + args=[ + 'sudo', + 'chown', + 'ubuntu:ubuntu', + '{m}/'.format(m=mnt), + ], + ) + + try: + yield + finally: + log.info('Unmounting cifs clients...') + for id_, remote in clients: + remote.run( + args=[ + 'sudo', + 'umount', + mnt, + ], + ) + for id_, remote in clients: + while True: + try: + remote.run( + args=[ + 'rmdir', '--', mnt, + run.Raw('2>&1'), + run.Raw('|'), + 'grep', 'Device or resource busy', + ], + ) + import time + time.sleep(1) + except Exception: + break diff --git a/qa/tasks/cram.py b/qa/tasks/cram.py new file mode 100644 index 00000000000..b4539d497d5 --- /dev/null +++ b/qa/tasks/cram.py @@ -0,0 
+1,135 @@ +""" +Cram tests +""" +import logging +import os + +from teuthology import misc as teuthology +from teuthology.parallel import parallel +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Run all cram tests from the specified urls on the specified + clients. Each client runs tests in parallel. + + Limitations: + Tests must have a .t suffix. Tests with duplicate names will + overwrite each other, so only the last one will run. + + For example:: + + tasks: + - ceph: + - cram: + clients: + client.0: + - http://ceph.com/qa/test.t + - http://ceph.com/qa/test2.t] + client.1: [http://ceph.com/qa/test.t] + + You can also run a list of cram tests on all clients:: + + tasks: + - ceph: + - cram: + clients: + all: [http://ceph.com/qa/test.t] + + :param ctx: Context + :param config: Configuration + """ + assert isinstance(config, dict) + assert 'clients' in config and isinstance(config['clients'], dict), \ + 'configuration must contain a dictionary of clients' + + clients = teuthology.replace_all_with_clients(ctx.cluster, + config['clients']) + testdir = teuthology.get_testdir(ctx) + + try: + for client, tests in clients.iteritems(): + (remote,) = ctx.cluster.only(client).remotes.iterkeys() + client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) + remote.run( + args=[ + 'mkdir', '--', client_dir, + run.Raw('&&'), + 'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir), + run.Raw('&&'), + '{tdir}/virtualenv/bin/pip'.format(tdir=testdir), + 'install', 'cram', + ], + ) + for test in tests: + log.info('fetching test %s for %s', test, client) + assert test.endswith('.t'), 'tests must end in .t' + remote.run( + args=[ + 'wget', '-nc', '-nv', '-P', client_dir, '--', test, + ], + ) + + with parallel() as p: + for role in clients.iterkeys(): + p.spawn(_run_tests, ctx, role) + finally: + for client, tests in clients.iteritems(): + (remote,) = ctx.cluster.only(client).remotes.iterkeys() + client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) + test_files = set([test.rsplit('/', 1)[1] for test in tests]) + + # remove test files unless they failed + for test_file in test_files: + abs_file = os.path.join(client_dir, test_file) + remote.run( + args=[ + 'test', '-f', abs_file + '.err', + run.Raw('||'), + 'rm', '-f', '--', abs_file, + ], + ) + + # ignore failure since more than one client may + # be run on a host, and the client dir should be + # non-empty if the test failed + remote.run( + args=[ + 'rm', '-rf', '--', + '{tdir}/virtualenv'.format(tdir=testdir), + run.Raw(';'), + 'rmdir', '--ignore-fail-on-non-empty', client_dir, + ], + ) + +def _run_tests(ctx, role): + """ + For each role, check to make sure it's a client, then run the cram on that client + + :param ctx: Context + :param role: Roles + """ + assert isinstance(role, basestring) + PREFIX = 'client.' 
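+ # roles look like 'client.0'; the bare id recovered below is exported
+ # to the tests as CEPH_ID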
+ assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + ceph_ref = ctx.summary.get('ceph-sha1', 'master') + + testdir = teuthology.get_testdir(ctx) + log.info('Running tests for %s...', role) + remote.run( + args=[ + run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)), + run.Raw('CEPH_ID="{id}"'.format(id=id_)), + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/virtualenv/bin/cram'.format(tdir=testdir), + '-v', '--', + run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)), + ], + logger=log.getChild(role), + ) diff --git a/qa/tasks/devstack.py b/qa/tasks/devstack.py new file mode 100644 index 00000000000..c5cd41b06bd --- /dev/null +++ b/qa/tasks/devstack.py @@ -0,0 +1,382 @@ +#!/usr/bin/env python +import contextlib +import logging +from cStringIO import StringIO +import textwrap +from configparser import ConfigParser +import time + +from teuthology.orchestra import run +from teuthology import misc +from teuthology.contextutil import nested + +log = logging.getLogger(__name__) + +DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git' +DS_STABLE_BRANCHES = ("havana", "grizzly") + +is_devstack_node = lambda role: role.startswith('devstack') +is_osd_node = lambda role: role.startswith('osd') + + +@contextlib.contextmanager +def task(ctx, config): + if config is None: + config = {} + if not isinstance(config, dict): + raise TypeError("config must be a dict") + with nested(lambda: install(ctx=ctx, config=config), + lambda: smoke(ctx=ctx, config=config), + ): + yield + + +@contextlib.contextmanager +def install(ctx, config): + """ + Install OpenStack DevStack and configure it to use a Ceph cluster for + Glance and Cinder. + + Requires one node with a role 'devstack' + + Since devstack runs rampant on the system it's used on, typically you will + want to reprovision that machine after using devstack on it. + + Also, the default 2GB of RAM that is given to vps nodes is insufficient. I + recommend 4GB. 
Downburst can be instructed to give 4GB to a vps node by + adding this to the yaml: + + downburst: + ram: 4G + + This was created using documentation found here: + https://github.com/openstack-dev/devstack/blob/master/README.md + http://ceph.com/docs/master/rbd/rbd-openstack/ + """ + if config is None: + config = {} + if not isinstance(config, dict): + raise TypeError("config must be a dict") + + devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0] + an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0] + + devstack_branch = config.get("branch", "master") + install_devstack(devstack_node, devstack_branch) + try: + configure_devstack_and_ceph(ctx, config, devstack_node, an_osd_node) + yield + finally: + pass + + +def install_devstack(devstack_node, branch="master"): + log.info("Cloning DevStack repo...") + + args = ['git', 'clone', DEVSTACK_GIT_REPO] + devstack_node.run(args=args) + + if branch != "master": + if branch in DS_STABLE_BRANCHES and not branch.startswith("stable"): + branch = "stable/" + branch + log.info("Checking out {branch} branch...".format(branch=branch)) + cmd = "cd devstack && git checkout " + branch + devstack_node.run(args=cmd) + + log.info("Installing DevStack...") + args = ['cd', 'devstack', run.Raw('&&'), './stack.sh'] + devstack_node.run(args=args) + + +def configure_devstack_and_ceph(ctx, config, devstack_node, ceph_node): + pool_size = config.get('pool_size', '128') + create_pools(ceph_node, pool_size) + distribute_ceph_conf(devstack_node, ceph_node) + # This is where we would install python-ceph and ceph-common but it appears + # the ceph task does that for us. + generate_ceph_keys(ceph_node) + distribute_ceph_keys(devstack_node, ceph_node) + secret_uuid = set_libvirt_secret(devstack_node, ceph_node) + update_devstack_config_files(devstack_node, secret_uuid) + set_apache_servername(devstack_node) + # Rebooting is the most-often-used method of restarting devstack services + misc.reboot(devstack_node) + start_devstack(devstack_node) + restart_apache(devstack_node) + + +def create_pools(ceph_node, pool_size): + log.info("Creating pools on Ceph cluster...") + + for pool_name in ['volumes', 'images', 'backups']: + args = ['ceph', 'osd', 'pool', 'create', pool_name, pool_size] + ceph_node.run(args=args) + + +def distribute_ceph_conf(devstack_node, ceph_node): + log.info("Copying ceph.conf to DevStack node...") + + ceph_conf_path = '/etc/ceph/ceph.conf' + ceph_conf = misc.get_file(ceph_node, ceph_conf_path, sudo=True) + misc.sudo_write_file(devstack_node, ceph_conf_path, ceph_conf) + + +def generate_ceph_keys(ceph_node): + log.info("Generating Ceph keys...") + + ceph_auth_cmds = [ + ['ceph', 'auth', 'get-or-create', 'client.cinder', 'mon', + 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'], # noqa + ['ceph', 'auth', 'get-or-create', 'client.glance', 'mon', + 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'], # noqa + ['ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon', + 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'], # noqa + ] + for cmd in ceph_auth_cmds: + ceph_node.run(args=cmd) + + +def distribute_ceph_keys(devstack_node, ceph_node): + log.info("Copying Ceph keys to DevStack node...") + + def copy_key(from_remote, key_name, to_remote, dest_path, owner): + key_stringio = StringIO() + from_remote.run( + args=['ceph', 'auth', 'get-or-create', key_name], + stdout=key_stringio) + 
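+ # rewind the in-memory keyring buffer before writing it out to the
+ # devstack node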
key_stringio.seek(0) + misc.sudo_write_file(to_remote, dest_path, + key_stringio, owner=owner) + keys = [ + dict(name='client.glance', + path='/etc/ceph/ceph.client.glance.keyring', + # devstack appears to just want root:root + #owner='glance:glance', + ), + dict(name='client.cinder', + path='/etc/ceph/ceph.client.cinder.keyring', + # devstack appears to just want root:root + #owner='cinder:cinder', + ), + dict(name='client.cinder-backup', + path='/etc/ceph/ceph.client.cinder-backup.keyring', + # devstack appears to just want root:root + #owner='cinder:cinder', + ), + ] + for key_dict in keys: + copy_key(ceph_node, key_dict['name'], devstack_node, + key_dict['path'], key_dict.get('owner')) + + +def set_libvirt_secret(devstack_node, ceph_node): + log.info("Setting libvirt secret...") + + cinder_key_stringio = StringIO() + ceph_node.run(args=['ceph', 'auth', 'get-key', 'client.cinder'], + stdout=cinder_key_stringio) + cinder_key = cinder_key_stringio.getvalue().strip() + + uuid_stringio = StringIO() + devstack_node.run(args=['uuidgen'], stdout=uuid_stringio) + uuid = uuid_stringio.getvalue().strip() + + secret_path = '/tmp/secret.xml' + secret_template = textwrap.dedent(""" + + {uuid} + + client.cinder secret + + """) + misc.sudo_write_file(devstack_node, secret_path, + secret_template.format(uuid=uuid)) + devstack_node.run(args=['sudo', 'virsh', 'secret-define', '--file', + secret_path]) + devstack_node.run(args=['sudo', 'virsh', 'secret-set-value', '--secret', + uuid, '--base64', cinder_key]) + return uuid + + +def update_devstack_config_files(devstack_node, secret_uuid): + log.info("Updating DevStack config files to use Ceph...") + + def backup_config(node, file_name, backup_ext='.orig.teuth'): + node.run(args=['cp', '-f', file_name, file_name + backup_ext]) + + def update_config(config_name, config_stream, update_dict, + section='DEFAULT'): + parser = ConfigParser() + parser.read_file(config_stream) + for (key, value) in update_dict.items(): + parser.set(section, key, value) + out_stream = StringIO() + parser.write(out_stream) + out_stream.seek(0) + return out_stream + + updates = [ + dict(name='/etc/glance/glance-api.conf', options=dict( + default_store='rbd', + rbd_store_user='glance', + rbd_store_pool='images', + show_image_direct_url='True',)), + dict(name='/etc/cinder/cinder.conf', options=dict( + volume_driver='cinder.volume.drivers.rbd.RBDDriver', + rbd_pool='volumes', + rbd_ceph_conf='/etc/ceph/ceph.conf', + rbd_flatten_volume_from_snapshot='false', + rbd_max_clone_depth='5', + glance_api_version='2', + rbd_user='cinder', + rbd_secret_uuid=secret_uuid, + backup_driver='cinder.backup.drivers.ceph', + backup_ceph_conf='/etc/ceph/ceph.conf', + backup_ceph_user='cinder-backup', + backup_ceph_chunk_size='134217728', + backup_ceph_pool='backups', + backup_ceph_stripe_unit='0', + backup_ceph_stripe_count='0', + restore_discard_excess_bytes='true', + )), + dict(name='/etc/nova/nova.conf', options=dict( + libvirt_images_type='rbd', + libvirt_images_rbd_pool='volumes', + libvirt_images_rbd_ceph_conf='/etc/ceph/ceph.conf', + rbd_user='cinder', + rbd_secret_uuid=secret_uuid, + libvirt_inject_password='false', + libvirt_inject_key='false', + libvirt_inject_partition='-2', + )), + ] + + for update in updates: + file_name = update['name'] + options = update['options'] + config_str = misc.get_file(devstack_node, file_name, sudo=True) + config_stream = StringIO(config_str) + backup_config(devstack_node, file_name) + new_config_stream = update_config(file_name, config_stream, options) + 
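+ # write the patched config back in place; backup_config() saved the
+ # original with a '.orig.teuth' suffix above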
misc.sudo_write_file(devstack_node, file_name, new_config_stream) + + +def set_apache_servername(node): + # Apache complains: "Could not reliably determine the server's fully + # qualified domain name, using 127.0.0.1 for ServerName" + # So, let's make sure it knows its name. + log.info("Setting Apache ServerName...") + + hostname = node.hostname + config_file = '/etc/apache2/conf.d/servername' + misc.sudo_write_file(node, config_file, + "ServerName {name}".format(name=hostname)) + + +def start_devstack(devstack_node): + log.info("Patching devstack start script...") + # This causes screen to start headless - otherwise rejoin-stack.sh fails + # because there is no terminal attached. + cmd = "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh" + devstack_node.run(args=cmd) + + log.info("Starting devstack...") + cmd = "cd devstack && ./rejoin-stack.sh" + devstack_node.run(args=cmd) + + # This was added because I was getting timeouts on Cinder requests - which + # were trying to access Keystone on port 5000. A more robust way to handle + # this would be to introduce a wait-loop on devstack_node that checks to + # see if a service is listening on port 5000. + log.info("Waiting 30s for devstack to start...") + time.sleep(30) + + +def restart_apache(node): + node.run(args=['sudo', '/etc/init.d/apache2', 'restart'], wait=True) + + +@contextlib.contextmanager +def exercise(ctx, config): + log.info("Running devstack exercises...") + + if config is None: + config = {} + if not isinstance(config, dict): + raise TypeError("config must be a dict") + + devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0] + + # TODO: save the log *and* preserve failures + #devstack_archive_dir = create_devstack_archive(ctx, devstack_node) + + try: + #cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format( # noqa + # dir=devstack_archive_dir) + cmd = "cd devstack && ./exercise.sh" + devstack_node.run(args=cmd, wait=True) + yield + finally: + pass + + +def create_devstack_archive(ctx, devstack_node): + test_dir = misc.get_testdir(ctx) + devstack_archive_dir = "{test_dir}/archive/devstack".format( + test_dir=test_dir) + devstack_node.run(args="mkdir -p " + devstack_archive_dir) + return devstack_archive_dir + + +@contextlib.contextmanager +def smoke(ctx, config): + log.info("Running a basic smoketest...") + + devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0] + an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0] + + try: + create_volume(devstack_node, an_osd_node, 'smoke0', 1) + yield + finally: + pass + + +def create_volume(devstack_node, ceph_node, vol_name, size): + """ + :param size: The size of the volume, in GB + """ + size = str(size) + log.info("Creating a {size}GB volume named {name}...".format( + name=vol_name, + size=size)) + args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create', + '--display-name', vol_name, size] + out_stream = StringIO() + devstack_node.run(args=args, stdout=out_stream, wait=True) + vol_info = parse_os_table(out_stream.getvalue()) + log.debug("Volume info: %s", str(vol_info)) + + out_stream = StringIO() + try: + ceph_node.run(args="rbd --id cinder ls -l volumes", stdout=out_stream, + wait=True) + except run.CommandFailedError: + log.debug("Original rbd call failed; retrying without '--id cinder'") + ceph_node.run(args="rbd ls -l volumes", stdout=out_stream, + wait=True) + + assert vol_info['id'] in out_stream.getvalue(), \ + "Volume not found on Ceph cluster" + assert vol_info['size'] == size, \ + 
"Volume size on Ceph cluster is different than specified" + return vol_info['id'] + + +def parse_os_table(table_str): + out_dict = dict() + for line in table_str.split('\n'): + if line.startswith('|'): + items = line.split() + out_dict[items[1]] = items[3] + return out_dict diff --git a/qa/tasks/die_on_err.py b/qa/tasks/die_on_err.py new file mode 100644 index 00000000000..bf422ae547d --- /dev/null +++ b/qa/tasks/die_on_err.py @@ -0,0 +1,70 @@ +""" +Raise exceptions on osd coredumps or test err directories +""" +import contextlib +import logging +import time +from teuthology.orchestra import run + +import ceph_manager +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Die if {testdir}/err exists or if an OSD dumps core + """ + if config is None: + config = {} + + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + log.info('num_osds is %s' % num_osds) + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < num_osds: + time.sleep(10) + + testdir = teuthology.get_testdir(ctx) + + while True: + for i in range(num_osds): + (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys() + p = osd_remote.run( + args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ], + wait=True, + check_status=False, + ) + exit_status = p.exitstatus + + if exit_status == 0: + log.info("osd %d has an error" % i) + raise Exception("osd %d error" % i) + + log_path = '/var/log/ceph/osd.%d.log' % (i) + + p = osd_remote.run( + args = [ + 'tail', '-1', log_path, + run.Raw('|'), + 'grep', '-q', 'end dump' + ], + wait=True, + check_status=False, + ) + exit_status = p.exitstatus + + if exit_status == 0: + log.info("osd %d dumped core" % i) + raise Exception("osd %d dumped core" % i) + + time.sleep(5) diff --git a/qa/tasks/divergent_priors.py b/qa/tasks/divergent_priors.py new file mode 100644 index 00000000000..e10f67547ca --- /dev/null +++ b/qa/tasks/divergent_priors.py @@ -0,0 +1,148 @@ +""" +Special case divergence test +""" +import logging +import time + +import ceph_manager +from teuthology import misc as teuthology +from util.rados import rados + + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling of divergent entries with prior_version + prior to log_tail + + config: none + + Requires 3 osds. 
+ """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'divergent_priors task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + ctx.manager = manager + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.raw_cluster_cmd('osd', 'set', 'noout') + manager.raw_cluster_cmd('osd', 'set', 'noin') + manager.raw_cluster_cmd('osd', 'set', 'nodown') + manager.wait_for_clean() + + # something that is always there + dummyfile = '/etc/fstab' + dummyfile2 = '/etc/resolv.conf' + + # create 1 pg pool + log.info('creating foo') + manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1') + + osds = [0, 1, 2] + for i in osds: + manager.set_config(i, osd_min_pg_log_entries=1) + + # determine primary + divergent = manager.get_pg_primary('foo', 0) + log.info("primary and soon to be divergent is %d", divergent) + non_divergent = [0,1,2] + non_divergent.remove(divergent) + + log.info('writing initial objects') + # write 1000 objects + for i in range(1000): + rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) + + manager.wait_for_clean() + + # blackhole non_divergent + log.info("blackholing osds %s", str(non_divergent)) + for i in non_divergent: + manager.set_config(i, filestore_blackhole='') + + # write 1 (divergent) object + log.info('writing divergent object existing_0') + rados( + ctx, mon, ['-p', 'foo', 'put', 'existing_0', dummyfile2], + wait=False) + time.sleep(10) + mon.run( + args=['killall', '-9', 'rados'], + wait=True, + check_status=False) + + # kill all the osds + log.info('killing all the osds') + for i in osds: + manager.kill_osd(i) + for i in osds: + manager.mark_down_osd(i) + for i in osds: + manager.mark_out_osd(i) + + # bring up non-divergent + log.info("bringing up non_divergent %s", str(non_divergent)) + for i in non_divergent: + manager.revive_osd(i) + for i in non_divergent: + manager.mark_in_osd(i) + + log.info('making log long to prevent backfill') + for i in non_divergent: + manager.set_config(i, osd_min_pg_log_entries=100000) + + # write 1 non-divergent object (ensure that old divergent one is divergent) + log.info('writing non-divergent object existing_1') + rados(ctx, mon, ['-p', 'foo', 'put', 'existing_1', dummyfile2]) + + manager.wait_for_recovery() + + # ensure no recovery + log.info('delay recovery') + for i in non_divergent: + manager.set_config(i, osd_recovery_delay_start=100000) + + # bring in our divergent friend + log.info("revive divergent %d", divergent) + manager.revive_osd(divergent) + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + + log.info('delay recovery divergent') + manager.set_config(divergent, osd_recovery_delay_start=100000) + log.info('mark divergent in') + manager.mark_in_osd(divergent) + + log.info('wait for peering') + rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile]) + + log.info("killing divergent %d", divergent) + manager.kill_osd(divergent) + log.info("reviving divergent %d", divergent) + manager.revive_osd(divergent) + + log.info('allowing recovery') + for i in non_divergent: + manager.set_config(i, osd_recovery_delay_start=0) + + log.info('reading existing_0') + exit_status = 
rados(ctx, mon, + ['-p', 'foo', 'get', 'existing_0', + '-o', '/tmp/existing']) + assert exit_status is 0 + log.info("success") diff --git a/qa/tasks/dump_stuck.py b/qa/tasks/dump_stuck.py new file mode 100644 index 00000000000..9e1780f0156 --- /dev/null +++ b/qa/tasks/dump_stuck.py @@ -0,0 +1,146 @@ +""" +Dump_stuck command +""" +import logging +import re +import time + +import ceph_manager +from teuthology import misc as teuthology + + +log = logging.getLogger(__name__) + +def check_stuck(manager, num_inactive, num_unclean, num_stale, timeout=10): + """ + Do checks. Make sure get_stuck_pgs return the right amout of information, then + extract health information from the raw_cluster_cmd and compare the results with + values passed in. This passes if all asserts pass. + + :param num_manager: Ceph manager + :param num_inactive: number of inaactive pages that are stuck + :param num_unclean: number of unclean pages that are stuck + :paran num_stale: number of stale pages that are stuck + :param timeout: timeout value for get_stuck_pgs calls + """ + inactive = manager.get_stuck_pgs('inactive', timeout) + assert len(inactive) == num_inactive + unclean = manager.get_stuck_pgs('unclean', timeout) + assert len(unclean) == num_unclean + stale = manager.get_stuck_pgs('stale', timeout) + assert len(stale) == num_stale + + # check health output as well + health = manager.raw_cluster_cmd('health') + log.debug('ceph health is: %s', health) + if num_inactive > 0: + m = re.search('(\d+) pgs stuck inactive', health) + assert int(m.group(1)) == num_inactive + if num_unclean > 0: + m = re.search('(\d+) pgs stuck unclean', health) + assert int(m.group(1)) == num_unclean + if num_stale > 0: + m = re.search('(\d+) pgs stuck stale', health) + assert int(m.group(1)) == num_stale + +def task(ctx, config): + """ + Test the dump_stuck command. 
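+
+ The task accepts no configuration and expects a cluster with exactly
+ two OSDs; an illustrative fragment::
+
+ tasks:
+ - ceph:
+ - dump_stuck: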
+ + :param ctx: Context + :param config: Configuration + """ + assert config is None, \ + 'dump_stuck requires no configuration' + assert teuthology.num_instances_of_type(ctx.cluster, 'osd') == 2, \ + 'dump_stuck requires exactly 2 osds' + + timeout = 60 + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.wait_for_clean(timeout) + + manager.raw_cluster_cmd('tell', 'mon.0', 'injectargs', '--', +# '--mon-osd-report-timeout 90', + '--mon-pg-stuck-threshold 10') + + check_stuck( + manager, + num_inactive=0, + num_unclean=0, + num_stale=0, + ) + num_pgs = manager.get_num_pgs() + + manager.mark_out_osd(0) + time.sleep(timeout) + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.wait_for_recovery(timeout) + + check_stuck( + manager, + num_inactive=0, + num_unclean=num_pgs, + num_stale=0, + ) + + manager.mark_in_osd(0) + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.wait_for_clean(timeout) + + check_stuck( + manager, + num_inactive=0, + num_unclean=0, + num_stale=0, + ) + + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'): + manager.kill_osd(id_) + manager.mark_down_osd(id_) + + starttime = time.time() + done = False + while not done: + try: + check_stuck( + manager, + num_inactive=0, + num_unclean=0, + num_stale=num_pgs, + ) + done = True + except AssertionError: + # wait up to 15 minutes to become stale + if time.time() - starttime > 900: + raise + + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'): + manager.revive_osd(id_) + manager.mark_in_osd(id_) + while True: + try: + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + break + except Exception: + log.exception('osds must not be started yet, waiting...') + time.sleep(1) + manager.wait_for_clean(timeout) + + check_stuck( + manager, + num_inactive=0, + num_unclean=0, + num_stale=0, + ) diff --git a/qa/tasks/ec_lost_unfound.py b/qa/tasks/ec_lost_unfound.py new file mode 100644 index 00000000000..f12ae74c12f --- /dev/null +++ b/qa/tasks/ec_lost_unfound.py @@ -0,0 +1,134 @@ +""" +Lost_unfound +""" +import logging +import ceph_manager +from teuthology import misc as teuthology +from util.rados import rados + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling of lost objects on an ec pool. 
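+ For example (illustrative only)::
+
+ tasks:
+ - ceph:
+ - ec_lost_unfound: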
+ + A pretty rigid cluster is brought up andtested by this task + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'lost_unfound task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') + manager.wait_for_clean() + + + pool = manager.create_pool_with_unique_name( + ec_pool=True, + ec_m=2, + ec_k=2) + + # something that is always there + dummyfile = '/etc/fstab' + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', pool, 'put', 'dummy', dummyfile]) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.wait_for_recovery() + + # create old objects + for f in range(1, 10): + rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', pool, 'rm', 'existed_%d' % f]) + + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.1', + 'injectargs', + '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000' + ) + + manager.kill_osd(0) + manager.mark_down_osd(0) + manager.kill_osd(3) + manager.mark_down_osd(3) + + for f in range(1, 10): + rados(ctx, mon, ['-p', pool, 'put', 'new_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile]) + + # take out osd.1 and a necessary shard of those objects. 
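+ # osd.1 holds a needed shard of the newly written objects, so marking
+ # it lost should leave them unfound (asserted below)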
+ manager.kill_osd(1) + manager.mark_down_osd(1) + manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it') + manager.revive_osd(0) + manager.wait_till_osd_is_up(0) + manager.revive_osd(3) + manager.wait_till_osd_is_up(3) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') + manager.wait_till_active() + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') + + # verify that there are unfound objects + unfound = manager.get_num_unfound_objects() + log.info("there are %d unfound objects" % unfound) + assert unfound + + # mark stuff lost + pgs = manager.get_pg_stats() + for pg in pgs: + if pg['stat_sum']['num_objects_unfound'] > 0: + # verify that i can list them direct from the osd + log.info('listing missing/lost in %s state %s', pg['pgid'], + pg['state']); + m = manager.list_pg_missing(pg['pgid']) + log.info('%s' % m) + assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound'] + + log.info("reverting unfound in %s", pg['pgid']) + manager.raw_cluster_cmd('pg', pg['pgid'], + 'mark_unfound_lost', 'delete') + else: + log.info("no unfound in %s", pg['pgid']) + + manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.3', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') + manager.wait_for_recovery() + + # verify result + for f in range(1, 10): + err = rados(ctx, mon, ['-p', pool, 'get', 'new_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', pool, 'get', 'existed_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', pool, 'get', 'existing_%d' % f, '-']) + assert err + + # see if osd.1 can cope + manager.revive_osd(1) + manager.wait_till_osd_is_up(1) + manager.wait_for_clean() diff --git a/qa/tasks/filestore_idempotent.py b/qa/tasks/filestore_idempotent.py new file mode 100644 index 00000000000..ac43fb0ffe2 --- /dev/null +++ b/qa/tasks/filestore_idempotent.py @@ -0,0 +1,81 @@ +""" +Filestore/filejournal handler +""" +import logging +from teuthology.orchestra import run +import random + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test filestore/filejournal handling of non-idempotent events. + + Currently this is a kludge; we require the ceph task preceeds us just + so that we get the tarball installed to run the test binary. + + :param ctx: Context + :param config: Configuration + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + # just use the first client... 
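+ # the run_seed_to_range.sh workload below runs entirely on this one remote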
+ client = clients[0]; + (remote,) = ctx.cluster.only(client).remotes.iterkeys() + + testdir = teuthology.get_testdir(ctx) + + dir = '%s/data/test.%s' % (testdir, client) + + seed = str(int(random.uniform(1,100))) + + try: + log.info('creating a working dir') + remote.run(args=['mkdir', dir]) + remote.run( + args=[ + 'cd', dir, + run.Raw('&&'), + 'wget','-q', '-Orun_seed_to.sh', + 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to.sh;hb=HEAD', + run.Raw('&&'), + 'wget','-q', '-Orun_seed_to_range.sh', + 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to_range.sh;hb=HEAD', + run.Raw('&&'), + 'chmod', '+x', 'run_seed_to.sh', 'run_seed_to_range.sh', + ]); + + log.info('running a series of tests') + proc = remote.run( + args=[ + 'cd', dir, + run.Raw('&&'), + './run_seed_to_range.sh', seed, '50', '300', + ], + wait=False, + check_status=False) + result = proc.wait(); + + if result != 0: + remote.run( + args=[ + 'cp', '-a', dir, '{tdir}/archive/idempotent_failure'.format(tdir=testdir), + ]) + raise Exception("./run_seed_to_range.sh errored out") + + finally: + remote.run(args=[ + 'rm', '-rf', '--', dir + ]) + diff --git a/qa/tasks/kclient.py b/qa/tasks/kclient.py new file mode 100644 index 00000000000..e06f84561d8 --- /dev/null +++ b/qa/tasks/kclient.py @@ -0,0 +1,112 @@ +""" +Mount/unmount a ``kernel`` client. +""" +import contextlib +import logging +import os + +from teuthology import misc as teuthology +from util.kclient import write_secret_file + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Mount/unmount a ``kernel`` client. + + The config is optional and defaults to mounting on all clients. If + a config is given, it is expected to be a list of clients to do + this operation on. This lets you e.g. set up one client with + ``ceph-fuse`` and another with ``kclient``. 
+ + Example that mounts all clients:: + + tasks: + - ceph: + - kclient: + - interactive: + + Example that uses both ``kclient` and ``ceph-fuse``:: + + tasks: + - ceph: + - ceph-fuse: [client.0] + - kclient: [client.1] + - interactive: + + :param ctx: Context + :param config: Configuration + """ + log.info('Mounting kernel clients...') + assert config is None or isinstance(config, list), \ + "task kclient got invalid config" + + if config is None: + config = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + clients = list(teuthology.get_clients(ctx=ctx, roles=config)) + + testdir = teuthology.get_testdir(ctx) + + for id_, remote in clients: + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format( + id=id_, remote=remote, mnt=mnt)) + + # figure mon ips + remotes_and_roles = ctx.cluster.remotes.items() + roles = [roles for (remote_, roles) in remotes_and_roles] + ips = [host for (host, port) in (remote_.ssh.get_transport().getpeername() for (remote_, roles) in remotes_and_roles)] + mons = teuthology.get_mons(roles, ips).values() + + keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) + secret = '{tdir}/data/client.{id}.secret'.format(tdir=testdir, id=id_) + write_secret_file(ctx, remote, 'client.{id}'.format(id=id_), + keyring, secret) + + remote.run( + args=[ + 'mkdir', + '--', + mnt, + ], + ) + + remote.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + '/sbin/mount.ceph', + '{mons}:/'.format(mons=','.join(mons)), + mnt, + '-v', + '-o', + 'name={id},secretfile={secret}'.format(id=id_, + secret=secret), + ], + ) + + try: + yield + finally: + log.info('Unmounting kernel clients...') + for id_, remote in clients: + log.debug('Unmounting client client.{id}...'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + remote.run( + args=[ + 'sudo', + 'umount', + mnt, + ], + ) + remote.run( + args=[ + 'rmdir', + '--', + mnt, + ], + ) diff --git a/qa/tasks/locktest.py b/qa/tasks/locktest.py new file mode 100755 index 00000000000..59a7122223e --- /dev/null +++ b/qa/tasks/locktest.py @@ -0,0 +1,134 @@ +""" +locktests +""" +import logging + +from teuthology.orchestra import run +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Run locktests, from the xfstests suite, on the given + clients. Whether the clients are ceph-fuse or kernel does not + matter, and the two clients can refer to the same mount. + + The config is a list of two clients to run the locktest on. The + first client will be the host. + + For example: + tasks: + - ceph: + - ceph-fuse: [client.0, client.1] + - locktest: + [client.0, client.1] + + This task does not yield; there would be little point. 
+ + :param ctx: Context + :param config: Configuration + """ + + assert isinstance(config, list) + log.info('fetching and building locktests...') + (host,) = ctx.cluster.only(config[0]).remotes + (client,) = ctx.cluster.only(config[1]).remotes + ( _, _, host_id) = config[0].partition('.') + ( _, _, client_id) = config[1].partition('.') + testdir = teuthology.get_testdir(ctx) + hostmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=host_id) + clientmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=client_id) + + try: + for client_name in config: + log.info('building on {client_}'.format(client_=client_name)) + ctx.cluster.only(client_name).run( + args=[ + # explicitly does not support multiple autotest tasks + # in a single run; the result archival would conflict + 'mkdir', '{tdir}/archive/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'mkdir', '{tdir}/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'wget', + '-nv', + 'https://raw.github.com/gregsfortytwo/xfstests-ceph/master/src/locktest.c', + '-O', '{tdir}/locktest/locktest.c'.format(tdir=testdir), + run.Raw('&&'), + 'g++', '{tdir}/locktest/locktest.c'.format(tdir=testdir), + '-o', '{tdir}/locktest/locktest'.format(tdir=testdir) + ], + logger=log.getChild('locktest_client.{id}'.format(id=client_name)), + ) + + log.info('built locktest on each client') + + host.run(args=['sudo', 'touch', + '{mnt}/locktestfile'.format(mnt=hostmnt), + run.Raw('&&'), + 'sudo', 'chown', 'ubuntu.ubuntu', + '{mnt}/locktestfile'.format(mnt=hostmnt) + ] + ) + + log.info('starting on host') + hostproc = host.run( + args=[ + '{tdir}/locktest/locktest'.format(tdir=testdir), + '-p', '6788', + '-d', + '{mnt}/locktestfile'.format(mnt=hostmnt), + ], + wait=False, + logger=log.getChild('locktest.host'), + ) + log.info('starting on client') + (_,_,hostaddr) = host.name.partition('@') + clientproc = client.run( + args=[ + '{tdir}/locktest/locktest'.format(tdir=testdir), + '-p', '6788', + '-d', + '-h', hostaddr, + '{mnt}/locktestfile'.format(mnt=clientmnt), + ], + logger=log.getChild('locktest.client'), + wait=False + ) + + hostresult = hostproc.wait() + clientresult = clientproc.wait() + if (hostresult != 0) or (clientresult != 0): + raise Exception("Did not pass locking test!") + log.info('finished locktest executable with results {r} and {s}'. 
\ + format(r=hostresult, s=clientresult)) + + finally: + log.info('cleaning up host dir') + host.run( + args=[ + 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir), + run.Raw('&&'), + 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'rmdir', '{tdir}/locktest' + ], + logger=log.getChild('.{id}'.format(id=config[0])), + ) + log.info('cleaning up client dir') + client.run( + args=[ + 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir), + run.Raw('&&'), + 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'rmdir', '{tdir}/locktest'.format(tdir=testdir) + ], + logger=log.getChild('.{id}'.format(\ + id=config[1])), + ) diff --git a/qa/tasks/lost_unfound.py b/qa/tasks/lost_unfound.py new file mode 100644 index 00000000000..bf209a37090 --- /dev/null +++ b/qa/tasks/lost_unfound.py @@ -0,0 +1,154 @@ +""" +Lost_unfound +""" +import logging +import time +import ceph_manager +from teuthology import misc as teuthology +from util.rados import rados + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling of lost objects. + + A pretty rigid cluseter is brought up andtested by this task + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'lost_unfound task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_clean() + + # something that is always there + dummyfile = '/etc/fstab' + + # take an osd out until the very end + manager.kill_osd(2) + manager.mark_down_osd(2) + manager.mark_out_osd(2) + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile]) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.wait_for_recovery() + + # create old objects + for f in range(1, 10): + rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'rm', 'existed_%d' % f]) + + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.1', + 'injectargs', + '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000' + ) + + manager.kill_osd(0) + manager.mark_down_osd(0) + + for f in range(1, 10): + rados(ctx, mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) + + # bring osd.0 back up, let it peer, but don't replicate the new + # objects... 
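+ # the recovery delay injected into osd.0's command args below keeps it
+ # from pulling the new objects while it peers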
+ log.info('osd.0 command_args is %s' % 'foo') + log.info(ctx.daemons.get_daemon('osd', 0).command_args) + ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([ + '--osd-recovery-delay-start', '1000' + ]) + manager.revive_osd(0) + manager.mark_in_osd(0) + manager.wait_till_osd_is_up(0) + + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.wait_till_active() + + # take out osd.1 and the only copy of those objects. + manager.kill_osd(1) + manager.mark_down_osd(1) + manager.mark_out_osd(1) + manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it') + + # bring up osd.2 so that things would otherwise, in theory, recovery fully + manager.revive_osd(2) + manager.mark_in_osd(2) + manager.wait_till_osd_is_up(2) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_till_active() + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + + # verify that there are unfound objects + unfound = manager.get_num_unfound_objects() + log.info("there are %d unfound objects" % unfound) + assert unfound + + # mark stuff lost + pgs = manager.get_pg_stats() + for pg in pgs: + if pg['stat_sum']['num_objects_unfound'] > 0: + primary = 'osd.%d' % pg['acting'][0] + + # verify that i can list them direct from the osd + log.info('listing missing/lost in %s state %s', pg['pgid'], + pg['state']); + m = manager.list_pg_missing(pg['pgid']) + #log.info('%s' % m) + assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound'] + num_unfound=0 + for o in m['objects']: + if len(o['locations']) == 0: + num_unfound += 1 + assert m['num_unfound'] == num_unfound + + log.info("reverting unfound in %s on %s", pg['pgid'], primary) + manager.raw_cluster_cmd('pg', pg['pgid'], + 'mark_unfound_lost', 'revert') + else: + log.info("no unfound in %s", pg['pgid']) + + manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_recovery() + + # verify result + for f in range(1, 10): + err = rados(ctx, mon, ['-p', 'data', 'get', 'new_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', 'data', 'get', 'existed_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', 'data', 'get', 'existing_%d' % f, '-']) + assert not err + + # see if osd.1 can cope + manager.revive_osd(1) + manager.mark_in_osd(1) + manager.wait_till_osd_is_up(1) + manager.wait_for_clean() diff --git a/qa/tasks/manypools.py b/qa/tasks/manypools.py new file mode 100644 index 00000000000..1ddcba5c8a9 --- /dev/null +++ b/qa/tasks/manypools.py @@ -0,0 +1,73 @@ +""" +Force pg creation on all osds +""" +from teuthology import misc as teuthology +from teuthology.orchestra import run +import logging + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Create the specified number of pools and write 16 objects to them (thereby forcing + the PG creation on each OSD). This task creates pools from all the clients, + in parallel. It is easy to add other daemon types which have the appropriate + permissions, but I don't think anything else does. + The config is just the number of pools to create. 
I recommend setting + "mon create pg interval" to a very low value in your ceph config to speed + this up. + + You probably want to do this to look at memory consumption, and + maybe to test how performance changes with the number of PGs. For example: + + tasks: + - ceph: + config: + mon: + mon create pg interval: 1 + - manypools: 3000 + - radosbench: + clients: [client.0] + time: 360 + """ + + log.info('creating {n} pools'.format(n=config)) + + poolnum = int(config) + creator_remotes = [] + client_roles = teuthology.all_roles_of_type(ctx.cluster, 'client') + log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles)) + for role in client_roles: + log.info('role={role_}'.format(role_=role)) + (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys() + creator_remotes.append((creator_remote, 'client.{id}'.format(id=role))) + + remaining_pools = poolnum + poolprocs=dict() + while (remaining_pools > 0): + log.info('{n} pools remaining to create'.format(n=remaining_pools)) + for remote, role_ in creator_remotes: + poolnum = remaining_pools + remaining_pools -= 1 + if remaining_pools < 0: + continue + log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_)) + proc = remote.run( + args=[ + 'rados', + '--name', role_, + 'mkpool', 'pool{num}'.format(num=poolnum), '-1', + run.Raw('&&'), + 'rados', + '--name', role_, + '--pool', 'pool{num}'.format(num=poolnum), + 'bench', '0', 'write', '-t', '16', '--block-size', '1' + ], + wait = False + ) + log.info('waiting for pool and object creates') + poolprocs[remote] = proc + + run.wait(poolprocs.itervalues()) + + log.info('created all {n} pools and wrote 16 objects to each'.format(n=poolnum)) diff --git a/qa/tasks/mds_creation_failure.py b/qa/tasks/mds_creation_failure.py new file mode 100644 index 00000000000..a3d052fb95c --- /dev/null +++ b/qa/tasks/mds_creation_failure.py @@ -0,0 +1,83 @@ + +import logging +import contextlib +import time +import ceph_manager +from teuthology import misc +from teuthology.orchestra.run import CommandFailedError, Raw + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Go through filesystem creation with a synthetic failure in an MDS + in its 'up:creating' state, to exercise the retry behaviour. 
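A minimal suite fragment for exercising this retry path might look like the following sketch; the task itself takes no configuration and, as the check below enforces, the cluster must contain exactly one mds role:

    tasks:
    - install:
    - ceph:
    - mds_creation_failure: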
+ """ + # Grab handles to the teuthology objects of interest + mdslist = list(misc.all_roles_of_type(ctx.cluster, 'mds')) + if len(mdslist) != 1: + # Require exactly one MDS, the code path for creation failure when + # a standby is available is different + raise RuntimeError("This task requires exactly one MDS") + + mds_id = mdslist[0] + (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys() + manager = ceph_manager.CephManager( + mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'), + ) + + # Stop the MDS and reset the filesystem so that next start will go into CREATING + mds = ctx.daemons.get_daemon('mds', mds_id) + mds.stop() + data_pool_id = manager.get_pool_num("data") + md_pool_id = manager.get_pool_num("metadata") + manager.raw_cluster_cmd_result('mds', 'newfs', md_pool_id.__str__(), data_pool_id.__str__(), + '--yes-i-really-mean-it') + + # Start the MDS with mds_kill_create_at set, it will crash during creation + mds.restart_with_args(["--mds_kill_create_at=1"]) + try: + mds.wait_for_exit() + except CommandFailedError as e: + if e.exitstatus == 1: + log.info("MDS creation killed as expected") + else: + log.error("Unexpected status code %s" % e.exitstatus) + raise + + # Since I have intentionally caused a crash, I will clean up the resulting core + # file to avoid task.internal.coredump seeing it as a failure. + log.info("Removing core file from synthetic MDS failure") + mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))]) + + # It should have left the MDS map state still in CREATING + status = manager.get_mds_status(mds_id) + assert status['state'] == 'up:creating' + + # Start the MDS again without the kill flag set, it should proceed with creation successfully + mds.restart() + + # Wait for state ACTIVE + t = 0 + create_timeout = 120 + while True: + status = manager.get_mds_status(mds_id) + if status['state'] == 'up:active': + log.info("MDS creation completed successfully") + break + elif status['state'] == 'up:creating': + log.info("MDS still in creating state") + if t > create_timeout: + log.error("Creating did not complete within %ss" % create_timeout) + raise RuntimeError("Creating did not complete within %ss" % create_timeout) + t += 1 + time.sleep(1) + else: + log.error("Unexpected MDS state: %s" % status['state']) + assert(status['state'] in ['up:active', 'up:creating']) + + # The system should be back up in a happy healthy state, go ahead and run any further tasks + # inside this context. + yield diff --git a/qa/tasks/mds_thrash.py b/qa/tasks/mds_thrash.py new file mode 100644 index 00000000000..c60b741a49e --- /dev/null +++ b/qa/tasks/mds_thrash.py @@ -0,0 +1,352 @@ +""" +Thrash mds by simulating failures +""" +import logging +import contextlib +import ceph_manager +import random +import time +from gevent.greenlet import Greenlet +from gevent.event import Event +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + + +class MDSThrasher(Greenlet): + """ + MDSThrasher:: + + The MDSThrasher thrashes MDSs during execution of other tasks (workunits, etc). + + The config is optional. Many of the config parameters are a a maximum value + to use when selecting a random value from a range. To always use the maximum + value, set no_random to true. 
The config is a dict containing some or all of: + + seed: [no default] seed the random number generator + + randomize: [default: true] enables randomization and use the max/min values + + max_thrash: [default: 1] the maximum number of MDSs that will be thrashed at + any given time. + + max_thrash_delay: [default: 30] maximum number of seconds to delay before + thrashing again. + + max_revive_delay: [default: 10] maximum number of seconds to delay before + bringing back a thrashed MDS + + thrash_in_replay: [default: 0.0] likelihood that the MDS will be thrashed + during replay. Value should be between 0.0 and 1.0 + + max_replay_thrash_delay: [default: 4] maximum number of seconds to delay while in + the replay state before thrashing + + thrash_weights: allows specific MDSs to be thrashed more/less frequently. This option + overrides anything specified by max_thrash. This option is a dict containing + mds.x: weight pairs. For example, [mds.a: 0.7, mds.b: 0.3, mds.c: 0.0]. Each weight + is a value from 0.0 to 1.0. Any MDSs not specified will be automatically + given a weight of 0.0. For a given MDS, by default the trasher delays for up + to max_thrash_delay, trashes, waits for the MDS to recover, and iterates. If a non-zero + weight is specified for an MDS, for each iteration the thrasher chooses whether to thrash + during that iteration based on a random value [0-1] not exceeding the weight of that MDS. + + Examples:: + + + The following example sets the likelihood that mds.a will be thrashed + to 80%, mds.b to 20%, and other MDSs will not be thrashed. It also sets the + likelihood that an MDS will be thrashed in replay to 40%. + Thrash weights do not have to sum to 1. + + tasks: + - ceph: + - mds_thrash: + thrash_weights: + - mds.a: 0.8 + - mds.b: 0.2 + thrash_in_replay: 0.4 + - ceph-fuse: + - workunit: + clients: + all: [suites/fsx.sh] + + The following example disables randomization, and uses the max delay values: + + tasks: + - ceph: + - mds_thrash: + max_thrash_delay: 10 + max_revive_delay: 1 + max_replay_thrash_delay: 4 + + """ + + def __init__(self, ctx, manager, config, logger, failure_group, weight): + super(MDSThrasher, self).__init__() + + self.ctx = ctx + self.manager = manager + assert self.manager.is_clean() + + self.stopping = Event() + self.logger = logger + self.config = config + + self.randomize = bool(self.config.get('randomize', True)) + self.max_thrash_delay = float(self.config.get('thrash_delay', 30.0)) + self.thrash_in_replay = float(self.config.get('thrash_in_replay', False)) + assert self.thrash_in_replay >= 0.0 and self.thrash_in_replay <= 1.0, 'thrash_in_replay ({v}) must be between [0.0, 1.0]'.format( + v=self.thrash_in_replay) + + self.max_replay_thrash_delay = float(self.config.get('max_replay_thrash_delay', 4.0)) + + self.max_revive_delay = float(self.config.get('max_revive_delay', 10.0)) + + self.failure_group = failure_group + self.weight = weight + + def _run(self): + try: + self.do_thrash() + except: + # Log exceptions here so we get the full backtrace (it's lost + # by the time someone does a .get() on this greenlet) + self.logger.exception("Exception in do_thrash:") + raise + + def log(self, x): + """Write data to logger assigned to this MDThrasher""" + self.logger.info(x) + + def stop(self): + self.stopping.set() + + def do_thrash(self): + """ + Perform the random thrashing action + """ + self.log('starting mds_do_thrash for failure group: ' + ', '.join( + ['mds.{_id}'.format(_id=_f) for _f in self.failure_group])) + while not self.stopping.is_set(): + 
delay = self.max_thrash_delay + if self.randomize: + delay = random.randrange(0.0, self.max_thrash_delay) + + if delay > 0.0: + self.log('waiting for {delay} secs before thrashing'.format(delay=delay)) + self.stopping.wait(delay) + if self.stopping.is_set(): + continue + + skip = random.randrange(0.0, 1.0) + if self.weight < 1.0 and skip > self.weight: + self.log('skipping thrash iteration with skip ({skip}) > weight ({weight})'.format(skip=skip, + weight=self.weight)) + continue + + # find the active mds in the failure group + statuses = [self.manager.get_mds_status(m) for m in self.failure_group] + actives = filter(lambda s: s and s['state'] == 'up:active', statuses) + assert len(actives) == 1, 'Can only have one active in a failure group' + + active_mds = actives[0]['name'] + active_rank = actives[0]['rank'] + + self.log('kill mds.{id} (rank={r})'.format(id=active_mds, r=active_rank)) + self.manager.kill_mds_by_rank(active_rank) + + # wait for mon to report killed mds as crashed + last_laggy_since = None + itercount = 0 + while True: + failed = self.manager.get_mds_status_all()['failed'] + status = self.manager.get_mds_status(active_mds) + if not status: + break + if 'laggy_since' in status: + last_laggy_since = status['laggy_since'] + break + if any([(f == active_mds) for f in failed]): + break + self.log( + 'waiting till mds map indicates mds.{_id} is laggy/crashed, in failed state, or mds.{_id} is removed from mdsmap'.format( + _id=active_mds)) + itercount = itercount + 1 + if itercount > 10: + self.log('mds map: {status}'.format(status=self.manager.get_mds_status_all())) + time.sleep(2) + if last_laggy_since: + self.log( + 'mds.{_id} reported laggy/crashed since: {since}'.format(_id=active_mds, since=last_laggy_since)) + else: + self.log('mds.{_id} down, removed from mdsmap'.format(_id=active_mds, since=last_laggy_since)) + + # wait for a standby mds to takeover and become active + takeover_mds = None + takeover_rank = None + itercount = 0 + while True: + statuses = [self.manager.get_mds_status(m) for m in self.failure_group] + actives = filter(lambda s: s and s['state'] == 'up:active', statuses) + if len(actives) > 0: + assert len(actives) == 1, 'Can only have one active in failure group' + takeover_mds = actives[0]['name'] + takeover_rank = actives[0]['rank'] + break + itercount = itercount + 1 + if itercount > 10: + self.log('mds map: {status}'.format(status=self.manager.get_mds_status_all())) + + self.log('New active mds is mds.{_id}'.format(_id=takeover_mds)) + + # wait for a while before restarting old active to become new + # standby + delay = self.max_revive_delay + if self.randomize: + delay = random.randrange(0.0, self.max_revive_delay) + + self.log('waiting for {delay} secs before reviving mds.{id}'.format( + delay=delay, id=active_mds)) + time.sleep(delay) + + self.log('reviving mds.{id}'.format(id=active_mds)) + self.manager.revive_mds(active_mds, standby_for_rank=takeover_rank) + + status = {} + while True: + status = self.manager.get_mds_status(active_mds) + if status and (status['state'] == 'up:standby' or status['state'] == 'up:standby-replay'): + break + self.log( + 'waiting till mds map indicates mds.{_id} is in standby or standby-replay'.format(_id=active_mds)) + time.sleep(2) + self.log('mds.{_id} reported in {state} state'.format(_id=active_mds, state=status['state'])) + + # don't do replay thrashing right now + continue + # this might race with replay -> active transition... 
+ if status['state'] == 'up:replay' and random.randrange(0.0, 1.0) < self.thrash_in_replay: + + delay = self.max_replay_thrash_delay + if self.randomize: + delay = random.randrange(0.0, self.max_replay_thrash_delay) + time.sleep(delay) + self.log('kill replaying mds.{id}'.format(id=self.to_kill)) + self.manager.kill_mds(self.to_kill) + + delay = self.max_revive_delay + if self.randomize: + delay = random.randrange(0.0, self.max_revive_delay) + + self.log('waiting for {delay} secs before reviving mds.{id}'.format( + delay=delay, id=self.to_kill)) + time.sleep(delay) + + self.log('revive mds.{id}'.format(id=self.to_kill)) + self.manager.revive_mds(self.to_kill) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Stress test the mds by thrashing while another task/workunit + is running. + + Please refer to MDSThrasher class for further information on the + available options. + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'mds_thrash task only accepts a dict for configuration' + mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds')) + assert len(mdslist) > 1, \ + 'mds_thrash task requires at least 2 metadata servers' + + # choose random seed + seed = None + if 'seed' in config: + seed = int(config['seed']) + else: + seed = int(time.time()) + log.info('mds thrasher using random seed: {seed}'.format(seed=seed)) + random.seed(seed) + + max_thrashers = config.get('max_thrash', 1) + thrashers = {} + + (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys() + manager = ceph_manager.CephManager( + first, ctx=ctx, logger=log.getChild('ceph_manager'), + ) + + # make sure everyone is in active, standby, or standby-replay + log.info('Wait for all MDSs to reach steady state...') + statuses = None + statuses_by_rank = None + while True: + statuses = {m: manager.get_mds_status(m) for m in mdslist} + statuses_by_rank = {} + for _, s in statuses.iteritems(): + if isinstance(s, dict): + statuses_by_rank[s['rank']] = s + + ready = filter(lambda (_, s): s is not None and (s['state'] == 'up:active' + or s['state'] == 'up:standby' + or s['state'] == 'up:standby-replay'), + statuses.items()) + if len(ready) == len(statuses): + break + time.sleep(2) + log.info('Ready to start thrashing') + + # setup failure groups + failure_groups = {} + actives = {s['name']: s for (_, s) in statuses.iteritems() if s['state'] == 'up:active'} + log.info('Actives is: {d}'.format(d=actives)) + log.info('Statuses is: {d}'.format(d=statuses_by_rank)) + for active in actives: + for (r, s) in statuses.iteritems(): + if s['standby_for_name'] == active: + if not active in failure_groups: + failure_groups[active] = [] + log.info('Assigning mds rank {r} to failure group {g}'.format(r=r, g=active)) + failure_groups[active].append(r) + + manager.wait_for_clean() + for (active, standbys) in failure_groups.iteritems(): + weight = 1.0 + if 'thrash_weights' in config: + weight = int(config['thrash_weights'].get('mds.{_id}'.format(_id=active), '0.0')) + + failure_group = [active] + failure_group.extend(standbys) + + thrasher = MDSThrasher( + ctx, manager, config, + logger=log.getChild('mds_thrasher.failure_group.[{a}, {sbs}]'.format( + a=active, + sbs=', '.join(standbys) + ) + ), + failure_group=failure_group, + weight=weight) + thrasher.start() + thrashers[active] = thrasher + + # if thrash_weights isn't specified and we've reached max_thrash, + # we're done + if not 'thrash_weights' in config and len(thrashers) == max_thrashers: + break + + try: + 
log.debug('Yielding') + yield + finally: + log.info('joining mds_thrashers') + for t in thrashers: + log.info('join thrasher for failure group [{fg}]'.format(fg=', '.join(failure_group))) + thrashers[t].stop() + thrashers[t].join() + log.info('done joining') diff --git a/qa/tasks/metadata.yaml b/qa/tasks/metadata.yaml new file mode 100644 index 00000000000..ccdc3b077cb --- /dev/null +++ b/qa/tasks/metadata.yaml @@ -0,0 +1,2 @@ +instance-id: test +local-hostname: test diff --git a/qa/tasks/mon_clock_skew_check.py b/qa/tasks/mon_clock_skew_check.py new file mode 100644 index 00000000000..891e6ec484e --- /dev/null +++ b/qa/tasks/mon_clock_skew_check.py @@ -0,0 +1,261 @@ +""" +Handle clock skews in monitors. +""" +import logging +import contextlib +import ceph_manager +import time +import gevent +from StringIO import StringIO +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +class ClockSkewCheck: + """ + Periodically check if there are any clock skews among the monitors in the + quorum. By default, assume no skews are supposed to exist; that can be + changed using the 'expect-skew' option. If 'fail-on-skew' is set to false, + then we will always succeed and only report skews if any are found. + + This class does not spawn a thread. It assumes that, if that is indeed + wanted, it should be done by a third party (for instance, the task using + this class). We intend it as such in order to reuse this class if need be. + + This task accepts the following options: + + interval amount of seconds to wait in-between checks. (default: 30.0) + max-skew maximum skew, in seconds, that is considered tolerable before + issuing a warning. (default: 0.05) + expect-skew 'true' or 'false', to indicate whether to expect a skew during + the run or not. If 'true', the test will fail if no skew is + found, and succeed if a skew is indeed found; if 'false', it's + the other way around. (default: false) + never-fail Don't fail the run if a skew is detected and we weren't + expecting it, or if no skew is detected and we were expecting + it. (default: False) + + at-least-once Runs at least once, even if we are told to stop. + (default: True) + at-least-once-timeout If we were told to stop but we are attempting to + run at least once, timeout after this many seconds. + (default: 600) + + Example: + Expect a skew higher than 0.05 seconds, but only report it without + failing the teuthology run. 
+ + - mon_clock_skew_check: + interval: 30 + max-skew: 0.05 + expect_skew: true + never-fail: true + """ + + def __init__(self, ctx, manager, config, logger): + self.ctx = ctx + self.manager = manager + + self.stopping = False + self.logger = logger + self.config = config + + if self.config is None: + self.config = dict() + + self.check_interval = float(self.config.get('interval', 30.0)) + + first_mon = teuthology.get_first_mon(ctx, config) + remote = ctx.cluster.only(first_mon).remotes.keys()[0] + proc = remote.run( + args=[ + 'sudo', + 'ceph-mon', + '-i', first_mon[4:], + '--show-config-value', 'mon_clock_drift_allowed' + ], stdout=StringIO(), wait=True + ) + self.max_skew = self.config.get('max-skew', float(proc.stdout.getvalue())) + + self.expect_skew = self.config.get('expect-skew', False) + self.never_fail = self.config.get('never-fail', False) + self.at_least_once = self.config.get('at-least-once', True) + self.at_least_once_timeout = self.config.get('at-least-once-timeout', 600.0) + + def info(self, x): + """ + locally define logger for info messages + """ + self.logger.info(x) + + def warn(self, x): + """ + locally define logger for warnings + """ + self.logger.warn(x) + + def debug(self, x): + """ + locally define logger for debug messages + """ + self.logger.info(x) + self.logger.debug(x) + + def finish(self): + """ + Break out of the do_check loop. + """ + self.stopping = True + + def sleep_interval(self): + """ + If a sleep interval is set, sleep for that amount of time. + """ + if self.check_interval > 0.0: + self.debug('sleeping for {s} seconds'.format( + s=self.check_interval)) + time.sleep(self.check_interval) + + def print_skews(self, skews): + """ + Display skew values. + """ + total = len(skews) + if total > 0: + self.info('---------- found {n} skews ----------'.format(n=total)) + for mon_id, values in skews.iteritems(): + self.info('mon.{id}: {v}'.format(id=mon_id, v=values)) + self.info('-------------------------------------') + else: + self.info('---------- no skews were found ----------') + + def do_check(self): + """ + Clock skew checker. Loops until finish() is called. 
+ """ + self.info('start checking for clock skews') + skews = dict() + ran_once = False + + started_on = None + + while not self.stopping or (self.at_least_once and not ran_once): + + if self.at_least_once and not ran_once and self.stopping: + if started_on is None: + self.info('kicking-off timeout (if any)') + started_on = time.time() + elif self.at_least_once_timeout > 0.0: + assert time.time() - started_on < self.at_least_once_timeout, \ + 'failed to obtain a timecheck before timeout expired' + + quorum_size = len(teuthology.get_mon_names(self.ctx)) + self.manager.wait_for_mon_quorum_size(quorum_size) + + health = self.manager.get_mon_health(True) + timechecks = health['timechecks'] + + clean_check = False + + if timechecks['round_status'] == 'finished': + assert (timechecks['round'] % 2) == 0, \ + 'timecheck marked as finished but round ' \ + 'disagrees (r {r})'.format( + r=timechecks['round']) + clean_check = True + else: + assert timechecks['round_status'] == 'on-going', \ + 'timecheck status expected \'on-going\' ' \ + 'but found \'{s}\' instead'.format( + s=timechecks['round_status']) + if 'mons' in timechecks.keys() and len(timechecks['mons']) > 1: + self.info('round still on-going, but there are available reports') + else: + self.info('no timechecks available just yet') + self.sleep_interval() + continue + + assert len(timechecks['mons']) > 1, \ + 'there are not enough reported timechecks; ' \ + 'expected > 1 found {n}'.format(n=len(timechecks['mons'])) + + for check in timechecks['mons']: + mon_skew = float(check['skew']) + mon_health = check['health'] + mon_id = check['name'] + if abs(mon_skew) > self.max_skew: + assert mon_health == 'HEALTH_WARN', \ + 'mon.{id} health is \'{health}\' but skew {s} > max {ms}'.format( + id=mon_id,health=mon_health,s=abs(mon_skew),ms=self.max_skew) + + log_str = 'mon.{id} with skew {s} > max {ms}'.format( + id=mon_id,s=abs(mon_skew),ms=self.max_skew) + + """ add to skew list """ + details = check['details'] + skews[mon_id] = {'skew': mon_skew, 'details': details} + + if self.expect_skew: + self.info('expected skew: {str}'.format(str=log_str)) + else: + self.warn('unexpected skew: {str}'.format(str=log_str)) + + if clean_check or (self.expect_skew and len(skews) > 0): + ran_once = True + self.print_skews(skews) + self.sleep_interval() + + total = len(skews) + self.print_skews(skews) + + error_str = '' + found_error = False + + if self.expect_skew: + if total == 0: + error_str = 'We were expecting a skew, but none was found!' + found_error = True + else: + if total > 0: + error_str = 'We were not expecting a skew, but we did find it!' + found_error = True + + if found_error: + self.info(error_str) + if not self.never_fail: + assert False, error_str + +@contextlib.contextmanager +def task(ctx, config): + """ + Use clas ClockSkewCheck to check for clock skews on the monitors. + This task will spawn a thread running ClockSkewCheck's do_check(). + + All the configuration will be directly handled by ClockSkewCheck, + so please refer to the class documentation for further information. 
+ """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'mon_clock_skew_check task only accepts a dict for configuration' + log.info('Beginning mon_clock_skew_check...') + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + skew_check = ClockSkewCheck(ctx, + manager, config, + logger=log.getChild('mon_clock_skew_check')) + skew_check_thread = gevent.spawn(skew_check.do_check) + try: + yield + finally: + log.info('joining mon_clock_skew_check') + skew_check.finish() + skew_check_thread.get() + + diff --git a/qa/tasks/mon_recovery.py b/qa/tasks/mon_recovery.py new file mode 100644 index 00000000000..bfa2cdf78f1 --- /dev/null +++ b/qa/tasks/mon_recovery.py @@ -0,0 +1,80 @@ +""" +Monitor recovery +""" +import logging +import ceph_manager +from teuthology import misc as teuthology + + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test monitor recovery. + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + mons = [f.split('.')[1] for f in teuthology.get_mon_names(ctx)] + log.info("mon ids = %s" % mons) + + manager.wait_for_mon_quorum_size(len(mons)) + + log.info('verifying all monitors are in the quorum') + for m in mons: + s = manager.get_mon_status(m) + assert s['state'] == 'leader' or s['state'] == 'peon' + assert len(s['quorum']) == len(mons) + + log.info('restarting each monitor in turn') + for m in mons: + # stop a monitor + manager.kill_mon(m) + manager.wait_for_mon_quorum_size(len(mons) - 1) + + # restart + manager.revive_mon(m) + manager.wait_for_mon_quorum_size(len(mons)) + + # in forward and reverse order, + rmons = mons + rmons.reverse() + for mons in mons, rmons: + log.info('stopping all monitors') + for m in mons: + manager.kill_mon(m) + + log.info('forming a minimal quorum for %s, then adding monitors' % mons) + qnum = (len(mons) / 2) + 1 + num = 0 + for m in mons: + manager.revive_mon(m) + num += 1 + if num >= qnum: + manager.wait_for_mon_quorum_size(num) + + # on both leader and non-leader ranks... + for rank in [0, 1]: + # take one out + log.info('removing mon %s' % mons[rank]) + manager.kill_mon(mons[rank]) + manager.wait_for_mon_quorum_size(len(mons) - 1) + + log.info('causing some monitor log activity') + m = 30 + for n in range(1, m): + manager.raw_cluster_cmd('log', '%d of %d' % (n, m)) + + log.info('adding mon %s back in' % mons[rank]) + manager.revive_mon(mons[rank]) + manager.wait_for_mon_quorum_size(len(mons)) diff --git a/qa/tasks/mon_thrash.py b/qa/tasks/mon_thrash.py new file mode 100644 index 00000000000..b45aaa99978 --- /dev/null +++ b/qa/tasks/mon_thrash.py @@ -0,0 +1,343 @@ +""" +Monitor thrash +""" +import logging +import contextlib +import ceph_manager +import random +import time +import gevent +import json +import math +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def _get_mons(ctx): + """ + Get monitor names from the context value. 
+ """ + mons = [f[len('mon.'):] for f in teuthology.get_mon_names(ctx)] + return mons + +class MonitorThrasher: + """ + How it works:: + + - pick a monitor + - kill it + - wait for quorum to be formed + - sleep for 'revive_delay' seconds + - revive monitor + - wait for quorum to be formed + - sleep for 'thrash_delay' seconds + + Options:: + + seed Seed to use on the RNG to reproduce a previous + behaviour (default: None; i.e., not set) + revive_delay Number of seconds to wait before reviving + the monitor (default: 10) + thrash_delay Number of seconds to wait in-between + test iterations (default: 0) + thrash_store Thrash monitor store before killing the monitor being thrashed (default: False) + thrash_store_probability Probability of thrashing a monitor's store + (default: 50) + thrash_many Thrash multiple monitors instead of just one. If + 'maintain-quorum' is set to False, then we will + thrash up to as many monitors as there are + available. (default: False) + maintain_quorum Always maintain quorum, taking care on how many + monitors we kill during the thrashing. If we + happen to only have one or two monitors configured, + if this option is set to True, then we won't run + this task as we cannot guarantee maintenance of + quorum. Setting it to false however would allow the + task to run with as many as just one single monitor. + (default: True) + freeze_mon_probability: how often to freeze the mon instead of killing it, + in % (default: 0) + freeze_mon_duration: how many seconds to freeze the mon (default: 15) + scrub Scrub after each iteration (default: True) + + Note: if 'store-thrash' is set to True, then 'maintain-quorum' must also + be set to True. + + For example:: + + tasks: + - ceph: + - mon_thrash: + revive_delay: 20 + thrash_delay: 1 + thrash_store: true + thrash_store_probability: 40 + seed: 31337 + maintain_quorum: true + thrash_many: true + - ceph-fuse: + - workunit: + clients: + all: + - mon/workloadgen.sh + """ + def __init__(self, ctx, manager, config, logger): + self.ctx = ctx + self.manager = manager + self.manager.wait_for_clean() + + self.stopping = False + self.logger = logger + self.config = config + + if self.config is None: + self.config = dict() + + """ Test reproducibility """ + self.random_seed = self.config.get('seed', None) + + if self.random_seed is None: + self.random_seed = int(time.time()) + + self.rng = random.Random() + self.rng.seed(int(self.random_seed)) + + """ Monitor thrashing """ + self.revive_delay = float(self.config.get('revive_delay', 10.0)) + self.thrash_delay = float(self.config.get('thrash_delay', 0.0)) + + self.thrash_many = self.config.get('thrash_many', False) + self.maintain_quorum = self.config.get('maintain_quorum', True) + + self.scrub = self.config.get('scrub', True) + + self.freeze_mon_probability = float(self.config.get('freeze_mon_probability', 10)) + self.freeze_mon_duration = float(self.config.get('freeze_mon_duration', 15.0)) + + assert self.max_killable() > 0, \ + 'Unable to kill at least one monitor with the current config.' 
+ + """ Store thrashing """ + self.store_thrash = self.config.get('store_thrash', False) + self.store_thrash_probability = int( + self.config.get('store_thrash_probability', 50)) + if self.store_thrash: + assert self.store_thrash_probability > 0, \ + 'store_thrash is set, probability must be > 0' + assert self.maintain_quorum, \ + 'store_thrash = true must imply maintain_quorum = true' + + self.thread = gevent.spawn(self.do_thrash) + + def log(self, x): + """ + locally log info messages + """ + self.logger.info(x) + + def do_join(self): + """ + Break out of this processes thrashing loop. + """ + self.stopping = True + self.thread.get() + + def should_thrash_store(self): + """ + If allowed, indicate that we should thrash a certain percentage of + the time as determined by the store_thrash_probability value. + """ + if not self.store_thrash: + return False + return self.rng.randrange(0, 101) < self.store_thrash_probability + + def thrash_store(self, mon): + """ + Thrash the monitor specified. + :param mon: monitor to thrash + """ + addr = self.ctx.ceph.conf['mon.%s' % mon]['mon addr'] + self.log('thrashing mon.{id}@{addr} store'.format(id=mon, addr=addr)) + out = self.manager.raw_cluster_cmd('-m', addr, 'sync', 'force') + j = json.loads(out) + assert j['ret'] == 0, \ + 'error forcing store sync on mon.{id}:\n{ret}'.format( + id=mon,ret=out) + + def should_freeze_mon(self): + """ + Indicate that we should freeze a certain percentago of the time + as determined by the freeze_mon_probability value. + """ + return self.rng.randrange(0, 101) < self.freeze_mon_probability + + def freeze_mon(self, mon): + """ + Send STOP signal to freeze the monitor. + """ + log.info('Sending STOP to mon %s', mon) + self.manager.signal_mon(mon, 19) # STOP + + def unfreeze_mon(self, mon): + """ + Send CONT signal to unfreeze the monitor. + """ + log.info('Sending CONT to mon %s', mon) + self.manager.signal_mon(mon, 18) # CONT + + def kill_mon(self, mon): + """ + Kill the monitor specified + """ + self.log('killing mon.{id}'.format(id=mon)) + self.manager.kill_mon(mon) + + def revive_mon(self, mon): + """ + Revive the monitor specified + """ + self.log('killing mon.{id}'.format(id=mon)) + self.log('reviving mon.{id}'.format(id=mon)) + self.manager.revive_mon(mon) + + def max_killable(self): + """ + Return the maximum number of monitors we can kill. + """ + m = len(_get_mons(self.ctx)) + if self.maintain_quorum: + return max(math.ceil(m/2.0)-1, 0) + else: + return m + + def do_thrash(self): + """ + Cotinuously loop and thrash the monitors. 
+ """ + self.log('start thrashing') + self.log('seed: {s}, revive delay: {r}, thrash delay: {t} '\ + 'thrash many: {tm}, maintain quorum: {mq} '\ + 'store thrash: {st}, probability: {stp} '\ + 'freeze mon: prob {fp} duration {fd}'.format( + s=self.random_seed,r=self.revive_delay,t=self.thrash_delay, + tm=self.thrash_many, mq=self.maintain_quorum, + st=self.store_thrash,stp=self.store_thrash_probability, + fp=self.freeze_mon_probability,fd=self.freeze_mon_duration, + )) + + while not self.stopping: + mons = _get_mons(self.ctx) + self.manager.wait_for_mon_quorum_size(len(mons)) + self.log('making sure all monitors are in the quorum') + for m in mons: + s = self.manager.get_mon_status(m) + assert s['state'] == 'leader' or s['state'] == 'peon' + assert len(s['quorum']) == len(mons) + + kill_up_to = self.rng.randrange(1, self.max_killable()+1) + mons_to_kill = self.rng.sample(mons, kill_up_to) + self.log('monitors to thrash: {m}'.format(m=mons_to_kill)) + + mons_to_freeze = [] + for mon in mons: + if mon in mons_to_kill: + continue + if self.should_freeze_mon(): + mons_to_freeze.append(mon) + self.log('monitors to freeze: {m}'.format(m=mons_to_freeze)) + + for mon in mons_to_kill: + self.log('thrashing mon.{m}'.format(m=mon)) + + """ we only thrash stores if we are maintaining quorum """ + if self.should_thrash_store() and self.maintain_quorum: + self.thrash_store(mon) + + self.kill_mon(mon) + + if mons_to_freeze: + for mon in mons_to_freeze: + self.freeze_mon(mon) + self.log('waiting for {delay} secs to unfreeze mons'.format( + delay=self.freeze_mon_duration)) + time.sleep(self.freeze_mon_duration) + for mon in mons_to_freeze: + self.unfreeze_mon(mon) + + if self.maintain_quorum: + self.manager.wait_for_mon_quorum_size(len(mons)-len(mons_to_kill)) + for m in mons: + if m in mons_to_kill: + continue + s = self.manager.get_mon_status(m) + assert s['state'] == 'leader' or s['state'] == 'peon' + assert len(s['quorum']) == len(mons)-len(mons_to_kill) + + self.log('waiting for {delay} secs before reviving monitors'.format( + delay=self.revive_delay)) + time.sleep(self.revive_delay) + + for mon in mons_to_kill: + self.revive_mon(mon) + # do more freezes + if mons_to_freeze: + for mon in mons_to_freeze: + self.freeze_mon(mon) + self.log('waiting for {delay} secs to unfreeze mons'.format( + delay=self.freeze_mon_duration)) + time.sleep(self.freeze_mon_duration) + for mon in mons_to_freeze: + self.unfreeze_mon(mon) + + self.manager.wait_for_mon_quorum_size(len(mons)) + for m in mons: + s = self.manager.get_mon_status(m) + assert s['state'] == 'leader' or s['state'] == 'peon' + assert len(s['quorum']) == len(mons) + + if self.scrub: + self.log('triggering scrub') + try: + self.manager.raw_cluster_cmd('scrub') + except Exception: + log.exception("Saw exception while triggering scrub") + + if self.thrash_delay > 0.0: + self.log('waiting for {delay} secs before continuing thrashing'.format( + delay=self.thrash_delay)) + time.sleep(self.thrash_delay) + +@contextlib.contextmanager +def task(ctx, config): + """ + Stress test the monitor by thrashing them while another task/workunit + is running. + + Please refer to MonitorThrasher class for further information on the + available options. 
+ """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'mon_thrash task only accepts a dict for configuration' + assert len(_get_mons(ctx)) > 2, \ + 'mon_thrash task requires at least 3 monitors' + log.info('Beginning mon_thrash...') + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + thrash_proc = MonitorThrasher(ctx, + manager, config, + logger=log.getChild('mon_thrasher')) + try: + log.debug('Yielding') + yield + finally: + log.info('joining mon_thrasher') + thrash_proc.do_join() + mons = _get_mons(ctx) + manager.wait_for_mon_quorum_size(len(mons)) diff --git a/qa/tasks/multibench.py b/qa/tasks/multibench.py new file mode 100644 index 00000000000..bc22b470593 --- /dev/null +++ b/qa/tasks/multibench.py @@ -0,0 +1,57 @@ +""" +Multibench testing +""" +import contextlib +import logging +import radosbench +import time +import copy +import gevent + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run multibench + + The config should be as follows: + + multibench: + time: + segments: + radosbench: + + example: + + tasks: + - ceph: + - multibench: + clients: [client.0] + time: 360 + - interactive: + """ + log.info('Beginning multibench...') + assert isinstance(config, dict), \ + "please list clients to run on" + + def run_one(num): + """Run test spawn from gevent""" + start = time.time() + benchcontext = copy.copy(config.get('radosbench')) + iterations = 0 + while time.time() - start < int(config.get('time', 600)): + log.info("Starting iteration %s of segment %s"%(iterations, num)) + benchcontext['pool'] = str(num) + "-" + str(iterations) + with radosbench.task(ctx, benchcontext): + time.sleep() + iterations += 1 + log.info("Starting %s threads"%(str(config.get('segments', 3)),)) + segments = [ + gevent.spawn(run_one, i) + for i in range(0, int(config.get('segments', 3)))] + + try: + yield + finally: + [i.get() for i in segments] diff --git a/qa/tasks/object_source_down.py b/qa/tasks/object_source_down.py new file mode 100644 index 00000000000..17b94490668 --- /dev/null +++ b/qa/tasks/object_source_down.py @@ -0,0 +1,103 @@ +""" +Test Object locations going down +""" +import logging +import ceph_manager +from teuthology import misc as teuthology +from util.rados import rados + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling of object location going down + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'lost_unfound task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + manager.sleep(10) + manager.wait_for_clean() + + # something that is always there + dummyfile = '/etc/fstab' + + # take 0, 1 out + manager.mark_out_osd(0) + manager.mark_out_osd(1) + manager.wait_for_clean() + + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.0', + 'injectargs', + '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000' + ) + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.1', + 'injectargs', + '--osd-recovery-delay-start 10000 
--osd-min-pg-log-entries 100000000' + ) + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.2', + 'injectargs', + '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000' + ) + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.3', + 'injectargs', + '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000' + ) + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile]) + + # create old objects + for f in range(1, 10): + rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) + + manager.mark_out_osd(3) + manager.wait_till_active() + + manager.mark_in_osd(0) + manager.wait_till_active() + + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + + manager.mark_out_osd(2) + manager.wait_till_active() + + # bring up 1 + manager.mark_in_osd(1) + manager.wait_till_active() + + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + log.info("Getting unfound objects") + unfound = manager.get_num_unfound_objects() + assert not unfound + + manager.kill_osd(2) + manager.mark_down_osd(2) + manager.kill_osd(3) + manager.mark_down_osd(3) + + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + log.info("Getting unfound objects") + unfound = manager.get_num_unfound_objects() + assert unfound diff --git a/qa/tasks/omapbench.py b/qa/tasks/omapbench.py new file mode 100644 index 00000000000..e026c74dbc0 --- /dev/null +++ b/qa/tasks/omapbench.py @@ -0,0 +1,83 @@ +""" +Run omapbench executable within teuthology +""" +import contextlib +import logging + +from teuthology.orchestra import run +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run omapbench + + The config should be as follows:: + + omapbench: + clients: [client list] + threads: + objects: + entries: + keysize: + valsize: + increment: + omaptype: + + example:: + + tasks: + - ceph: + - omapbench: + clients: [client.0] + threads: 30 + objects: 1000 + entries: 10 + keysize: 10 + valsize: 100 + increment: 100 + omaptype: uniform + - interactive: + """ + log.info('Beginning omapbench...') + assert isinstance(config, dict), \ + "please list clients to run on" + omapbench = {} + testdir = teuthology.get_testdir(ctx) + print(str(config.get('increment',-1))) + for role in config.get('clients', ['client.0']): + assert isinstance(role, basestring) + PREFIX = 'client.' 
+ assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + proc = remote.run( + args=[ + "/bin/sh", "-c", + " ".join(['adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage', + 'omapbench', + '--name', role[len(PREFIX):], + '-t', str(config.get('threads', 30)), + '-o', str(config.get('objects', 1000)), + '--entries', str(config.get('entries',10)), + '--keysize', str(config.get('keysize',10)), + '--valsize', str(config.get('valsize',1000)), + '--inc', str(config.get('increment',10)), + '--omaptype', str(config.get('omaptype','uniform')) + ]).format(tdir=testdir), + ], + logger=log.getChild('omapbench.{id}'.format(id=id_)), + stdin=run.PIPE, + wait=False + ) + omapbench[id_] = proc + + try: + yield + finally: + log.info('joining omapbench') + run.wait(omapbench.itervalues()) diff --git a/qa/tasks/osd_backfill.py b/qa/tasks/osd_backfill.py new file mode 100644 index 00000000000..d034d791a5e --- /dev/null +++ b/qa/tasks/osd_backfill.py @@ -0,0 +1,105 @@ +""" +Osd backfill test +""" +import logging +import ceph_manager +import time +from teuthology import misc as teuthology + + +log = logging.getLogger(__name__) + + +def rados_start(ctx, remote, cmd): + """ + Run a remote rados command (currently used to only write data) + """ + log.info("rados %s" % ' '.join(cmd)) + testdir = teuthology.get_testdir(ctx) + pre = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rados', + ]; + pre.extend(cmd) + proc = remote.run( + args=pre, + wait=False, + ) + return proc + +def task(ctx, config): + """ + Test backfill + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'thrashosds task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + log.info('num_osds is %s' % num_osds) + assert num_osds == 3 + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + manager.sleep(10) + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_clean() + + # write some data + p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096', + '--no-cleanup']) + err = p.wait(); + log.info('err is %d' % err) + + # mark osd.0 out to trigger a rebalance/backfill + manager.mark_out_osd(0) + + # also mark it down to it won't be included in pg_temps + manager.kill_osd(0) + manager.mark_down_osd(0) + + # wait for everything to peer and be happy... 
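For orientation, this osd_backfill task takes no options and, per the assert on num_osds above, expects exactly three OSDs in the cluster; a minimal suite fragment might look like the following sketch:

    tasks:
    - install:
    - ceph:
    - osd_backfill: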
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_recovery() + + # write some new data + p = rados_start(ctx, mon, ['-p', 'data', 'bench', '30', 'write', '-b', '4096', + '--no-cleanup']) + + time.sleep(15) + + # blackhole + restart osd.1 + # this triggers a divergent backfill target + manager.blackhole_kill_osd(1) + time.sleep(2) + manager.revive_osd(1) + + # wait for our writes to complete + succeed + err = p.wait() + log.info('err is %d' % err) + + # cluster must recover + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_recovery() + + # re-add osd.0 + manager.revive_osd(0) + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_clean() + + diff --git a/qa/tasks/osd_failsafe_enospc.py b/qa/tasks/osd_failsafe_enospc.py new file mode 100644 index 00000000000..bf089988022 --- /dev/null +++ b/qa/tasks/osd_failsafe_enospc.py @@ -0,0 +1,218 @@ +""" +Handle osdfailsafe configuration settings (nearfull ratio and full ratio) +""" +from cStringIO import StringIO +import logging +import time + +import ceph_manager +from teuthology.orchestra import run +from util.rados import rados +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling of osd_failsafe_nearfull_ratio and osd_failsafe_full_ratio + configuration settings + + In order for test to pass must use log-whitelist as follows + + tasks: + - chef: + - install: + - ceph: + log-whitelist: ['OSD near full', 'OSD full dropping all updates'] + - osd_failsafe_enospc: + + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'osd_failsafe_enospc task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + ctx.manager = manager + + # Give 2 seconds for injectargs + osd_op_complaint_time (30) + 2 * osd_heartbeat_interval (6) + 6 padding + sleep_time = 50 + + # something that is always there + dummyfile = '/etc/fstab' + dummyfile2 = '/etc/resolv.conf' + + # create 1 pg pool with 1 rep which can only be on osd.0 + osds = manager.get_osd_dump() + for osd in osds: + if osd['osd'] != 0: + manager.mark_out_osd(osd['osd']) + + log.info('creating pool foo') + manager.create_pool("foo") + manager.raw_cluster_cmd('osd', 'pool', 'set', 'foo', 'size', '1') + + # State NONE -> NEAR + log.info('1. Verify warning messages when exceeding nearfull_ratio') + + proc = mon.run( + args=[ + 'daemon-helper', + 'kill', + 'ceph', '-w' + ], + stdin=run.PIPE, + stdout=StringIO(), + wait=False, + ) + + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .00001') + + time.sleep(sleep_time) + proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w + proc.wait() + + lines = proc.stdout.getvalue().split('\n') + + count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) + assert count == 2, 'Incorrect number of warning messages expected 2 got %d' % count + count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) + assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count + + # State NEAR -> FULL + log.info('2. 
Verify error messages when exceeding full_ratio') + + proc = mon.run( + args=[ + 'daemon-helper', + 'kill', + 'ceph', '-w' + ], + stdin=run.PIPE, + stdout=StringIO(), + wait=False, + ) + + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001') + + time.sleep(sleep_time) + proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w + proc.wait() + + lines = proc.stdout.getvalue().split('\n') + + count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) + assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count + + log.info('3. Verify write failure when exceeding full_ratio') + + # Write data should fail + ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile1', dummyfile]) + assert ret != 0, 'Expected write failure but it succeeded with exit status 0' + + # Put back default + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97') + time.sleep(10) + + # State FULL -> NEAR + log.info('4. Verify write success when NOT exceeding full_ratio') + + # Write should succeed + ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile2', dummyfile2]) + assert ret == 0, 'Expected write to succeed, but got exit status %d' % ret + + log.info('5. Verify warning messages again when exceeding nearfull_ratio') + + proc = mon.run( + args=[ + 'daemon-helper', + 'kill', + 'ceph', '-w' + ], + stdin=run.PIPE, + stdout=StringIO(), + wait=False, + ) + + time.sleep(sleep_time) + proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w + proc.wait() + + lines = proc.stdout.getvalue().split('\n') + + count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) + assert count == 1 or count == 2, 'Incorrect number of warning messages expected 1 or 2 got %d' % count + count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) + assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count + + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .90') + time.sleep(10) + + # State NONE -> FULL + log.info('6. Verify error messages again when exceeding full_ratio') + + proc = mon.run( + args=[ + 'daemon-helper', + 'kill', + 'ceph', '-w' + ], + stdin=run.PIPE, + stdout=StringIO(), + wait=False, + ) + + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001') + + time.sleep(sleep_time) + proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w + proc.wait() + + lines = proc.stdout.getvalue().split('\n') + + count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) + assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count + count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) + assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count + + # State FULL -> NONE + log.info('7. 
Verify no messages settings back to default') + + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97') + time.sleep(10) + + proc = mon.run( + args=[ + 'daemon-helper', + 'kill', + 'ceph', '-w' + ], + stdin=run.PIPE, + stdout=StringIO(), + wait=False, + ) + + time.sleep(sleep_time) + proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w + proc.wait() + + lines = proc.stdout.getvalue().split('\n') + + count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) + assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count + count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) + assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count + + log.info('Test Passed') + + # Bring all OSDs back in + manager.remove_pool("foo") + for osd in osds: + if osd['osd'] != 0: + manager.mark_in_osd(osd['osd']) diff --git a/qa/tasks/osd_recovery.py b/qa/tasks/osd_recovery.py new file mode 100644 index 00000000000..ff88fb47d74 --- /dev/null +++ b/qa/tasks/osd_recovery.py @@ -0,0 +1,206 @@ +""" +osd recovery +""" +import logging +import ceph_manager +import time +from teuthology import misc as teuthology + + +log = logging.getLogger(__name__) + + +def rados_start(testdir, remote, cmd): + """ + Run a remote rados command (currently used to only write data) + """ + log.info("rados %s" % ' '.join(cmd)) + pre = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rados', + ]; + pre.extend(cmd) + proc = remote.run( + args=pre, + wait=False, + ) + return proc + +def task(ctx, config): + """ + Test (non-backfill) recovery + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'task only accepts a dict for configuration' + testdir = teuthology.get_testdir(ctx) + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + log.info('num_osds is %s' % num_osds) + assert num_osds == 3 + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + manager.sleep(10) + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_clean() + + # test some osdmap flags + manager.raw_cluster_cmd('osd', 'set', 'noin') + manager.raw_cluster_cmd('osd', 'set', 'noout') + manager.raw_cluster_cmd('osd', 'set', 'noup') + manager.raw_cluster_cmd('osd', 'set', 'nodown') + manager.raw_cluster_cmd('osd', 'unset', 'noin') + manager.raw_cluster_cmd('osd', 'unset', 'noout') + manager.raw_cluster_cmd('osd', 'unset', 'noup') + manager.raw_cluster_cmd('osd', 'unset', 'nodown') + + # write some new data + p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '60', 'write', '-b', '4096', + '--no-cleanup']) + + time.sleep(15) + + # trigger a divergent target: + # blackhole + restart osd.1 (shorter log) + manager.blackhole_kill_osd(1) + # kill osd.2 (longer log... 
we'll make it divergent below) + manager.kill_osd(2) + time.sleep(2) + manager.revive_osd(1) + + # wait for our writes to complete + succeed + err = p.wait() + log.info('err is %d' % err) + + # cluster must repeer + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.wait_for_active_or_down() + + # write some more (make sure osd.2 really is divergent) + p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096']) + p.wait(); + + # revive divergent osd + manager.revive_osd(2) + + while len(manager.get_osd_status()['up']) < 3: + log.info('waiting a bit...') + time.sleep(2) + log.info('3 are up!') + + # cluster must recover + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_clean() + + +def test_incomplete_pgs(ctx, config): + """ + Test handling of incomplete pgs. Requires 4 osds. + """ + testdir = teuthology.get_testdir(ctx) + if config is None: + config = {} + assert isinstance(config, dict), \ + 'task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + log.info('num_osds is %s' % num_osds) + assert num_osds == 4 + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 4: + time.sleep(10) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') + manager.wait_for_clean() + + log.info('Testing incomplete pgs...') + + for i in range(4): + manager.set_config( + i, + osd_recovery_delay_start=1000) + + # move data off of osd.0, osd.1 + manager.raw_cluster_cmd('osd', 'out', '0', '1') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') + manager.wait_for_clean() + + # lots of objects in rbd (no pg log, will backfill) + p = rados_start(testdir, mon, + ['-p', 'rbd', 'bench', '60', 'write', '-b', '1', + '--no-cleanup']) + p.wait() + + # few objects in metadata pool (with pg log, normal recovery) + for f in range(1, 20): + p = rados_start(testdir, mon, ['-p', 'metadata', 'put', + 'foo.%d' % f, '/etc/passwd']) + p.wait() + + # move it back + manager.raw_cluster_cmd('osd', 'in', '0', '1') + manager.raw_cluster_cmd('osd', 'out', '2', '3') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') + manager.wait_for_active() + + assert not manager.is_clean() + assert not manager.is_recovered() + + # kill 2 + 3 + log.info('stopping 2,3') + manager.kill_osd(2) + manager.kill_osd(3) + log.info('...') + manager.raw_cluster_cmd('osd', 'down', '2', '3') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.wait_for_active_or_down() + + assert manager.get_num_down() > 0 + + # 
revive 2 + 3 + manager.revive_osd(2) + manager.revive_osd(3) + while len(manager.get_osd_status()['up']) < 4: + log.info('waiting a bit...') + time.sleep(2) + log.info('all are up!') + + for i in range(4): + manager.kick_recovery_wq(i) + + # cluster must recover + manager.wait_for_clean() diff --git a/qa/tasks/peer.py b/qa/tasks/peer.py new file mode 100644 index 00000000000..f1789cf12d6 --- /dev/null +++ b/qa/tasks/peer.py @@ -0,0 +1,96 @@ +""" +Peer test (Single test, not much configurable here) +""" +import logging +import json + +import ceph_manager +from teuthology import misc as teuthology +from util.rados import rados + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test peering. + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'peer task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + manager.sleep(10) + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_clean() + + for i in range(3): + manager.set_config( + i, + osd_recovery_delay_start=120) + + # take on osd down + manager.kill_osd(2) + manager.mark_down_osd(2) + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', 'data', 'get', 'dummy', '-']) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.wait_for_recovery() + + # kill another and revive 2, so that some pgs can't peer. + manager.kill_osd(1) + manager.mark_down_osd(1) + manager.revive_osd(2) + manager.wait_till_osd_is_up(2) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + + manager.wait_for_active_or_down() + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + + # look for down pgs + num_down_pgs = 0 + pgs = manager.get_pg_stats() + for pg in pgs: + out = manager.raw_cluster_cmd('pg', pg['pgid'], 'query') + log.debug("out string %s",out) + j = json.loads(out) + log.info("pg is %s, query json is %s", pg, j) + + if pg['state'].count('down'): + num_down_pgs += 1 + # verify that it is blocked on osd.1 + rs = j['recovery_state'] + assert len(rs) > 0 + assert rs[0]['name'] == 'Started/Primary/Peering/GetInfo' + assert rs[1]['name'] == 'Started/Primary/Peering' + assert rs[1]['blocked'] + assert rs[1]['down_osds_we_would_probe'] == [1] + assert len(rs[1]['peering_blocked_by']) == 1 + assert rs[1]['peering_blocked_by'][0]['osd'] == 1 + + assert num_down_pgs > 0 + + # bring it all back + manager.revive_osd(1) + manager.wait_till_osd_is_up(1) + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_clean() diff --git a/qa/tasks/peering_speed_test.py b/qa/tasks/peering_speed_test.py new file mode 100644 index 00000000000..6c885f1c961 --- /dev/null +++ b/qa/tasks/peering_speed_test.py @@ -0,0 +1,93 @@ +""" +Remotely run peering tests. 
+""" +import logging +import time +from teuthology import misc as teuthology +import ceph_manager + +log = logging.getLogger(__name__) + +from args import argify + +POOLNAME = "POOLNAME" +ARGS = [ + ('num_pgs', 'number of pgs to create', 256, int), + ('max_time', 'seconds to complete peering', 0, int), + ('runs', 'trials to run', 10, int), + ('num_objects', 'objects to create', 256 * 1024, int), + ('object_size', 'size in bytes for objects', 64, int), + ('creation_time_limit', 'time limit for pool population', 60*60, int), + ('create_threads', 'concurrent writes for create', 256, int) + ] + +def setup(ctx, config): + """ + Setup peering test on remotes. + """ + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + ctx.manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + ctx.manager.clear_pools() + ctx.manager.create_pool(POOLNAME, config.num_pgs) + log.info("populating pool") + ctx.manager.rados_write_objects( + POOLNAME, + config.num_objects, + config.object_size, + config.creation_time_limit, + config.create_threads) + log.info("done populating pool") + +def do_run(ctx, config): + """ + Perform the test. + """ + start = time.time() + # mark in osd + ctx.manager.mark_in_osd(0) + log.info("writing out objects") + ctx.manager.rados_write_objects( + POOLNAME, + config.num_pgs, # write 1 object per pg or so + 1, + config.creation_time_limit, + config.num_pgs, # lots of concurrency + cleanup = True) + peering_end = time.time() + + log.info("peering done, waiting on recovery") + ctx.manager.wait_for_clean() + + log.info("recovery done") + recovery_end = time.time() + if config.max_time: + assert(peering_end - start < config.max_time) + ctx.manager.mark_out_osd(0) + ctx.manager.wait_for_clean() + return { + 'time_to_active': peering_end - start, + 'time_to_clean': recovery_end - start + } + +@argify("peering_speed_test", ARGS) +def task(ctx, config): + """ + Peering speed test + """ + setup(ctx, config) + ctx.manager.mark_out_osd(0) + ctx.manager.wait_for_clean() + ret = [] + for i in range(config.runs): + log.info("Run {i}".format(i = i)) + ret.append(do_run(ctx, config)) + + ctx.manager.mark_in_osd(0) + ctx.summary['recovery_times'] = { + 'runs': ret + } diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py new file mode 100644 index 00000000000..bcd79caa64f --- /dev/null +++ b/qa/tasks/qemu.py @@ -0,0 +1,412 @@ +""" +Qemu task +""" +from cStringIO import StringIO + +import contextlib +import logging +import os + +from teuthology import misc as teuthology +from teuthology import contextutil +from tasks import rbd +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +DEFAULT_NUM_RBD = 1 +DEFAULT_IMAGE_URL = 'http://ceph.com/qa/ubuntu-12.04.qcow2' +DEFAULT_MEM = 4096 # in megabytes + +@contextlib.contextmanager +def create_dirs(ctx, config): + """ + Handle directory creation and cleanup + """ + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.iteritems(): + assert 'test' in client_config, 'You must specify a test to run' + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'install', '-d', '-m0755', '--', + '{tdir}/qemu'.format(tdir=testdir), + '{tdir}/archive/qemu'.format(tdir=testdir), + ] + ) + try: + yield + finally: + for client, client_config in config.iteritems(): + assert 'test' in client_config, 'You must specify a test to run' + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'rmdir', 
'{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true', + ] + ) + +@contextlib.contextmanager +def generate_iso(ctx, config): + """Execute system commands to generate iso""" + log.info('generating iso...') + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.iteritems(): + assert 'test' in client_config, 'You must specify a test to run' + (remote,) = ctx.cluster.only(client).remotes.keys() + src_dir = os.path.dirname(__file__) + userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client) + metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client) + + with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f: + test_setup = ''.join(f.readlines()) + # configuring the commands to setup the nfs mount + mnt_dir = "/export/{client}".format(client=client) + test_setup = test_setup.format( + mnt_dir=mnt_dir + ) + + with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f: + test_teardown = ''.join(f.readlines()) + + user_data = test_setup + if client_config.get('type', 'filesystem') == 'filesystem': + for i in xrange(0, client_config.get('num_rbd', DEFAULT_NUM_RBD)): + dev_letter = chr(ord('b') + i) + user_data += """ +- | + #!/bin/bash + mkdir /mnt/test_{dev_letter} + mkfs -t xfs /dev/vd{dev_letter} + mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter} +""".format(dev_letter=dev_letter) + + # this may change later to pass the directories as args to the + # script or something. xfstests needs that. + user_data += """ +- | + #!/bin/bash + test -d /mnt/test_b && cd /mnt/test_b + /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success +""" + test_teardown + + teuthology.write_file(remote, userdata_path, StringIO(user_data)) + + with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f: + teuthology.write_file(remote, metadata_path, f) + + test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client) + remote.run( + args=[ + 'wget', '-nv', '-O', test_file, + client_config['test'], + run.Raw('&&'), + 'chmod', '755', test_file, + ], + ) + remote.run( + args=[ + 'genisoimage', '-quiet', '-input-charset', 'utf-8', + '-volid', 'cidata', '-joliet', '-rock', + '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), + '-graft-points', + 'user-data={userdata}'.format(userdata=userdata_path), + 'meta-data={metadata}'.format(metadata=metadata_path), + 'test.sh={file}'.format(file=test_file), + ], + ) + try: + yield + finally: + for client in config.iterkeys(): + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'rm', '-f', + '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), + os.path.join(testdir, 'qemu', 'userdata.' + client), + os.path.join(testdir, 'qemu', 'metadata.' 
+ client), + '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client), + ], + ) + +@contextlib.contextmanager +def download_image(ctx, config): + """Download base image, remove image file when done""" + log.info('downloading base image') + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.iteritems(): + (remote,) = ctx.cluster.only(client).remotes.keys() + base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client) + remote.run( + args=[ + 'wget', '-nv', '-O', base_file, DEFAULT_IMAGE_URL, + ] + ) + try: + yield + finally: + log.debug('cleaning up base image files') + for client in config.iterkeys(): + base_file = '{tdir}/qemu/base.{client}.qcow2'.format( + tdir=testdir, + client=client, + ) + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'rm', '-f', base_file, + ], + ) + + +def _setup_nfs_mount(remote, client, mount_dir): + """ + Sets up an nfs mount on the remote that the guest can use to + store logs. This nfs mount is also used to touch a file + at the end of the test to indicate if the test was successful + or not. + """ + export_dir = "/export/{client}".format(client=client) + log.info("Creating the nfs export directory...") + remote.run(args=[ + 'sudo', 'mkdir', '-p', export_dir, + ]) + log.info("Mounting the test directory...") + remote.run(args=[ + 'sudo', 'mount', '--bind', mount_dir, export_dir, + ]) + log.info("Adding mount to /etc/exports...") + export = "{dir} *(rw,no_root_squash,no_subtree_check,insecure)".format( + dir=export_dir + ) + remote.run(args=[ + 'echo', export, run.Raw("|"), + 'sudo', 'tee', '-a', "/etc/exports", + ]) + log.info("Restarting NFS...") + if remote.os.package_type == "deb": + remote.run(args=['sudo', 'service', 'nfs-kernel-server', 'restart']) + else: + remote.run(args=['sudo', 'systemctl', 'restart', 'nfs']) + + +def _teardown_nfs_mount(remote, client): + """ + Tears down the nfs mount on the remote used for logging and reporting the + status of the tests being run in the guest.
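The teardown mirrors _setup_nfs_mount: it stops the NFS server, unmounts and deletes the /export directory, removes the export line appended to /etc/exports during setup, and starts NFS again so the remote is left clean for later tests.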
+ """ + log.info("Tearing down the nfs mount for {remote}".format(remote=remote)) + export_dir = "/export/{client}".format(client=client) + log.info("Stopping NFS...") + if remote.os.package_type == "deb": + remote.run(args=[ + 'sudo', 'service', 'nfs-kernel-server', 'stop' + ]) + else: + remote.run(args=[ + 'sudo', 'systemctl', 'stop', 'nfs' + ]) + log.info("Unmounting exported directory...") + remote.run(args=[ + 'sudo', 'umount', export_dir + ]) + log.info("Deleting exported directory...") + remote.run(args=[ + 'sudo', 'rm', '-r', '/export' + ]) + log.info("Deleting export from /etc/exports...") + remote.run(args=[ + 'sudo', 'sed', '-i', '$ d', '/etc/exports' + ]) + log.info("Starting NFS...") + if remote.os.package_type == "deb": + remote.run(args=[ + 'sudo', 'service', 'nfs-kernel-server', 'start' + ]) + else: + remote.run(args=[ + 'sudo', 'systemctl', 'start', 'nfs' + ]) + + +@contextlib.contextmanager +def run_qemu(ctx, config): + """Setup kvm environment and start qemu""" + procs = [] + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.iteritems(): + (remote,) = ctx.cluster.only(client).remotes.keys() + log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client) + remote.run( + args=[ + 'mkdir', log_dir, run.Raw('&&'), + 'sudo', 'modprobe', 'kvm', + ] + ) + + # make an nfs mount to use for logging and to + # allow to test to tell teuthology the tests outcome + _setup_nfs_mount(remote, client, log_dir) + + base_file = '{tdir}/qemu/base.{client}.qcow2'.format( + tdir=testdir, + client=client + ) + qemu_cmd = 'qemu-system-x86_64' + if remote.os.package_type == "rpm": + qemu_cmd = "/usr/libexec/qemu-kvm" + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'daemon-helper', + 'term', + qemu_cmd, '-enable-kvm', '-nographic', + '-m', str(client_config.get('memory', DEFAULT_MEM)), + # base OS device + '-drive', + 'file={base},format=qcow2,if=virtio'.format(base=base_file), + # cd holding metadata for cloud-init + '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), + ] + + cachemode = 'none' + ceph_config = ctx.ceph.conf.get('global', {}) + ceph_config.update(ctx.ceph.conf.get('client', {})) + ceph_config.update(ctx.ceph.conf.get(client, {})) + if ceph_config.get('rbd cache'): + if ceph_config.get('rbd cache max dirty', 1) > 0: + cachemode = 'writeback' + else: + cachemode = 'writethrough' + + for i in xrange(client_config.get('num_rbd', DEFAULT_NUM_RBD)): + args.extend([ + '-drive', + 'file=rbd:rbd/{img}:id={id},format=raw,if=virtio,cache={cachemode}'.format( + img='{client}.{num}'.format(client=client, num=i), + id=client[len('client.'):], + cachemode=cachemode, + ), + ]) + + log.info('starting qemu...') + procs.append( + remote.run( + args=args, + logger=log.getChild(client), + stdin=run.PIPE, + wait=False, + ) + ) + + try: + yield + finally: + log.info('waiting for qemu tests to finish...') + run.wait(procs) + + log.debug('checking that qemu tests succeeded...') + for client in config.iterkeys(): + (remote,) = ctx.cluster.only(client).remotes.keys() + # teardown nfs mount + _teardown_nfs_mount(remote, client) + # check for test status + remote.run( + args=[ + 'test', '-f', + '{tdir}/archive/qemu/{client}/success'.format( + tdir=testdir, + client=client + ), + ], + ) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run a test inside of QEMU on top of rbd. Only one test + is supported per client. 
+ + For example, you can specify which clients to run on:: + + tasks: + - ceph: + - qemu: + client.0: + test: http://ceph.com/qa/test.sh + client.1: + test: http://ceph.com/qa/test2.sh + + Or use the same settings on all clients: + + tasks: + - ceph: + - qemu: + all: + test: http://ceph.com/qa/test.sh + + For tests that don't need a filesystem, set type to block:: + + tasks: + - ceph: + - qemu: + client.0: + test: http://ceph.com/qa/test.sh + type: block + + The test should be configured to run on /dev/vdb and later + devices. + + If you want to run a test that uses more than one rbd image, + specify how many images to use:: + + tasks: + - ceph: + - qemu: + client.0: + test: http://ceph.com/qa/test.sh + type: block + num_rbd: 2 + + You can set the amount of memory the VM has (default is 1024 MB):: + + tasks: + - ceph: + - qemu: + client.0: + test: http://ceph.com/qa/test.sh + memory: 512 # megabytes + """ + assert isinstance(config, dict), \ + "task qemu only supports a dictionary for configuration" + + config = teuthology.replace_all_with_clients(ctx.cluster, config) + + managers = [] + for client, client_config in config.iteritems(): + num_rbd = client_config.get('num_rbd', 1) + assert num_rbd > 0, 'at least one rbd device must be used' + for i in xrange(num_rbd): + create_config = { + client: { + 'image_name': + '{client}.{num}'.format(client=client, num=i), + } + } + managers.append( + lambda create_config=create_config: + rbd.create_image(ctx=ctx, config=create_config) + ) + + managers.extend([ + lambda: create_dirs(ctx=ctx, config=config), + lambda: generate_iso(ctx=ctx, config=config), + lambda: download_image(ctx=ctx, config=config), + lambda: run_qemu(ctx=ctx, config=config), + ]) + + with contextutil.nested(*managers): + yield diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py new file mode 100644 index 00000000000..3d44fdff1c3 --- /dev/null +++ b/qa/tasks/rados.py @@ -0,0 +1,160 @@ +""" +Rados modle-based integration tests +""" +import contextlib +import logging +import gevent +from teuthology import misc as teuthology + +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run RadosModel-based integration tests. + + The config should be as follows:: + + rados: + clients: [client list] + ops: + objects: + max_in_flight: + object_size: + min_stride_size: + max_stride_size: + op_weights: + runs: - the pool is remade between runs + ec_pool: use an ec pool + + For example:: + + tasks: + - ceph: + - rados: + clients: [client.0] + ops: 1000 + max_seconds: 0 # 0 for no limit + objects: 25 + max_in_flight: 16 + object_size: 4000000 + min_stride_size: 1024 + max_stride_size: 4096 + op_weights: + read: 20 + write: 10 + delete: 2 + snap_create: 3 + rollback: 2 + snap_remove: 0 + ec_pool: true + runs: 10 + - interactive: + + Optionally, you can provide the pool name to run against: + + tasks: + - ceph: + - exec: + client.0: + - ceph osd pool create foo + - rados: + clients: [client.0] + pools: [foo] + ... + + Alternatively, you can provide a pool prefix: + + tasks: + - ceph: + - exec: + client.0: + - ceph osd pool create foo.client.0 + - rados: + clients: [client.0] + pool_prefix: foo + ... 
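Each op_weights entry is forwarded to ceph_test_rados as an '--op <name> <weight>' argument pair; read, write and delete default to 100, 100 and 10 when not listed, while the remaining ops are only passed when explicitly configured.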
+ + """ + log.info('Beginning rados...') + assert isinstance(config, dict), \ + "please list clients to run on" + + object_size = int(config.get('object_size', 4000000)) + op_weights = config.get('op_weights', {}) + testdir = teuthology.get_testdir(ctx) + args = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph_test_rados'] + if config.get('ec_pool', False): + args.extend(['--ec-pool']) + args.extend([ + '--op', 'read', str(op_weights.get('read', 100)), + '--op', 'write', str(op_weights.get('write', 100)), + '--op', 'delete', str(op_weights.get('delete', 10)), + '--max-ops', str(config.get('ops', 10000)), + '--objects', str(config.get('objects', 500)), + '--max-in-flight', str(config.get('max_in_flight', 16)), + '--size', str(object_size), + '--min-stride-size', str(config.get('min_stride_size', object_size / 10)), + '--max-stride-size', str(config.get('max_stride_size', object_size / 5)), + '--max-seconds', str(config.get('max_seconds', 0)) + ]) + for field in [ + 'copy_from', 'is_dirty', 'undirty', 'cache_flush', + 'cache_try_flush', 'cache_evict', + 'snap_create', 'snap_remove', 'rollback', 'setattr', 'rmattr', + 'watch', 'append', + ]: + if field in op_weights: + args.extend([ + '--op', field, str(op_weights[field]), + ]) + + def thread(): + """Thread spawned by gevent""" + clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + log.info('clients are %s' % clients) + for i in range(int(config.get('runs', '1'))): + log.info("starting run %s out of %s", str(i), config.get('runs', '1')) + tests = {} + existing_pools = config.get('pools', []) + created_pools = [] + for role in config.get('clients', clients): + assert isinstance(role, basestring) + PREFIX = 'client.' 
+ assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + + pool = config.get('pool', None) + if not pool and existing_pools: + pool = existing_pools.pop() + else: + pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False)) + created_pools.append(pool) + + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + proc = remote.run( + args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args + + ["--pool", pool], + logger=log.getChild("rados.{id}".format(id=id_)), + stdin=run.PIPE, + wait=False + ) + tests[id_] = proc + run.wait(tests.itervalues()) + + for pool in created_pools: + ctx.manager.remove_pool(pool) + + running = gevent.spawn(thread) + + try: + yield + finally: + log.info('joining rados') + running.get() diff --git a/qa/tasks/radosbench.py b/qa/tasks/radosbench.py new file mode 100644 index 00000000000..1c5bd5486ab --- /dev/null +++ b/qa/tasks/radosbench.py @@ -0,0 +1,87 @@ +""" +Rados benchmarking +""" +import contextlib +import logging + +from teuthology.orchestra import run +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run radosbench + + The config should be as follows: + + radosbench: + clients: [client list] + time: + pool: + size: write size to use + unique_pool: use a unique pool, defaults to False + ec_pool: create ec pool, defaults to False + create_pool: create pool, defaults to False + + example: + + tasks: + - ceph: + - radosbench: + clients: [client.0] + time: 360 + - interactive: + """ + log.info('Beginning radosbench...') + assert isinstance(config, dict), \ + "please list clients to run on" + radosbench = {} + + testdir = teuthology.get_testdir(ctx) + + for role in config.get('clients', ['client.0']): + assert isinstance(role, basestring) + PREFIX = 'client.' + assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + + pool = 'data' + if config.get('create_pool', True): + if config.get('pool'): + pool = config.get('pool') + if pool != 'data': + ctx.manager.create_pool(pool, ec_pool=config.get('ec_pool', False)) + else: + pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False)) + + proc = remote.run( + args=[ + "/bin/sh", "-c", + " ".join(['adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage', + 'rados', + '--name', role, + '-b', str(config.get('size', 4<<20)), + '-p' , pool, + 'bench', str(config.get('time', 360)), 'write', + ]).format(tdir=testdir), + ], + logger=log.getChild('radosbench.{id}'.format(id=id_)), + stdin=run.PIPE, + wait=False + ) + radosbench[id_] = proc + + try: + yield + finally: + timeout = config.get('time', 360) * 5 + log.info('joining radosbench (timing out after %ss)', timeout) + run.wait(radosbench.itervalues(), timeout=timeout) + + if pool is not 'data': + ctx.manager.remove_pool(pool) diff --git a/qa/tasks/radosgw_admin.py b/qa/tasks/radosgw_admin.py new file mode 100644 index 00000000000..453f4f561de --- /dev/null +++ b/qa/tasks/radosgw_admin.py @@ -0,0 +1,991 @@ +""" +Rgw admin testing against a running instance +""" +# The test cases in this file have been annotated for inventory. 
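# Each TESTCASE annotation lists, in order: test name, object, method, operation exercised, and the assertion checked (see the legend further down).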
+# To extract the inventory (in csv format) use the command: +# +# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //' +# + +import copy +import json +import logging +import time + +from cStringIO import StringIO + +import boto.exception +import boto.s3.connection +import boto.s3.acl + +import util.rgw as rgw_utils + +from teuthology import misc as teuthology +from util.rgw import rgwadmin, get_user_summary, get_user_successful_ops + +log = logging.getLogger(__name__) + +def get_acl(key): + """ + Helper function to get the xml acl from a key, ensuring that the xml + version tag is removed from the acl response + """ + raw_acl = key.get_xml_acl() + + def remove_version(string): + return string.split( + '' + )[-1] + + def remove_newlines(string): + return string.strip('\n') + + return remove_version( + remove_newlines(raw_acl) + ) + + +def task(ctx, config): + """ + Test radosgw-admin functionality against a running rgw instance. + """ + global log + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task s3tests only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + multi_region_run = rgw_utils.multi_region_enabled(ctx) + + client = clients[0]; # default choice, multi-region code may overwrite this + if multi_region_run: + client = rgw_utils.get_master_client(ctx, clients) + + # once the client is chosen, pull the host name and assigned port out of + # the role_endpoints that were assigned by the rgw task + (remote_host, remote_port) = ctx.rgw.role_endpoints[client] + + ## + user1='foo' + user2='fud' + subuser1='foo:foo1' + subuser2='foo:foo2' + display_name1='Foo' + display_name2='Fud' + email='foo@foo.com' + email2='bar@bar.com' + access_key='9te6NH5mcdcq0Tc5i8i1' + secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu' + access_key2='p5YnriCv1nAtykxBrupQ' + secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh' + swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL' + swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy' + + bucket_name='myfoo' + bucket_name2='mybar' + + # connect to rgw + connection = boto.s3.connection.S3Connection( + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + is_secure=False, + port=remote_port, + host=remote_host, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + connection2 = boto.s3.connection.S3Connection( + aws_access_key_id=access_key2, + aws_secret_access_key=secret_key2, + is_secure=False, + port=remote_port, + host=remote_host, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + + # legend (test cases can be easily grep-ed out) + # TESTCASE 'testname','object','method','operation','assertion' + # TESTCASE 'info-nosuch','user','info','non-existent user','fails' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1]) + assert err + + # TESTCASE 'create-ok','user','create','w/all valid info','succeeds' + (err, out) = rgwadmin(ctx, client, [ + 'user', 'create', + '--uid', user1, + '--display-name', display_name1, + '--email', email, + '--access-key', access_key, + '--secret', secret_key, + '--max-buckets', '4' + ], + check_status=True) + + # TESTCASE 'duplicate email','user','create','existing user email','fails' + (err, out) = rgwadmin(ctx, client, [ + 'user', 'create', + '--uid', user2, + '--display-name', 
display_name2, + '--email', email, + ]) + assert err + + # TESTCASE 'info-existing','user','info','existing user','returns correct info' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) + assert out['user_id'] == user1 + assert out['email'] == email + assert out['display_name'] == display_name1 + assert len(out['keys']) == 1 + assert out['keys'][0]['access_key'] == access_key + assert out['keys'][0]['secret_key'] == secret_key + assert not out['suspended'] + + # this whole block should only be run if regions have been configured + if multi_region_run: + rgw_utils.radosgw_agent_sync_all(ctx) + # post-sync, validate that user1 exists on the sync destination host + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + dest_client = c_config['dest'] + (err, out) = rgwadmin(ctx, dest_client, ['metadata', 'list', 'user']) + (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True) + assert out['user_id'] == user1 + assert out['email'] == email + assert out['display_name'] == display_name1 + assert len(out['keys']) == 1 + assert out['keys'][0]['access_key'] == access_key + assert out['keys'][0]['secret_key'] == secret_key + assert not out['suspended'] + + # compare the metadata between different regions, make sure it matches + log.debug('compare the metadata between different regions, make sure it matches') + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + source_client = c_config['src'] + dest_client = c_config['dest'] + (err1, out1) = rgwadmin(ctx, source_client, + ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True) + (err2, out2) = rgwadmin(ctx, dest_client, + ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True) + assert out1 == out2 + + # suspend a user on the master, then check the status on the destination + log.debug('suspend a user on the master, then check the status on the destination') + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + source_client = c_config['src'] + dest_client = c_config['dest'] + (err, out) = rgwadmin(ctx, source_client, ['user', 'suspend', '--uid', user1]) + rgw_utils.radosgw_agent_sync_all(ctx) + (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True) + assert out['suspended'] + + # delete a user on the master, then check that it's gone on the destination + log.debug('delete a user on the master, then check that it\'s gone on the destination') + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + source_client = c_config['src'] + dest_client = c_config['dest'] + (err, out) = rgwadmin(ctx, source_client, ['user', 'rm', '--uid', user1], check_status=True) + rgw_utils.radosgw_agent_sync_all(ctx) + (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user1]) + assert out is None + (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1]) + assert out is None + + # then recreate it so later tests pass + (err, out) = rgwadmin(ctx, client, [ + 'user', 'create', + '--uid', user1, + '--display-name', display_name1, + '--email', email, + '--access-key', access_key, + '--secret', secret_key, + '--max-buckets', '4' + ], + check_status=True) + + # now do the multi-region bucket tests + log.debug('now do the multi-region bucket tests') + + # Create a second user for the following tests + log.debug('Create a second user for the following tests') + (err, out) = rgwadmin(ctx, client, [ + 'user', 'create', + '--uid', user2, 
+ '--display-name', display_name2, + '--email', email2, + '--access-key', access_key2, + '--secret', secret_key2, + '--max-buckets', '4' + ], + check_status=True) + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user2], check_status=True) + assert out is not None + + # create a bucket and do a sync + log.debug('create a bucket and do a sync') + bucket = connection.create_bucket(bucket_name2) + rgw_utils.radosgw_agent_sync_all(ctx) + + # compare the metadata for the bucket between different regions, make sure it matches + log.debug('compare the metadata for the bucket between different regions, make sure it matches') + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + source_client = c_config['src'] + dest_client = c_config['dest'] + (err1, out1) = rgwadmin(ctx, source_client, + ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + check_status=True) + (err2, out2) = rgwadmin(ctx, dest_client, + ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + check_status=True) + assert out1 == out2 + + # get the bucket.instance info and compare that + src_bucket_id = out1['data']['bucket']['bucket_id'] + dest_bucket_id = out2['data']['bucket']['bucket_id'] + (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get', + 'bucket.instance:{bucket_name}:{bucket_instance}'.format( + bucket_name=bucket_name2,bucket_instance=src_bucket_id)], + check_status=True) + (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get', + 'bucket.instance:{bucket_name}:{bucket_instance}'.format( + bucket_name=bucket_name2,bucket_instance=dest_bucket_id)], + check_status=True) + del out1['data']['bucket_info']['bucket']['pool'] + del out1['data']['bucket_info']['bucket']['index_pool'] + del out2['data']['bucket_info']['bucket']['pool'] + del out2['data']['bucket_info']['bucket']['index_pool'] + assert out1 == out2 + + same_region = 0 + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + source_client = c_config['src'] + dest_client = c_config['dest'] + + source_region = rgw_utils.region_for_client(ctx, source_client) + dest_region = rgw_utils.region_for_client(ctx, dest_client) + + # 301 is only returned for requests to something in a different region + if source_region == dest_region: + log.debug('301 is only returned for requests to something in a different region') + same_region += 1 + continue + + # Attempt to create a new connection with user1 to the destination RGW + log.debug('Attempt to create a new connection with user1 to the destination RGW') + # and use that to attempt a delete (that should fail) + exception_encountered = False + try: + (dest_remote_host, dest_remote_port) = ctx.rgw.role_endpoints[dest_client] + connection_dest = boto.s3.connection.S3Connection( + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + is_secure=False, + port=dest_remote_port, + host=dest_remote_host, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + + # this should fail + connection_dest.delete_bucket(bucket_name2) + except boto.exception.S3ResponseError as e: + assert e.status == 301 + exception_encountered = True + + # confirm that the expected exception was seen + assert exception_encountered + + # now delete the bucket on the source RGW and do another sync + log.debug('now delete the bucket on the source RGW and do another sync') + bucket.delete() + rgw_utils.radosgw_agent_sync_all(ctx) + + if same_region == len(ctx.radosgw_agent.config): + bucket.delete() + 
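# every src/dest pair was within a single region, so the cross-region delete above was skipped; remove the bucket here before syncing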
rgw_utils.radosgw_agent_sync_all(ctx) + + # make sure that the bucket no longer exists in either region + log.debug('make sure that the bucket no longer exists in either region') + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + source_client = c_config['src'] + dest_client = c_config['dest'] + (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get', + 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)]) + (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get', + 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)]) + # Both of the previous calls should have errors due to requesting + # metadata for non-existent buckets + assert err1 + assert err2 + + # create a bucket and then sync it + log.debug('create a bucket and then sync it') + bucket = connection.create_bucket(bucket_name2) + rgw_utils.radosgw_agent_sync_all(ctx) + + # compare the metadata for the bucket between different regions, make sure it matches + log.debug('compare the metadata for the bucket between different regions, make sure it matches') + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + source_client = c_config['src'] + dest_client = c_config['dest'] + (err1, out1) = rgwadmin(ctx, source_client, + ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + check_status=True) + (err2, out2) = rgwadmin(ctx, dest_client, + ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + check_status=True) + assert out1 == out2 + + # Now delete the bucket and recreate it with a different user + log.debug('Now delete the bucket and recreate it with a different user') + # within the same window of time and then sync. + bucket.delete() + bucket = connection2.create_bucket(bucket_name2) + rgw_utils.radosgw_agent_sync_all(ctx) + + # compare the metadata for the bucket between different regions, make sure it matches + log.debug('compare the metadata for the bucket between different regions, make sure it matches') + # user2 should own the bucket in both regions + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + source_client = c_config['src'] + dest_client = c_config['dest'] + (err1, out1) = rgwadmin(ctx, source_client, + ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + check_status=True) + (err2, out2) = rgwadmin(ctx, dest_client, + ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + check_status=True) + assert out1 == out2 + assert out1['data']['owner'] == user2 + assert out1['data']['owner'] != user1 + + # now we're going to use this bucket to test meta-data update propagation + log.debug('now we\'re going to use this bucket to test meta-data update propagation') + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + source_client = c_config['src'] + dest_client = c_config['dest'] + + # get the metadata so we can tweak it + log.debug('get the metadata so we can tweak it') + (err, orig_data) = rgwadmin(ctx, source_client, + ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + check_status=True) + + # manually edit mtime for this bucket to be 300 seconds in the past + log.debug('manually edit mtime for this bucket to be 300 seconds in the past') + new_data = copy.deepcopy(orig_data) + new_data['mtime'] = orig_data['mtime'] - 300 + assert new_data != orig_data + (err, out) = rgwadmin(ctx, source_client, + ['metadata', 'put', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + 
stdin=StringIO(json.dumps(new_data)), + check_status=True) + + # get the metadata and make sure that the 'put' worked + log.debug('get the metadata and make sure that the \'put\' worked') + (err, out) = rgwadmin(ctx, source_client, + ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + check_status=True) + assert out == new_data + + # sync to propagate the new metadata + log.debug('sync to propagate the new metadata') + rgw_utils.radosgw_agent_sync_all(ctx) + + # get the metadata from the dest and compare it to what we just set + log.debug('get the metadata from the dest and compare it to what we just set') + # and what the source region has. + (err1, out1) = rgwadmin(ctx, source_client, + ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + check_status=True) + (err2, out2) = rgwadmin(ctx, dest_client, + ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], + check_status=True) + # yeah for the transitive property + assert out1 == out2 + assert out1 == new_data + + # now we delete the bucket + log.debug('now we delete the bucket') + bucket.delete() + + log.debug('sync to propagate the deleted bucket') + rgw_utils.radosgw_agent_sync_all(ctx) + + # Delete user2 as later tests do not expect it to exist. + # Verify that it is gone on both regions + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + source_client = c_config['src'] + dest_client = c_config['dest'] + (err, out) = rgwadmin(ctx, source_client, + ['user', 'rm', '--uid', user2], check_status=True) + rgw_utils.radosgw_agent_sync_all(ctx) + # The two 'user info' calls should fail and not return any data + # since we just deleted this user. + (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user2]) + assert out is None + (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user2]) + assert out is None + + # Test data sync + + # First create a bucket for data sync test purpose + bucket = connection.create_bucket(bucket_name + 'data') + + # Create a tiny file and check if in sync + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + if c_config.get('metadata-only'): + continue + + source_client = c_config['src'] + dest_client = c_config['dest'] + k = boto.s3.key.Key(bucket) + k.key = 'tiny_file' + k.set_contents_from_string("123456789") + time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client)) + rgw_utils.radosgw_agent_sync_all(ctx, data=True) + (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client] + dest_connection = boto.s3.connection.S3Connection( + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + is_secure=False, + port=dest_port, + host=dest_host, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + dest_k = dest_connection.get_bucket(bucket_name + 'data').get_key('tiny_file') + assert k.get_contents_as_string() == dest_k.get_contents_as_string() + + # check that deleting it removes it from the dest zone + k.delete() + time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client)) + rgw_utils.radosgw_agent_sync_all(ctx, data=True) + + dest_bucket = dest_connection.get_bucket(bucket_name + 'data') + dest_k = dest_bucket.get_key('tiny_file') + assert dest_k == None, 'object not deleted from destination zone' + + # finally we delete the bucket + bucket.delete() + + bucket = connection.create_bucket(bucket_name + 'data2') + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + if c_config.get('metadata-only'): + continue + + 
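# for each src/dest pair the loop below writes 20 small objects on the source zone, syncs, and verifies them on the destination before deleting them and confirming the deletes propagate too; a hypothetical helper capturing the per-object check (illustration only, not used by this task) might look like:
#
#   def _assert_synced(dest_conn, bucket_name, key_name, expected):
#       # fetch the key from the destination zone and compare contents
#       dest_key = dest_conn.get_bucket(bucket_name).get_key(key_name)
#       assert dest_key is not None
#       assert dest_key.get_contents_as_string() == expected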
source_client = c_config['src'] + dest_client = c_config['dest'] + (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client] + dest_connection = boto.s3.connection.S3Connection( + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + is_secure=False, + port=dest_port, + host=dest_host, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + for i in range(20): + k = boto.s3.key.Key(bucket) + k.key = 'tiny_file_' + str(i) + k.set_contents_from_string(str(i) * 100) + + time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client)) + rgw_utils.radosgw_agent_sync_all(ctx, data=True) + + for i in range(20): + dest_k = dest_connection.get_bucket(bucket_name + 'data2').get_key('tiny_file_' + str(i)) + assert (str(i) * 100) == dest_k.get_contents_as_string() + k = boto.s3.key.Key(bucket) + k.key = 'tiny_file_' + str(i) + k.delete() + + # check that deleting removes the objects from the dest zone + time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client)) + rgw_utils.radosgw_agent_sync_all(ctx, data=True) + + for i in range(20): + dest_bucket = dest_connection.get_bucket(bucket_name + 'data2') + dest_k = dest_bucket.get_key('tiny_file_' + str(i)) + assert dest_k == None, 'object %d not deleted from destination zone' % i + bucket.delete() + + # end of 'if multi_region_run:' + + # TESTCASE 'suspend-ok','user','suspend','active user','succeeds' + (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1], + check_status=True) + + # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) + assert out['suspended'] + + # TESTCASE 're-enable','user','enable','suspended user','succeeds' + (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], check_status=True) + + # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) + assert not out['suspended'] + + # TESTCASE 'add-keys','key','create','w/valid info','succeeds' + (err, out) = rgwadmin(ctx, client, [ + 'key', 'create', '--uid', user1, + '--access-key', access_key2, '--secret', secret_key2, + ], check_status=True) + + # TESTCASE 'info-new-key','user','info','after key addition','returns all keys' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], + check_status=True) + assert len(out['keys']) == 2 + assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2 + assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2 + + # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed' + (err, out) = rgwadmin(ctx, client, [ + 'key', 'rm', '--uid', user1, + '--access-key', access_key2, + ], check_status=True) + assert len(out['keys']) == 1 + assert out['keys'][0]['access_key'] == access_key + assert out['keys'][0]['secret_key'] == secret_key + + # TESTCASE 'add-swift-key','key','create','swift key','succeeds' + subuser_access = 'full' + subuser_perm = 'full-control' + + (err, out) = rgwadmin(ctx, client, [ + 'subuser', 'create', '--subuser', subuser1, + '--access', subuser_access + ], check_status=True) + + # TESTCASE 'add-swift-key','key','create','swift key','succeeds' + (err, out) = rgwadmin(ctx, client, [ + 'subuser', 'modify', '--subuser', subuser1, + '--secret', swift_secret1, + '--key-type', 'swift', + ], check_status=True) + + # TESTCASE 
'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1]) + + assert out['subusers'][0]['permissions'] == subuser_perm + + # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) + assert len(out['swift_keys']) == 1 + assert out['swift_keys'][0]['user'] == subuser1 + assert out['swift_keys'][0]['secret_key'] == swift_secret1 + + # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds' + (err, out) = rgwadmin(ctx, client, [ + 'subuser', 'create', '--subuser', subuser2, + '--secret', swift_secret2, + '--key-type', 'swift', + ], check_status=True) + + # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) + assert len(out['swift_keys']) == 2 + assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2 + assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2 + + # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed' + (err, out) = rgwadmin(ctx, client, [ + 'key', 'rm', '--subuser', subuser1, + '--key-type', 'swift', + ], check_status=True) + assert len(out['swift_keys']) == 1 + + # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed' + (err, out) = rgwadmin(ctx, client, [ + 'subuser', 'rm', '--subuser', subuser1, + ], check_status=True) + assert len(out['subusers']) == 1 + + # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subser and key is removed' + (err, out) = rgwadmin(ctx, client, [ + 'subuser', 'rm', '--subuser', subuser2, + '--key-type', 'swift', '--purge-keys', + ], check_status=True) + assert len(out['swift_keys']) == 0 + assert len(out['subusers']) == 0 + + # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list' + (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], + check_status=True) + assert len(out) == 0 + + if multi_region_run: + rgw_utils.radosgw_agent_sync_all(ctx) + + # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list' + (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True) + assert len(out) == 0 + + # create a first bucket + bucket = connection.create_bucket(bucket_name) + + # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list' + (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True) + assert len(out) == 1 + assert out[0] == bucket_name + + # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list' + (err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True) + assert len(out) >= 1 + assert bucket_name in out; + + # TESTCASE 'max-bucket-limit,'bucket','create','4 buckets','5th bucket fails due to max buckets == 4' + bucket2 = connection.create_bucket(bucket_name + '2') + bucket3 = connection.create_bucket(bucket_name + '3') + bucket4 = connection.create_bucket(bucket_name + '4') + # the 5th should fail. 
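# (user1 was created with --max-buckets 4, so a fifth create_bucket call must be rejected)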
+ failed = False + try: + connection.create_bucket(bucket_name + '5') + except Exception: + failed = True + assert failed + + # delete the buckets + bucket2.delete() + bucket3.delete() + bucket4.delete() + + # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list' + (err, out) = rgwadmin(ctx, client, [ + 'bucket', 'stats', '--bucket', bucket_name], check_status=True) + assert out['owner'] == user1 + bucket_id = out['id'] + + # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID' + (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], check_status=True) + assert len(out) == 1 + assert out[0]['id'] == bucket_id # does it return the same ID twice in a row? + + # use some space + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('one') + + # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object' + (err, out) = rgwadmin(ctx, client, [ + 'bucket', 'stats', '--bucket', bucket_name], check_status=True) + assert out['id'] == bucket_id + assert out['usage']['rgw.main']['num_objects'] == 1 + assert out['usage']['rgw.main']['size_kb'] > 0 + + # reclaim it + key.delete() + + # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error' + (err, out) = rgwadmin(ctx, client, + ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name], + check_status=True) + + # create a second user to link the bucket to + (err, out) = rgwadmin(ctx, client, [ + 'user', 'create', + '--uid', user2, + '--display-name', display_name2, + '--access-key', access_key2, + '--secret', secret_key2, + '--max-buckets', '1', + ], + check_status=True) + + # try creating an object with the first user before the bucket is relinked + denied = False + key = boto.s3.key.Key(bucket) + + try: + key.set_contents_from_string('two') + except boto.exception.S3ResponseError: + denied = True + + assert not denied + + # delete the object + key.delete() + + # link the bucket to another user + (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name], + check_status=True) + + # try to remove user, should fail (has a linked bucket) + (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2]) + assert err + + # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked' + (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name], + check_status=True) + + # relink the bucket to the first user and delete the second user + (err, out) = rgwadmin(ctx, client, + ['bucket', 'link', '--uid', user1, '--bucket', bucket_name], + check_status=True) + + (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2], + check_status=True) + + # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed' + + # upload an object + object_name = 'four' + key = boto.s3.key.Key(bucket, object_name) + key.set_contents_from_string(object_name) + + # now delete it + (err, out) = rgwadmin(ctx, client, + ['object', 'rm', '--bucket', bucket_name, '--object', object_name], + check_status=True) + + # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists one no objects' + (err, out) = rgwadmin(ctx, client, [ + 'bucket', 'stats', '--bucket', bucket_name], + check_status=True) + assert out['id'] == bucket_id + assert out['usage']['rgw.main']['num_objects'] == 0 + + # list log objects + # TESTCASE 
'log-list','log','list','after activity','succeeds, lists one no objects' + (err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True) + assert len(out) > 0 + + for obj in out: + # TESTCASE 'log-show','log','show','after activity','returns expected info' + if obj[:4] == 'meta' or obj[:4] == 'data': + continue + + (err, rgwlog) = rgwadmin(ctx, client, ['log', 'show', '--object', obj], + check_status=True) + assert len(rgwlog) > 0 + + # exempt bucket_name2 from checking as it was only used for multi-region tests + assert rgwlog['bucket'].find(bucket_name) == 0 or rgwlog['bucket'].find(bucket_name2) == 0 + assert rgwlog['bucket'] != bucket_name or rgwlog['bucket_id'] == bucket_id + assert rgwlog['bucket_owner'] == user1 or rgwlog['bucket'] == bucket_name + '5' or rgwlog['bucket'] == bucket_name2 + for entry in rgwlog['log_entries']: + log.debug('checking log entry: ', entry) + assert entry['bucket'] == rgwlog['bucket'] + possible_buckets = [bucket_name + '5', bucket_name2] + user = entry['user'] + assert user == user1 or user.endswith('system-user') or \ + rgwlog['bucket'] in possible_buckets + + # TESTCASE 'log-rm','log','rm','delete log objects','succeeds' + (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj], + check_status=True) + + # TODO: show log by bucket+date + + # need to wait for all usage data to get flushed, should take up to 30 seconds + timestamp = time.time() + while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes + (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj']) # last operation we did is delete obj, wait for it to flush + if get_user_successful_ops(out, user1) > 0: + break + time.sleep(1) + + assert time.time() - timestamp <= (20 * 60) + + # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds' + (err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True) + assert len(out['entries']) > 0 + assert len(out['summary']) > 0 + + user_summary = get_user_summary(out, user1) + + total = user_summary['total'] + assert total['successful_ops'] > 0 + + # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds' + (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1], + check_status=True) + assert len(out['entries']) > 0 + assert len(out['summary']) > 0 + user_summary = out['summary'][0] + for entry in user_summary['categories']: + assert entry['successful_ops'] > 0 + assert user_summary['user'] == user1 + + # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds' + test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket'] + for cat in test_categories: + (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat], + check_status=True) + assert len(out['summary']) > 0 + user_summary = out['summary'][0] + assert user_summary['user'] == user1 + assert len(user_summary['categories']) == 1 + entry = user_summary['categories'][0] + assert entry['category'] == cat + assert entry['successful_ops'] > 0 + + # the usage flush interval is 30 seconds, wait that much an then some + # to make sure everything has been flushed + time.sleep(35) + + # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed' + (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1], + check_status=True) + (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1], + check_status=True) + assert len(out['entries']) == 0 + assert len(out['summary']) == 0 + + # TESTCASE 
'user-suspend2','user','suspend','existing user','succeeds' + (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1], + check_status=True) + + # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects' + try: + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('five') + except boto.exception.S3ResponseError as e: + assert e.status == 403 + + # TESTCASE 'user-renable2','user','enable','suspended user','succeeds' + (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], + check_status=True) + + # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects' + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('six') + + # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection' + + # create an object large enough to be split into multiple parts + test_string = 'foo'*10000000 + + big_key = boto.s3.key.Key(bucket) + big_key.set_contents_from_string(test_string) + + # now delete the head + big_key.delete() + + # wait a bit to give the garbage collector time to cycle + time.sleep(15) + + (err, out) = rgwadmin(ctx, client, ['gc', 'list']) + + assert len(out) > 0 + + # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage' + (err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True) + + #confirm + (err, out) = rgwadmin(ctx, client, ['gc', 'list']) + + assert len(out) == 0 + + # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets' + (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1]) + assert err + + # delete should fail because ``key`` still exists + try: + bucket.delete() + except boto.exception.S3ResponseError as e: + assert e.status == 409 + + key.delete() + bucket.delete() + + # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy' + bucket = connection.create_bucket(bucket_name) + + # create an object + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('seven') + + # should be private already but guarantee it + key.set_acl('private') + + (err, out) = rgwadmin(ctx, client, + ['policy', '--bucket', bucket.name, '--object', key.key], + check_status=True) + + acl = get_acl(key) + + assert acl == out.strip('\n') + + # add another grantee by making the object public read + key.set_acl('public-read') + + (err, out) = rgwadmin(ctx, client, + ['policy', '--bucket', bucket.name, '--object', key.key], + check_status=True) + + acl = get_acl(key) + + assert acl == out.strip('\n') + + # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds' + bucket = connection.create_bucket(bucket_name) + key_name = ['eight', 'nine', 'ten', 'eleven'] + for i in range(4): + key = boto.s3.key.Key(bucket) + key.set_contents_from_string(key_name[i]) + + (err, out) = rgwadmin(ctx, client, + ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'], + check_status=True) + + # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds' + caps='user=read' + (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps]) + + assert out['caps'][0]['perm'] == 'read' + + # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds' + (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps]) + + assert not out['caps'] + + # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets' + bucket = connection.create_bucket(bucket_name) + key = boto.s3.key.Key(bucket) + + (err, out) = rgwadmin(ctx, client, ['user', 
'rm', '--uid', user1]) + assert err + + # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds' + bucket = connection.create_bucket(bucket_name) + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('twelve') + + (err, out) = rgwadmin(ctx, client, + ['user', 'rm', '--uid', user1, '--purge-data' ], + check_status=True) + + # TESTCASE 'rm-user3','user','rm','deleted user','fails' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1]) + assert err + + # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule' + # + + (err, out) = rgwadmin(ctx, client, ['zone', 'get']) + orig_placement_pools = len(out['placement_pools']) + + # removed this test, it is not correct to assume that zone has default placement, it really + # depends on how we set it up before + # + # assert len(out) > 0 + # assert len(out['placement_pools']) == 1 + + # default_rule = out['placement_pools'][0] + # assert default_rule['key'] == 'default-placement' + + rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}} + + out['placement_pools'].append(rule) + + (err, out) = rgwadmin(ctx, client, ['zone', 'set'], + stdin=StringIO(json.dumps(out)), + check_status=True) + + (err, out) = rgwadmin(ctx, client, ['zone', 'get']) + assert len(out) > 0 + assert len(out['placement_pools']) == orig_placement_pools + 1 diff --git a/qa/tasks/radosgw_admin_rest.py b/qa/tasks/radosgw_admin_rest.py new file mode 100644 index 00000000000..7bd72d19536 --- /dev/null +++ b/qa/tasks/radosgw_admin_rest.py @@ -0,0 +1,668 @@ +""" +Run a series of rgw admin commands through the rest interface. + +The test cases in this file have been annotated for inventory. +To extract the inventory (in csv format) use the command: + + grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //' + +""" +from cStringIO import StringIO +import logging +import json + +import boto.exception +import boto.s3.connection +import boto.s3.acl + +import requests +import time + +from boto.connection import AWSAuthConnection +from teuthology import misc as teuthology +from util.rgw import get_user_summary, get_user_successful_ops + +log = logging.getLogger(__name__) + +def rgwadmin(ctx, client, cmd): + """ + Perform rgw admin command + + :param client: client + :param cmd: command to execute. + :return: command exit status, json result. 
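+
+ A hypothetical call (the client name and uid below are placeholders)::
+
+ (rc, info) = rgwadmin(ctx, 'client.0', ['user', 'info', '--uid', 'foo'])
+
+ where rc is the radosgw-admin exit status and info is the parsed JSON
+ output (or the raw output string if it was not valid JSON).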
+ """ + log.info('radosgw-admin: %s' % cmd) + testdir = teuthology.get_testdir(ctx) + pre = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '--log-to-stderr', + '--format', 'json', + ] + pre.extend(cmd) + (remote,) = ctx.cluster.only(client).remotes.iterkeys() + proc = remote.run( + args=pre, + check_status=False, + stdout=StringIO(), + stderr=StringIO(), + ) + r = proc.exitstatus + out = proc.stdout.getvalue() + j = None + if not r and out != '': + try: + j = json.loads(out) + log.info(' json result: %s' % j) + except ValueError: + j = out + log.info(' raw result: %s' % j) + return (r, j) + + +def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False): + """ + perform a rest command + """ + log.info('radosgw-admin-rest: %s %s' % (cmd, params)) + put_cmds = ['create', 'link', 'add'] + post_cmds = ['unlink', 'modify'] + delete_cmds = ['trim', 'rm', 'process'] + get_cmds = ['check', 'info', 'show', 'list'] + + bucket_sub_resources = ['object', 'policy', 'index'] + user_sub_resources = ['subuser', 'key', 'caps'] + zone_sub_resources = ['pool', 'log', 'garbage'] + + def get_cmd_method_and_handler(cmd): + """ + Get the rest command and handler from information in cmd and + from the imported requests object. + """ + if cmd[1] in put_cmds: + return 'PUT', requests.put + elif cmd[1] in delete_cmds: + return 'DELETE', requests.delete + elif cmd[1] in post_cmds: + return 'POST', requests.post + elif cmd[1] in get_cmds: + return 'GET', requests.get + + def get_resource(cmd): + """ + Get the name of the resource from information in cmd. + """ + if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources: + if cmd[0] == 'bucket': + return 'bucket', '' + else: + return 'bucket', cmd[0] + elif cmd[0] == 'user' or cmd[0] in user_sub_resources: + if cmd[0] == 'user': + return 'user', '' + else: + return 'user', cmd[0] + elif cmd[0] == 'usage': + return 'usage', '' + elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources: + if cmd[0] == 'zone': + return 'zone', '' + else: + return 'zone', cmd[0] + + def build_admin_request(conn, method, resource = '', headers=None, data='', + query_args=None, params=None): + """ + Build an administative request adapted from the build_request() + method of boto.connection + """ + + path = conn.calling_format.build_path_base('admin', resource) + auth_path = conn.calling_format.build_auth_path('admin', resource) + host = conn.calling_format.build_host(conn.server_name(), 'admin') + if query_args: + path += '?' + query_args + boto.log.debug('path=%s' % path) + auth_path += '?' 
+ query_args + boto.log.debug('auth_path=%s' % auth_path) + return AWSAuthConnection.build_base_http_request(conn, method, path, + auth_path, params, headers, data, host) + + method, handler = get_cmd_method_and_handler(cmd) + resource, query_args = get_resource(cmd) + request = build_admin_request(connection, method, resource, + query_args=query_args, headers=headers) + + url = '{protocol}://{host}{path}'.format(protocol=request.protocol, + host=request.host, path=request.path) + + request.authorize(connection=connection) + result = handler(url, params=params, headers=request.headers) + + if raw: + log.info(' text result: %s' % result.txt) + return result.status_code, result.txt + else: + log.info(' json result: %s' % result.json()) + return result.status_code, result.json() + + +def task(ctx, config): + """ + Test radosgw-admin functionality through the RESTful interface + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task s3tests only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + # just use the first client... + client = clients[0] + + ## + admin_user = 'ada' + admin_display_name = 'Ms. Admin User' + admin_access_key = 'MH1WC2XQ1S8UISFDZC8W' + admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG' + admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write' + + user1 = 'foo' + user2 = 'fud' + subuser1 = 'foo:foo1' + subuser2 = 'foo:foo2' + display_name1 = 'Foo' + display_name2 = 'Fud' + email = 'foo@foo.com' + access_key = '9te6NH5mcdcq0Tc5i8i1' + secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu' + access_key2 = 'p5YnriCv1nAtykxBrupQ' + secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh' + swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL' + swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy' + + bucket_name = 'myfoo' + + # legend (test cases can be easily grep-ed out) + # TESTCASE 'testname','object','method','operation','assertion' + # TESTCASE 'create-admin-user','user','create','administrative user','succeeds' + (err, out) = rgwadmin(ctx, client, [ + 'user', 'create', + '--uid', admin_user, + '--display-name', admin_display_name, + '--access-key', admin_access_key, + '--secret', admin_secret_key, + '--max-buckets', '0', + '--caps', admin_caps + ]) + logging.error(out) + logging.error(err) + assert not err + + (remote,) = ctx.cluster.only(client).remotes.iterkeys() + remote_host = remote.name.split('@')[1] + admin_conn = boto.s3.connection.S3Connection( + aws_access_key_id=admin_access_key, + aws_secret_access_key=admin_secret_key, + is_secure=False, + port=7280, + host=remote_host, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + + # TESTCASE 'info-nosuch','user','info','non-existent user','fails' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1}) + assert ret == 404 + + # TESTCASE 'create-ok','user','create','w/all valid info','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, + ['user', 'create'], + {'uid' : user1, + 'display-name' : display_name1, + 'email' : email, + 'access-key' : access_key, + 'secret-key' : secret_key, + 'max-buckets' : '4' + }) + + assert ret == 200 + + # TESTCASE 'info-existing','user','info','existing user','returns correct info' + (ret, out) = 
rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + + assert out['user_id'] == user1 + assert out['email'] == email + assert out['display_name'] == display_name1 + assert len(out['keys']) == 1 + assert out['keys'][0]['access_key'] == access_key + assert out['keys'][0]['secret_key'] == secret_key + assert not out['suspended'] + + # TESTCASE 'suspend-ok','user','suspend','active user','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True}) + assert ret == 200 + + # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 200 + assert out['suspended'] + + # TESTCASE 're-enable','user','enable','suspended user','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'}) + assert not err + + # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 200 + assert not out['suspended'] + + # TESTCASE 'add-keys','key','create','w/valid info','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, + ['key', 'create'], + {'uid' : user1, + 'access-key' : access_key2, + 'secret-key' : secret_key2 + }) + + + assert ret == 200 + + # TESTCASE 'info-new-key','user','info','after key addition','returns all keys' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 200 + assert len(out['keys']) == 2 + assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2 + assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2 + + # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed' + (ret, out) = rgwadmin_rest(admin_conn, + ['key', 'rm'], + {'uid' : user1, + 'access-key' : access_key2 + }) + + assert ret == 200 + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + + assert len(out['keys']) == 1 + assert out['keys'][0]['access_key'] == access_key + assert out['keys'][0]['secret_key'] == secret_key + + # TESTCASE 'add-swift-key','key','create','swift key','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, + ['subuser', 'create'], + {'subuser' : subuser1, + 'secret-key' : swift_secret1, + 'key-type' : 'swift' + }) + + assert ret == 200 + + # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 200 + assert len(out['swift_keys']) == 1 + assert out['swift_keys'][0]['user'] == subuser1 + assert out['swift_keys'][0]['secret_key'] == swift_secret1 + + # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, + ['subuser', 'create'], + {'subuser' : subuser2, + 'secret-key' : swift_secret2, + 'key-type' : 'swift' + }) + + assert ret == 200 + + # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 200 + assert len(out['swift_keys']) == 2 + assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2 + assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2 + + # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key 
is removed' + (ret, out) = rgwadmin_rest(admin_conn, + ['key', 'rm'], + {'subuser' : subuser1, + 'key-type' :'swift' + }) + + assert ret == 200 + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert len(out['swift_keys']) == 1 + + # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed' + (ret, out) = rgwadmin_rest(admin_conn, + ['subuser', 'rm'], + {'subuser' : subuser1 + }) + + assert ret == 200 + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert len(out['subusers']) == 1 + + # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subser and key is removed' + (ret, out) = rgwadmin_rest(admin_conn, + ['subuser', 'rm'], + {'subuser' : subuser2, + 'key-type' : 'swift', + '{purge-keys' :True + }) + + assert ret == 200 + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert len(out['swift_keys']) == 0 + assert len(out['subusers']) == 0 + + # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1}) + assert ret == 200 + assert len(out) == 0 + + # connect to rgw + connection = boto.s3.connection.S3Connection( + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + is_secure=False, + port=7280, + host=remote_host, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + + # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True}) + assert ret == 200 + assert len(out) == 0 + + # create a first bucket + bucket = connection.create_bucket(bucket_name) + + # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1}) + assert ret == 200 + assert len(out) == 1 + assert out[0] == bucket_name + + # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list' + (ret, out) = rgwadmin_rest(admin_conn, + ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True}) + + assert ret == 200 + assert out['owner'] == user1 + bucket_id = out['id'] + + # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True}) + assert ret == 200 + assert len(out) == 1 + assert out[0]['id'] == bucket_id # does it return the same ID twice in a row? 
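+ # (i.e. confirm that two consecutive stats queries report the same
+ # bucket ID for this user's only bucket)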
+ + # use some space + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('one') + + # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True}) + assert ret == 200 + assert out['id'] == bucket_id + assert out['usage']['rgw.main']['num_objects'] == 1 + assert out['usage']['rgw.main']['size_kb'] > 0 + + # reclaim it + key.delete() + + # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name}) + + assert ret == 200 + + # create a second user to link the bucket to + (ret, out) = rgwadmin_rest(admin_conn, + ['user', 'create'], + {'uid' : user2, + 'display-name' : display_name2, + 'access-key' : access_key2, + 'secret-key' : secret_key2, + 'max-buckets' : '1', + }) + + assert ret == 200 + + # try creating an object with the first user before the bucket is relinked + denied = False + key = boto.s3.key.Key(bucket) + + try: + key.set_contents_from_string('two') + except boto.exception.S3ResponseError: + denied = True + + assert not denied + + # delete the object + key.delete() + + # link the bucket to another user + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user2, 'bucket' : bucket_name}) + + assert ret == 200 + + # try creating an object with the first user which should cause an error + key = boto.s3.key.Key(bucket) + + try: + key.set_contents_from_string('three') + except boto.exception.S3ResponseError: + denied = True + + assert denied + + # relink the bucket to the first user and delete the second user + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user1, 'bucket' : bucket_name}) + assert ret == 200 + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2}) + assert ret == 200 + + # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed' + + # upload an object + object_name = 'four' + key = boto.s3.key.Key(bucket, object_name) + key.set_contents_from_string(object_name) + + # now delete it + (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name}) + assert ret == 200 + + # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists one no objects' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True}) + assert ret == 200 + assert out['id'] == bucket_id + assert out['usage']['rgw.main']['num_objects'] == 0 + + # create a bucket for deletion stats + useless_bucket = connection.create_bucket('useless_bucket') + useless_key = useless_bucket.new_key('useless_key') + useless_key.set_contents_from_string('useless string') + + # delete it + useless_key.delete() + useless_bucket.delete() + + # wait for the statistics to flush + time.sleep(60) + + # need to wait for all usage data to get flushed, should take up to 30 seconds + timestamp = time.time() + while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'}) # last operation we did is delete obj, wait for it to flush + + if get_user_successful_ops(out, user1) > 0: + break + time.sleep(1) + + assert time.time() - timestamp <= (20 * 60) + + # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds' + (ret, out) = rgwadmin_rest(admin_conn, 
['usage', 'show']) + assert ret == 200 + assert len(out['entries']) > 0 + assert len(out['summary']) > 0 + user_summary = get_user_summary(out, user1) + total = user_summary['total'] + assert total['successful_ops'] > 0 + + # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1}) + assert ret == 200 + assert len(out['entries']) > 0 + assert len(out['summary']) > 0 + user_summary = out['summary'][0] + for entry in user_summary['categories']: + assert entry['successful_ops'] > 0 + assert user_summary['user'] == user1 + + # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds' + test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket'] + for cat in test_categories: + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat}) + assert ret == 200 + assert len(out['summary']) > 0 + user_summary = out['summary'][0] + assert user_summary['user'] == user1 + assert len(user_summary['categories']) == 1 + entry = user_summary['categories'][0] + assert entry['category'] == cat + assert entry['successful_ops'] > 0 + + # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed' + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1}) + assert ret == 200 + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1}) + assert ret == 200 + assert len(out['entries']) == 0 + assert len(out['summary']) == 0 + + # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True}) + assert ret == 200 + + # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects' + try: + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('five') + except boto.exception.S3ResponseError as e: + assert e.status == 403 + + # TESTCASE 'user-renable2','user','enable','suspended user','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'}) + assert ret == 200 + + # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects' + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('six') + + # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection' + + # create an object large enough to be split into multiple parts + test_string = 'foo'*10000000 + + big_key = boto.s3.key.Key(bucket) + big_key.set_contents_from_string(test_string) + + # now delete the head + big_key.delete() + + # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1}) + assert ret == 409 + + # delete should fail because ``key`` still exists + try: + bucket.delete() + except boto.exception.S3ResponseError as e: + assert e.status == 409 + + key.delete() + bucket.delete() + + # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy' + bucket = connection.create_bucket(bucket_name) + + # create an object + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('seven') + + # should be private already but guarantee it + key.set_acl('private') + + (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key}) + assert ret == 200 + + acl = key.get_xml_acl() + assert acl == out.strip('\n') + + # add another grantee by making the object public read + 
key.set_acl('public-read') + + (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key}) + assert ret == 200 + + acl = key.get_xml_acl() + assert acl == out.strip('\n') + + # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds' + bucket = connection.create_bucket(bucket_name) + key_name = ['eight', 'nine', 'ten', 'eleven'] + for i in range(4): + key = boto.s3.key.Key(bucket) + key.set_contents_from_string(key_name[i]) + + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True}) + assert ret == 200 + + # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds' + caps = 'usage=read' + (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' : user1, 'user-caps' : caps}) + assert ret == 200 + assert out[0]['perm'] == 'read' + + # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' : user1, 'user-caps' : caps}) + assert ret == 200 + assert not out + + # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets' + bucket = connection.create_bucket(bucket_name) + key = boto.s3.key.Key(bucket) + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1}) + assert ret == 409 + + # TESTCASE 'rm-user2', 'user', 'rm', user with data', 'succeeds' + bucket = connection.create_bucket(bucket_name) + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('twelve') + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True}) + assert ret == 200 + + # TESTCASE 'rm-user3','user','info','deleted user','fails' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 404 + diff --git a/qa/tasks/radosgw_agent.py b/qa/tasks/radosgw_agent.py new file mode 100644 index 00000000000..0254805d2af --- /dev/null +++ b/qa/tasks/radosgw_agent.py @@ -0,0 +1,211 @@ +""" +Run rados gateway agent in test mode +""" +import contextlib +import logging +import argparse + +from teuthology.orchestra import run +from teuthology import misc as teuthology +import util.rgw as rgw_utils + +log = logging.getLogger(__name__) + +def run_radosgw_agent(ctx, config): + """ + Run a single radosgw-agent. See task() for config format. 
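+
+ Returns a list of (client, proc) pairs, one per agent started, so the
+ caller can later close each proc's stdin and wait for it to exit.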
+ """ + return_list = list() + for (client, cconf) in config.items(): + # don't process entries that are not clients + if not client.startswith('client.'): + log.debug('key {data} does not start with \'client.\', moving on'.format( + data=client)) + continue + + src_client = cconf['src'] + dest_client = cconf['dest'] + + src_zone = rgw_utils.zone_for_client(ctx, src_client) + dest_zone = rgw_utils.zone_for_client(ctx, dest_client) + + log.info("source is %s", src_zone) + log.info("dest is %s", dest_zone) + + testdir = teuthology.get_testdir(ctx) + (remote,) = ctx.cluster.only(client).remotes.keys() + # figure out which branch to pull from + branch = cconf.get('force-branch', None) + if not branch: + branch = cconf.get('branch', 'master') + sha1 = cconf.get('sha1') + remote.run( + args=[ + 'cd', testdir, run.Raw('&&'), + 'git', 'clone', + '-b', branch, +# 'https://github.com/ceph/radosgw-agent.git', + 'git://git.ceph.com/radosgw-agent.git', + 'radosgw-agent.{client}'.format(client=client), + ] + ) + if sha1 is not None: + remote.run( + args=[ + 'cd', testdir, run.Raw('&&'), + run.Raw('&&'), + 'git', 'reset', '--hard', sha1, + ] + ) + remote.run( + args=[ + 'cd', testdir, run.Raw('&&'), + 'cd', 'radosgw-agent.{client}'.format(client=client), + run.Raw('&&'), + './bootstrap', + ] + ) + + src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client, + src_zone) + dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client, + dest_zone) + src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client, + src_zone) + dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client, + dest_zone) + sync_scope = cconf.get('sync-scope', None) + port = cconf.get('port', 8000) + daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port) + in_args=[ + 'daemon-helper', + 'kill', + '{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir, + client=client), + '-v', + '--src-access-key', src_access, + '--src-secret-key', src_secret, + '--source', "http://{addr}:{port}".format(addr=src_host, port=src_port), + '--dest-access-key', dest_access, + '--dest-secret-key', dest_secret, + '--max-entries', str(cconf.get('max-entries', 1000)), + '--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format( + tdir=testdir, + client=client), + '--object-sync-timeout', '30', + ] + + if cconf.get('metadata-only', False): + in_args.append('--metadata-only') + + # the test server and full/incremental flags are mutually exclusive + if sync_scope is None: + in_args.append('--test-server-host') + in_args.append('0.0.0.0') + in_args.append('--test-server-port') + in_args.append(str(port)) + log.debug('Starting a sync test server on {client}'.format(client=client)) + # Stash the radosgw-agent server / port # for use by subsequent tasks + ctx.radosgw_agent.endpoint = (client, str(port)) + else: + in_args.append('--sync-scope') + in_args.append(sync_scope) + log.debug('Starting a {scope} sync on {client}'.format(scope=sync_scope,client=client)) + + # positional arg for destination must come last + in_args.append("http://{addr}:{port}".format(addr=dest_host, + port=dest_port)) + + return_list.append((client, remote.run( + args=in_args, + wait=False, + stdin=run.PIPE, + logger=log.getChild(daemon_name), + ))) + return return_list + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run radosgw-agents in test mode. + + Configuration is clients to run the agents on, with settings for + source client, destination client, and port to listen on. 
Binds + to 0.0.0.0. Port defaults to 8000. This must be run on clients + that have the correct zone root pools and rgw zone set in + ceph.conf, or the task cannot read the region information from the + cluster. + + By default, this task will start an HTTP server that will trigger full + or incremental syncs based on requests made to it. + Alternatively, a single full sync can be triggered by + specifying 'sync-scope: full' or a loop of incremental syncs can be triggered + by specifying 'sync-scope: incremental' (the loop will sleep + '--incremental-sync-delay' seconds between each sync, default is 30 seconds). + + By default, both data and metadata are synced. To only sync + metadata, for example because you want to sync between regions, + set metadata-only: true. + + An example:: + + tasks: + - ceph: + conf: + client.0: + rgw zone = foo + rgw zone root pool = .root.pool + client.1: + rgw zone = bar + rgw zone root pool = .root.pool2 + - rgw: # region configuration omitted for brevity + - radosgw-agent: + client.0: + branch: wip-next-feature-branch + src: client.0 + dest: client.1 + sync-scope: full + metadata-only: true + # port: 8000 (default) + client.1: + src: client.1 + dest: client.0 + port: 8001 + """ + assert isinstance(config, dict), 'rgw_sync_agent requires a dictionary config' + log.debug("config is %s", config) + + overrides = ctx.config.get('overrides', {}) + # merge each client section, but only if it exists in config since there isn't + # a sensible default action for this task + for client in config.iterkeys(): + if config[client]: + log.debug('config[{client}]: {data}'.format(client=client, data=config[client])) + teuthology.deep_merge(config[client], overrides.get('radosgw-agent', {})) + + ctx.radosgw_agent = argparse.Namespace() + ctx.radosgw_agent.config = config + + procs = run_radosgw_agent(ctx, config) + + ctx.radosgw_agent.procs = procs + + try: + yield + finally: + testdir = teuthology.get_testdir(ctx) + try: + for client, proc in procs: + log.info("shutting down sync agent on %s", client) + proc.stdin.close() + proc.wait() + finally: + for client, proc in procs: + ctx.cluster.only(client).run( + args=[ + 'rm', '-rf', + '{tdir}/radosgw-agent.{client}'.format(tdir=testdir, + client=client) + ] + ) diff --git a/qa/tasks/rbd.py b/qa/tasks/rbd.py new file mode 100644 index 00000000000..92db23278ab --- /dev/null +++ b/qa/tasks/rbd.py @@ -0,0 +1,506 @@ +""" +Rbd testing task +""" +import contextlib +import logging +import os + +from cStringIO import StringIO +from teuthology.orchestra import run +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.parallel import parallel +from teuthology.task.common_fs_utils import generic_mkfs +from teuthology.task.common_fs_utils import generic_mount +from teuthology.task.common_fs_utils import default_image_name + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def create_image(ctx, config): + """ + Create an rbd image. + + For example:: + + tasks: + - ceph: + - rbd.create_image: + client.0: + image_name: testimage + image_size: 100 + image_format: 1 + client.1: + + Image size is expressed as a number of megabytes; default value + is 10240. + + Image format value must be either 1 or 2; default value is 1. 
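+
+ Each image is created with a command along these lines (a sketch; the
+ real invocation is wrapped in adjust-ulimits and ceph-coverage)::
+
+ rbd -p rbd create --size <image_size> <image_name> [--format <image_format>]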
+ + """ + assert isinstance(config, dict) or isinstance(config, list), \ + "task create_image only supports a list or dictionary for configuration" + + if isinstance(config, dict): + images = config.items() + else: + images = [(role, None) for role in config] + + testdir = teuthology.get_testdir(ctx) + for role, properties in images: + if properties is None: + properties = {} + name = properties.get('image_name', default_image_name(role)) + size = properties.get('image_size', 10240) + fmt = properties.get('image_format', 1) + (remote,) = ctx.cluster.only(role).remotes.keys() + log.info('Creating image {name} with size {size}'.format(name=name, + size=size)) + args = [ + 'adjust-ulimits', + 'ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rbd', + '-p', 'rbd', + 'create', + '--size', str(size), + name, + ] + # omit format option if using the default (format 1) + # since old versions of don't support it + if int(fmt) != 1: + args += ['--format', str(fmt)] + remote.run(args=args) + try: + yield + finally: + log.info('Deleting rbd images...') + for role, properties in images: + if properties is None: + properties = {} + name = properties.get('image_name', default_image_name(role)) + (remote,) = ctx.cluster.only(role).remotes.keys() + remote.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rbd', + '-p', 'rbd', + 'rm', + name, + ], + ) + +@contextlib.contextmanager +def modprobe(ctx, config): + """ + Load the rbd kernel module.. + + For example:: + + tasks: + - ceph: + - rbd.create_image: [client.0] + - rbd.modprobe: [client.0] + """ + log.info('Loading rbd kernel module...') + for role in config: + (remote,) = ctx.cluster.only(role).remotes.keys() + remote.run( + args=[ + 'sudo', + 'modprobe', + 'rbd', + ], + ) + try: + yield + finally: + log.info('Unloading rbd kernel module...') + for role in config: + (remote,) = ctx.cluster.only(role).remotes.keys() + remote.run( + args=[ + 'sudo', + 'modprobe', + '-r', + 'rbd', + # force errors to be ignored; necessary if more + # than one device was created, which may mean + # the module isn't quite ready to go the first + # time through. + run.Raw('||'), + 'true', + ], + ) + +@contextlib.contextmanager +def dev_create(ctx, config): + """ + Map block devices to rbd images. 
+ + For example:: + + tasks: + - ceph: + - rbd.create_image: [client.0] + - rbd.modprobe: [client.0] + - rbd.dev_create: + client.0: testimage.client.0 + """ + assert isinstance(config, dict) or isinstance(config, list), \ + "task dev_create only supports a list or dictionary for configuration" + + if isinstance(config, dict): + role_images = config.items() + else: + role_images = [(role, None) for role in config] + + log.info('Creating rbd block devices...') + + testdir = teuthology.get_testdir(ctx) + + for role, image in role_images: + if image is None: + image = default_image_name(role) + (remote,) = ctx.cluster.only(role).remotes.keys() + + remote.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rbd', + '--user', role.rsplit('.')[-1], + '-p', 'rbd', + 'map', + image, + run.Raw('&&'), + # wait for the symlink to be created by udev + 'while', 'test', '!', '-e', '/dev/rbd/rbd/{image}'.format(image=image), run.Raw(';'), 'do', + 'sleep', '1', run.Raw(';'), + 'done', + ], + ) + try: + yield + finally: + log.info('Unmapping rbd devices...') + for role, image in role_images: + if image is None: + image = default_image_name(role) + (remote,) = ctx.cluster.only(role).remotes.keys() + remote.run( + args=[ + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rbd', + '-p', 'rbd', + 'unmap', + '/dev/rbd/rbd/{imgname}'.format(imgname=image), + run.Raw('&&'), + # wait for the symlink to be deleted by udev + 'while', 'test', '-e', '/dev/rbd/rbd/{image}'.format(image=image), + run.Raw(';'), + 'do', + 'sleep', '1', run.Raw(';'), + 'done', + ], + ) + + +def rbd_devname_rtn(ctx, image): + return '/dev/rbd/rbd/{image}'.format(image=image) + +def canonical_path(ctx, role, path): + """ + Determine the canonical path for a given path on the host + representing the given role. A canonical path contains no + . or .. components, and includes no symbolic links. + """ + version_fp = StringIO() + ctx.cluster.only(role).run( + args=[ 'readlink', '-f', path ], + stdout=version_fp, + ) + canonical_path = version_fp.getvalue().rstrip('\n') + version_fp.close() + return canonical_path + +@contextlib.contextmanager +def run_xfstests(ctx, config): + """ + Run xfstests over specified devices. + + Warning: both the test and scratch devices specified will be + overwritten. Normally xfstests modifies (but does not destroy) + the test device, but for now the run script used here re-makes + both filesystems. + + Note: Only one instance of xfstests can run on a single host at + a time, although this is not enforced. + + This task in its current form needs some improvement. For + example, it assumes all roles provided in the config are + clients, and that the config provided is a list of key/value + pairs. For now please use the xfstests() interface, below. 
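+ The run_xfstests.sh script itself is fetched at run time from the qa/
+ directory of ceph.git (see run_xfstests_one_client below).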
+ + For example:: + + tasks: + - ceph: + - rbd.run_xfstests: + client.0: + count: 2 + test_dev: 'test_dev' + scratch_dev: 'scratch_dev' + fs_type: 'xfs' + tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48' + """ + with parallel() as p: + for role, properties in config.items(): + p.spawn(run_xfstests_one_client, ctx, role, properties) + yield + +def run_xfstests_one_client(ctx, role, properties): + """ + Spawned routine to handle xfs tests for a single client + """ + testdir = teuthology.get_testdir(ctx) + try: + count = properties.get('count') + test_dev = properties.get('test_dev') + assert test_dev is not None, \ + "task run_xfstests requires test_dev to be defined" + test_dev = canonical_path(ctx, role, test_dev) + + scratch_dev = properties.get('scratch_dev') + assert scratch_dev is not None, \ + "task run_xfstests requires scratch_dev to be defined" + scratch_dev = canonical_path(ctx, role, scratch_dev) + + fs_type = properties.get('fs_type') + tests = properties.get('tests') + + (remote,) = ctx.cluster.only(role).remotes.keys() + + # Fetch the test script + test_root = teuthology.get_testdir(ctx) + test_script = 'run_xfstests.sh' + test_path = os.path.join(test_root, test_script) + + git_branch = 'master' + test_url = 'https://raw.github.com/ceph/ceph/{branch}/qa/{script}'.format(branch=git_branch, script=test_script) + # test_url = 'http://ceph.newdream.net/git/?p=ceph.git;a=blob_plain;hb=refs/heads/{branch};f=qa/{script}'.format(branch=git_branch, script=test_script) + + log.info('Fetching {script} for {role} from {url}'.format(script=test_script, + role=role, + url=test_url)) + args = [ 'wget', '-O', test_path, '--', test_url ] + remote.run(args=args) + + log.info('Running xfstests on {role}:'.format(role=role)) + log.info(' iteration count: {count}:'.format(count=count)) + log.info(' test device: {dev}'.format(dev=test_dev)) + log.info(' scratch device: {dev}'.format(dev=scratch_dev)) + log.info(' using fs_type: {fs_type}'.format(fs_type=fs_type)) + log.info(' tests to run: {tests}'.format(tests=tests)) + + # Note that the device paths are interpreted using + # readlink -f in order to get their canonical + # pathname (so it matches what the kernel remembers). + args = [ + '/usr/bin/sudo', + 'TESTDIR={tdir}'.format(tdir=testdir), + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + '/bin/bash', + test_path, + '-c', str(count), + '-f', fs_type, + '-t', test_dev, + '-s', scratch_dev, + ] + if tests: + args.append(tests) + remote.run(args=args, logger=log.getChild(role)) + finally: + log.info('Removing {script} on {role}'.format(script=test_script, + role=role)) + remote.run(args=['rm', '-f', test_path]) + +@contextlib.contextmanager +def xfstests(ctx, config): + """ + Run xfstests over rbd devices. This interface sets up all + required configuration automatically if not otherwise specified. + Note that only one instance of xfstests can run on a single host + at a time. By default, the set of tests specified is run once. + If a (non-zero) count value is supplied, the complete set of + tests will be run that number of times. 
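+ Unspecified properties default to a 2000 MB test image, a 10000 MB
+ scratch image, format 1 for both, an xfs filesystem, and a count of 1.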
+ + For example:: + + tasks: + - ceph: + # Image sizes are in MB + - rbd.xfstests: + client.0: + count: 3 + test_image: 'test_image' + test_size: 250 + test_format: 2 + scratch_image: 'scratch_image' + scratch_size: 250 + scratch_format: 1 + fs_type: 'xfs' + tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48' + """ + if config is None: + config = { 'all': None } + assert isinstance(config, dict) or isinstance(config, list), \ + "task xfstests only supports a list or dictionary for configuration" + if isinstance(config, dict): + config = teuthology.replace_all_with_clients(ctx.cluster, config) + runs = config.items() + else: + runs = [(role, None) for role in config] + + running_xfstests = {} + for role, properties in runs: + assert role.startswith('client.'), \ + "task xfstests can only run on client nodes" + for host, roles_for_host in ctx.cluster.remotes.items(): + if role in roles_for_host: + assert host not in running_xfstests, \ + "task xfstests allows only one instance at a time per host" + running_xfstests[host] = True + + images_config = {} + scratch_config = {} + modprobe_config = {} + image_map_config = {} + scratch_map_config = {} + xfstests_config = {} + for role, properties in runs: + if properties is None: + properties = {} + + test_image = properties.get('test_image', 'test_image.{role}'.format(role=role)) + test_size = properties.get('test_size', 2000) # 2G + test_fmt = properties.get('test_format', 1) + scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role)) + scratch_size = properties.get('scratch_size', 10000) # 10G + scratch_fmt = properties.get('scratch_format', 1) + + images_config[role] = dict( + image_name=test_image, + image_size=test_size, + image_format=test_fmt, + ) + + scratch_config[role] = dict( + image_name=scratch_image, + image_size=scratch_size, + image_format=scratch_fmt, + ) + + xfstests_config[role] = dict( + count=properties.get('count', 1), + test_dev='/dev/rbd/rbd/{image}'.format(image=test_image), + scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image), + fs_type=properties.get('fs_type', 'xfs'), + tests=properties.get('tests'), + ) + + log.info('Setting up xfstests using RBD images:') + log.info(' test ({size} MB): {image}'.format(size=test_size, + image=test_image)) + log.info(' scratch ({size} MB): {image}'.format(size=scratch_size, + image=scratch_image)) + modprobe_config[role] = None + image_map_config[role] = test_image + scratch_map_config[role] = scratch_image + + with contextutil.nested( + lambda: create_image(ctx=ctx, config=images_config), + lambda: create_image(ctx=ctx, config=scratch_config), + lambda: modprobe(ctx=ctx, config=modprobe_config), + lambda: dev_create(ctx=ctx, config=image_map_config), + lambda: dev_create(ctx=ctx, config=scratch_map_config), + lambda: run_xfstests(ctx=ctx, config=xfstests_config), + ): + yield + + +@contextlib.contextmanager +def task(ctx, config): + """ + Create and mount an rbd image. 
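+ This chains create_image, modprobe, dev_create, mkfs and mount for each
+ configured client.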
+ + For example, you can specify which clients to run on:: + + tasks: + - ceph: + - rbd: [client.0, client.1] + + There are a few image options:: + + tasks: + - ceph: + - rbd: + client.0: # uses defaults + client.1: + image_name: foo + image_size: 2048 + image_format: 2 + fs_type: xfs + + To use default options on all clients:: + + tasks: + - ceph: + - rbd: + all: + + To create 20GiB images and format them with xfs on all clients:: + + tasks: + - ceph: + - rbd: + all: + image_size: 20480 + fs_type: xfs + """ + if config is None: + config = { 'all': None } + norm_config = config + if isinstance(config, dict): + norm_config = teuthology.replace_all_with_clients(ctx.cluster, config) + if isinstance(norm_config, dict): + role_images = {} + for role, properties in norm_config.iteritems(): + if properties is None: + properties = {} + role_images[role] = properties.get('image_name') + else: + role_images = norm_config + + log.debug('rbd config is: %s', norm_config) + + with contextutil.nested( + lambda: create_image(ctx=ctx, config=norm_config), + lambda: modprobe(ctx=ctx, config=norm_config), + lambda: dev_create(ctx=ctx, config=role_images), + lambda: generic_mkfs(ctx=ctx, config=norm_config, + devname_rtn=rbd_devname_rtn), + lambda: generic_mount(ctx=ctx, config=role_images, + devname_rtn=rbd_devname_rtn), + ): + yield diff --git a/qa/tasks/rbd_fsx.py b/qa/tasks/rbd_fsx.py new file mode 100644 index 00000000000..6d55b5cf457 --- /dev/null +++ b/qa/tasks/rbd_fsx.py @@ -0,0 +1,64 @@ +""" +Run fsx on an rbd image +""" +import contextlib +import logging + +from teuthology.parallel import parallel +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run fsx on an rbd image. + + Currently this requires running as client.admin + to create a pool. + + Specify which clients to run on as a list:: + + tasks: + ceph: + rbd_fsx: + clients: [client.0, client.1] + + You can optionally change some properties of fsx: + + tasks: + ceph: + rbd_fsx: + clients: + seed: + ops: + size: + """ + log.info('starting rbd_fsx...') + with parallel() as p: + for role in config['clients']: + p.spawn(_run_one_client, ctx, config, role) + yield + +def _run_one_client(ctx, config, role): + """Spawned task that runs the client""" + testdir = teuthology.get_testdir(ctx) + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph_test_librbd_fsx', + '-d', + '-W', '-R', # mmap doesn't work with rbd + '-p', str(config.get('progress_interval', 100)), # show progress + '-P', '{tdir}/archive'.format(tdir=testdir), + '-t', str(config.get('truncbdy',1)), + '-l', str(config.get('size', 250000000)), + '-S', str(config.get('seed', 0)), + '-N', str(config.get('ops', 1000)), + 'pool_{pool}'.format(pool=role), + 'image_{image}'.format(image=role), + ], + ) diff --git a/qa/tasks/recovery_bench.py b/qa/tasks/recovery_bench.py new file mode 100644 index 00000000000..1984b97d31e --- /dev/null +++ b/qa/tasks/recovery_bench.py @@ -0,0 +1,208 @@ +""" +Recovery system benchmarking +""" +from cStringIO import StringIO + +import contextlib +import gevent +import json +import logging +import random +import time + +import ceph_manager +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Benchmark the recovery system. 
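+ Median and 99th percentile latencies per operation type are written to
+ the teuthology log for both the baseline and the recovery run.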
+ + Generates objects with smalliobench, runs it normally to get a + baseline performance measurement, then marks an OSD out and reruns + to measure performance during recovery. + + The config should be as follows: + + recovery_bench: + duration: + num_objects: + io_size: + + example: + + tasks: + - ceph: + - recovery_bench: + duration: 60 + num_objects: 500 + io_size: 4096 + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'recovery_bench task only accepts a dict for configuration' + + log.info('Beginning recovery bench...') + + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + while len(manager.get_osd_status()['up']) < num_osds: + manager.sleep(10) + + bench_proc = RecoveryBencher( + manager, + config, + ) + try: + yield + finally: + log.info('joining recovery bencher') + bench_proc.do_join() + +class RecoveryBencher: + """ + RecoveryBencher + """ + def __init__(self, manager, config): + self.ceph_manager = manager + self.ceph_manager.wait_for_clean() + + osd_status = self.ceph_manager.get_osd_status() + self.osds = osd_status['up'] + + self.config = config + if self.config is None: + self.config = dict() + + else: + def tmp(x): + """ + Local wrapper to print value. + """ + print x + self.log = tmp + + log.info("spawning thread") + + self.thread = gevent.spawn(self.do_bench) + + def do_join(self): + """ + Join the recovery bencher. This is called after the main + task exits. + """ + self.thread.get() + + def do_bench(self): + """ + Do the benchmarking. + """ + duration = self.config.get("duration", 60) + num_objects = self.config.get("num_objects", 500) + io_size = self.config.get("io_size", 4096) + + osd = str(random.choice(self.osds)) + (osd_remote,) = self.ceph_manager.ctx.cluster.only('osd.%s' % osd).remotes.iterkeys() + + testdir = teuthology.get_testdir(self.ceph_manager.ctx) + + # create the objects + osd_remote.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'smalliobench'.format(tdir=testdir), + '--use-prefix', 'recovery_bench', + '--init-only', '1', + '--num-objects', str(num_objects), + '--io-size', str(io_size), + ], + wait=True, + ) + + # baseline bench + log.info('non-recovery (baseline)') + p = osd_remote.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'smalliobench', + '--use-prefix', 'recovery_bench', + '--do-not-init', '1', + '--duration', str(duration), + '--io-size', str(io_size), + ], + stdout=StringIO(), + stderr=StringIO(), + wait=True, + ) + self.process_samples(p.stderr.getvalue()) + + self.ceph_manager.raw_cluster_cmd('osd', 'out', osd) + time.sleep(5) + + # recovery bench + log.info('recovery active') + p = osd_remote.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'smalliobench', + '--use-prefix', 'recovery_bench', + '--do-not-init', '1', + '--duration', str(duration), + '--io-size', str(io_size), + ], + stdout=StringIO(), + stderr=StringIO(), + wait=True, + ) + self.process_samples(p.stderr.getvalue()) + + self.ceph_manager.raw_cluster_cmd('osd', 'in', osd) + + def process_samples(self, input): + """ + Extract samples from the input and process the results + + :param input: input lines in JSON format + """ + lat = {} + for line in 
input.split('\n'): + try: + sample = json.loads(line) + samples = lat.setdefault(sample['type'], []) + samples.append(float(sample['latency'])) + except Exception: + pass + + for type in lat: + samples = lat[type] + samples.sort() + + num = len(samples) + + # median + if num & 1 == 1: # odd number of samples + median = samples[num / 2] + else: + median = (samples[num / 2] + samples[num / 2 - 1]) / 2 + + # 99% + ninety_nine = samples[int(num * 0.99)] + + log.info("%s: median %f, 99%% %f" % (type, median, ninety_nine)) diff --git a/qa/tasks/rep_lost_unfound_delete.py b/qa/tasks/rep_lost_unfound_delete.py new file mode 100644 index 00000000000..ae5a48d898a --- /dev/null +++ b/qa/tasks/rep_lost_unfound_delete.py @@ -0,0 +1,153 @@ +""" +Lost_unfound +""" +import logging +import ceph_manager +from teuthology import misc as teuthology +from util.rados import rados + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling of lost objects. + + A pretty rigid cluseter is brought up andtested by this task + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'lost_unfound task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + manager.sleep(10) + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_clean() + + # something that is always there + dummyfile = '/etc/fstab' + + # take an osd out until the very end + manager.kill_osd(2) + manager.mark_down_osd(2) + manager.mark_out_osd(2) + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile]) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.wait_for_recovery() + + # create old objects + for f in range(1, 10): + rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'rm', 'existed_%d' % f]) + + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.1', + 'injectargs', + '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000' + ) + + manager.kill_osd(0) + manager.mark_down_osd(0) + + for f in range(1, 10): + rados(ctx, mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) + + # bring osd.0 back up, let it peer, but don't replicate the new + # objects... + log.info('osd.0 command_args is %s' % 'foo') + log.info(ctx.daemons.get_daemon('osd', 0).command_args) + ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([ + '--osd-recovery-delay-start', '1000' + ]) + manager.revive_osd(0) + manager.mark_in_osd(0) + manager.wait_till_osd_is_up(0) + + manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.wait_till_active() + + # take out osd.1 and the only copy of those objects. 
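+ # (osd.2 is still out and osd.0 never received the most recent writes,
+ # so osd.1 holds the only current copy; losing it makes them unfound)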
+ manager.kill_osd(1) + manager.mark_down_osd(1) + manager.mark_out_osd(1) + manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it') + + # bring up osd.2 so that things would otherwise, in theory, recovery fully + manager.revive_osd(2) + manager.mark_in_osd(2) + manager.wait_till_osd_is_up(2) + + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_till_active() + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + + # verify that there are unfound objects + unfound = manager.get_num_unfound_objects() + log.info("there are %d unfound objects" % unfound) + assert unfound + + # mark stuff lost + pgs = manager.get_pg_stats() + for pg in pgs: + if pg['stat_sum']['num_objects_unfound'] > 0: + primary = 'osd.%d' % pg['acting'][0] + + # verify that i can list them direct from the osd + log.info('listing missing/lost in %s state %s', pg['pgid'], + pg['state']); + m = manager.list_pg_missing(pg['pgid']) + #log.info('%s' % m) + assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound'] + num_unfound=0 + for o in m['objects']: + if len(o['locations']) == 0: + num_unfound += 1 + assert m['num_unfound'] == num_unfound + + log.info("reverting unfound in %s on %s", pg['pgid'], primary) + manager.raw_cluster_cmd('pg', pg['pgid'], + 'mark_unfound_lost', 'delete') + else: + log.info("no unfound in %s", pg['pgid']) + + manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') + manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') + manager.wait_for_recovery() + + # verify result + for f in range(1, 10): + err = rados(ctx, mon, ['-p', 'data', 'get', 'new_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', 'data', 'get', 'existed_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', 'data', 'get', 'existing_%d' % f, '-']) + assert err + + # see if osd.1 can cope + manager.revive_osd(1) + manager.mark_in_osd(1) + manager.wait_till_osd_is_up(1) + manager.wait_for_clean() diff --git a/qa/tasks/repair_test.py b/qa/tasks/repair_test.py new file mode 100644 index 00000000000..1dd8f2fdefa --- /dev/null +++ b/qa/tasks/repair_test.py @@ -0,0 +1,213 @@ +import logging +import time + +import ceph_manager +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def setup(ctx, config): + ctx.manager.wait_for_clean() + ctx.manager.create_pool("repair_test_pool", 1) + return "repair_test_pool" + +def teardown(ctx, config, pool): + ctx.manager.remove_pool(pool) + ctx.manager.wait_for_clean() + +def run_test(ctx, config, test): + s = setup(ctx, config) + test(ctx, config, s) + teardown(ctx, config, s) + +def choose_primary(ctx): + def ret(pool, num): + log.info("Choosing primary") + return ctx.manager.get_pg_primary(pool, num) + return ret + +def choose_replica(ctx): + def ret(pool, num): + log.info("Choosing replica") + return ctx.manager.get_pg_replica(pool, num) + return ret + +def trunc(ctx): + def ret(osd, pool, obj): + log.info("truncating object") + return ctx.manager.osd_admin_socket( + osd, + ['truncobj', pool, obj, '1']) + return ret + +def dataerr(ctx): + def ret(osd, pool, obj): + log.info("injecting data err on object") + return ctx.manager.osd_admin_socket( + osd, + ['injectdataerr', pool, obj]) + return ret + +def mdataerr(ctx): + def ret(osd, 
pool, obj): + log.info("injecting mdata err on object") + return ctx.manager.osd_admin_socket( + osd, + ['injectmdataerr', pool, obj]) + return ret + +def omaperr(ctx): + def ret(osd, pool, obj): + log.info("injecting omap err on object") + return ctx.manager.osd_admin_socket(osd, ['setomapval', pool, obj, 'badkey', 'badval']); + return ret + +def gen_repair_test_1(corrupter, chooser, scrub_type): + def ret(ctx, config, pool): + log.info("starting repair test type 1") + victim_osd = chooser(pool, 0) + + # create object + log.info("doing put") + ctx.manager.do_put(pool, 'repair_test_obj', '/etc/hosts') + + # corrupt object + log.info("corrupting object") + corrupter(victim_osd, pool, 'repair_test_obj') + + # verify inconsistent + log.info("scrubbing") + ctx.manager.do_pg_scrub(pool, 0, scrub_type) + + assert ctx.manager.pg_inconsistent(pool, 0) + + # repair + log.info("repairing") + ctx.manager.do_pg_scrub(pool, 0, "repair") + + log.info("re-scrubbing") + ctx.manager.do_pg_scrub(pool, 0, scrub_type) + + # verify consistent + assert not ctx.manager.pg_inconsistent(pool, 0) + log.info("done") + return ret + +def gen_repair_test_2(chooser): + def ret(ctx, config, pool): + log.info("starting repair test type 2") + victim_osd = chooser(pool, 0) + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + # create object + log.info("doing put and setomapval") + ctx.manager.do_put(pool, 'file1', '/etc/hosts') + ctx.manager.do_rados(mon, ['-p', pool, 'setomapval', 'file1', 'key', 'val']) + ctx.manager.do_put(pool, 'file2', '/etc/hosts') + ctx.manager.do_put(pool, 'file3', '/etc/hosts') + ctx.manager.do_put(pool, 'file4', '/etc/hosts') + ctx.manager.do_put(pool, 'file5', '/etc/hosts') + ctx.manager.do_rados(mon, ['-p', pool, 'setomapval', 'file5', 'key', 'val']) + ctx.manager.do_put(pool, 'file6', '/etc/hosts') + + # corrupt object + log.info("corrupting object") + omaperr(ctx)(victim_osd, pool, 'file1') + + # verify inconsistent + log.info("scrubbing") + ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub') + + assert ctx.manager.pg_inconsistent(pool, 0) + + # Regression test for bug #4778, should still + # be inconsistent after scrub + ctx.manager.do_pg_scrub(pool, 0, 'scrub') + + assert ctx.manager.pg_inconsistent(pool, 0) + + # Additional corruptions including 2 types for file1 + log.info("corrupting more objects") + dataerr(ctx)(victim_osd, pool, 'file1') + mdataerr(ctx)(victim_osd, pool, 'file2') + trunc(ctx)(victim_osd, pool, 'file3') + omaperr(ctx)(victim_osd, pool, 'file6') + + # see still inconsistent + log.info("scrubbing") + ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub') + + assert ctx.manager.pg_inconsistent(pool, 0) + + # repair + log.info("repairing") + ctx.manager.do_pg_scrub(pool, 0, "repair") + + # Let repair clear inconsistent flag + time.sleep(10) + + # verify consistent + assert not ctx.manager.pg_inconsistent(pool, 0) + + # In the future repair might determine state of + # inconsistency itself, verify with a deep-scrub + log.info("scrubbing") + ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub') + + # verify consistent + assert not ctx.manager.pg_inconsistent(pool, 0) + + log.info("done") + return ret + +def task(ctx, config): + """ + Test [deep] repair in several situations: + Repair [Truncate, Data EIO, MData EIO] on [Primary|Replica] + + The config should be as follows: + + Must include the log-whitelist below + Must enable filestore_debug_inject_read_err config + + example: + + tasks: + - chef: + - install: + - ceph: + log-whitelist: 
['candidate had a read error', 'deep-scrub 0 missing, 1 inconsistent objects', 'deep-scrub 0 missing, 4 inconsistent objects', 'deep-scrub 1 errors', 'deep-scrub 4 errors', '!= known omap_digest', 'repair 0 missing, 1 inconsistent objects', 'repair 0 missing, 4 inconsistent objects', 'repair 1 errors, 1 fixed', 'repair 4 errors, 4 fixed', 'scrub 0 missing, 1 inconsistent', 'scrub 1 errors', 'size 1 != known size'] + conf: + osd: + filestore debug inject read err: true + - repair_test: + + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'repair_test task only accepts a dict for config' + + if not hasattr(ctx, 'manager'): + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + ctx.manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager') + ) + + tests = [ + gen_repair_test_1(mdataerr(ctx), choose_primary(ctx), "scrub"), + gen_repair_test_1(mdataerr(ctx), choose_replica(ctx), "scrub"), + gen_repair_test_1(dataerr(ctx), choose_primary(ctx), "deep-scrub"), + gen_repair_test_1(dataerr(ctx), choose_replica(ctx), "deep-scrub"), + gen_repair_test_1(trunc(ctx), choose_primary(ctx), "scrub"), + gen_repair_test_1(trunc(ctx), choose_replica(ctx), "scrub"), + gen_repair_test_2(choose_primary(ctx)), + gen_repair_test_2(choose_replica(ctx)) + ] + + for test in tests: + run_test(ctx, config, test) diff --git a/qa/tasks/rest_api.py b/qa/tasks/rest_api.py new file mode 100644 index 00000000000..f4de1866884 --- /dev/null +++ b/qa/tasks/rest_api.py @@ -0,0 +1,183 @@ +""" +Rest Api +""" +import logging +import contextlib +import time + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.orchestra import run +from tasks.ceph import DaemonGroup + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def run_rest_api_daemon(ctx, api_clients): + """ + Wrapper starts the rest api daemons + """ + if not hasattr(ctx, 'daemons'): + ctx.daemons = DaemonGroup() + remotes = ctx.cluster.only(teuthology.is_type('client')).remotes + for rems, roles in remotes.iteritems(): + for whole_id_ in roles: + if whole_id_ in api_clients: + id_ = whole_id_[len('clients'):] + run_cmd = [ + 'sudo', + 'daemon-helper', + 'kill', + 'ceph-rest-api', + '-n', + 'client.rest{id}'.format(id=id_), ] + cl_rest_id = 'client.rest{id}'.format(id=id_) + ctx.daemons.add_daemon(rems, 'restapi', + cl_rest_id, + args=run_cmd, + logger=log.getChild(cl_rest_id), + stdin=run.PIPE, + wait=False, + ) + for i in range(1, 12): + log.info('testing for ceph-rest-api try {0}'.format(i)) + run_cmd = [ + 'wget', + '-O', + '/dev/null', + '-q', + 'http://localhost:5000/api/v0.1/status' + ] + proc = rems.run( + args=run_cmd, + check_status=False + ) + if proc.exitstatus == 0: + break + time.sleep(5) + if proc.exitstatus != 0: + raise RuntimeError('Cannot contact ceph-rest-api') + try: + yield + + finally: + """ + TO DO: destroy daemons started -- modify iter_daemons_of_role + """ + teuthology.stop_daemons_of_type(ctx, 'restapi') + +@contextlib.contextmanager +def task(ctx, config): + """ + Start up rest-api. + + To start on on all clients:: + + tasks: + - ceph: + - rest-api: + + To only run on certain clients:: + + tasks: + - ceph: + - rest-api: [client.0, client.3] + + or + + tasks: + - ceph: + - rest-api: + client.0: + client.3: + + The general flow of things here is: + 1. Find clients on which rest-api is supposed to run (api_clients) + 2. Generate keyring values + 3. 
Start up ceph-rest-api daemons + On cleanup: + 4. Stop the daemons + 5. Delete keyring value files. + """ + api_clients = [] + remotes = ctx.cluster.only(teuthology.is_type('client')).remotes + log.info(remotes) + if config == None: + api_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + else: + api_clients = config + log.info(api_clients) + testdir = teuthology.get_testdir(ctx) + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) + for rems, roles in remotes.iteritems(): + for whole_id_ in roles: + if whole_id_ in api_clients: + id_ = whole_id_[len('client.'):] + keyring = '/etc/ceph/ceph.client.rest{id}.keyring'.format( + id=id_) + rems.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--create-keyring', + '--gen-key', + '--name=client.rest{id}'.format(id=id_), + '--set-uid=0', + '--cap', 'mon', 'allow *', + '--cap', 'osd', 'allow *', + '--cap', 'mds', 'allow', + keyring, + run.Raw('&&'), + 'sudo', + 'chmod', + '0644', + keyring, + ], + ) + rems.run( + args=[ + 'sudo', + 'sh', + '-c', + run.Raw("'"), + "echo", + '[client.rest{id}]'.format(id=id_), + run.Raw('>>'), + "/etc/ceph/ceph.conf", + run.Raw("'") + ] + ) + rems.run( + args=[ + 'sudo', + 'sh', + '-c', + run.Raw("'"), + 'echo', + 'restapi', + 'keyring', + '=', + '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_), + run.Raw('>>'), + '/etc/ceph/ceph.conf', + run.Raw("'"), + ] + ) + rems.run( + args=[ + 'ceph', + 'auth', + 'import', + '-i', + '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_), + ] + ) + with contextutil.nested( + lambda: run_rest_api_daemon(ctx=ctx, api_clients=api_clients),): + yield + diff --git a/qa/tasks/restart.py b/qa/tasks/restart.py new file mode 100644 index 00000000000..697345a975b --- /dev/null +++ b/qa/tasks/restart.py @@ -0,0 +1,163 @@ +""" +Daemon restart +""" +import logging +import pipes + +from teuthology import misc as teuthology +from teuthology.orchestra import run as tor + +from teuthology.orchestra import run +log = logging.getLogger(__name__) + +def restart_daemon(ctx, config, role, id_, *args): + """ + Handle restart (including the execution of the command parameters passed) + """ + log.info('Restarting {r}.{i} daemon...'.format(r=role, i=id_)) + daemon = ctx.daemons.get_daemon(role, id_) + log.debug('Waiting for exit of {r}.{i} daemon...'.format(r=role, i=id_)) + try: + daemon.wait_for_exit() + except tor.CommandFailedError as e: + log.debug('Command Failed: {e}'.format(e=e)) + if len(args) > 0: + confargs = ['--{k}={v}'.format(k=k, v=v) for k,v in zip(args[0::2], args[1::2])] + log.debug('Doing restart of {r}.{i} daemon with args: {a}...'.format(r=role, i=id_, a=confargs)) + daemon.restart_with_args(confargs) + else: + log.debug('Doing restart of {r}.{i} daemon...'.format(r=role, i=id_)) + daemon.restart() + +def get_tests(ctx, config, role, remote, testdir): + """Download restart tests""" + srcdir = '{tdir}/restart.{role}'.format(tdir=testdir, role=role) + + refspec = config.get('branch') + if refspec is None: + refspec = config.get('sha1') + if refspec is None: + refspec = config.get('tag') + if refspec is None: + refspec = 'HEAD' + log.info('Pulling restart qa/workunits from ref %s', refspec) + + remote.run( + logger=log.getChild(role), + args=[ + 'mkdir', '--', srcdir, + run.Raw('&&'), + 'git', + 'archive', + '--remote=git://git.ceph.com/ceph.git', + '%s:qa/workunits' % refspec, + run.Raw('|'), + 'tar', + '-C', srcdir, + '-x', + '-f-', + run.Raw('&&'), + 'cd', 
'--', srcdir, + run.Raw('&&'), + 'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi', + run.Raw('&&'), + 'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir), + run.Raw('>{tdir}/restarts.list'.format(tdir=testdir)), + ], + ) + restarts = sorted(teuthology.get_file( + remote, + '{tdir}/restarts.list'.format(tdir=testdir)).split('\0')) + return (srcdir, restarts) + +def task(ctx, config): + """ + Execute commands and allow daemon restart with config options. + Each process executed can output to stdout restart commands of the form: + restart + This will restart the daemon . with the specified config values once + by modifying the conf file with those values, and then replacing the old conf file + once the daemon is restarted. + This task does not kill a running daemon, it assumes the daemon will abort on an + assert specified in the config. + + tasks: + - install: + - ceph: + - restart: + exec: + client.0: + - test_backtraces.py + + """ + assert isinstance(config, dict), "task kill got invalid config" + + testdir = teuthology.get_testdir(ctx) + + try: + assert 'exec' in config, "config requires exec key with : entries" + for role, task in config['exec'].iteritems(): + log.info('restart for role {r}'.format(r=role)) + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + srcdir, restarts = get_tests(ctx, config, role, remote, testdir) + log.info('Running command on role %s host %s', role, remote.name) + spec = '{spec}'.format(spec=task[0]) + log.info('Restarts list: %s', restarts) + log.info('Spec is %s', spec) + to_run = [w for w in restarts if w == task or w.find(spec) != -1] + log.info('To run: %s', to_run) + for c in to_run: + log.info('Running restart script %s...', c) + args = [ + run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)), + ] + env = config.get('env') + if env is not None: + for var, val in env.iteritems(): + quoted_val = pipes.quote(val) + env_arg = '{var}={val}'.format(var=var, val=quoted_val) + args.append(run.Raw(env_arg)) + args.extend([ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + '{srcdir}/{c}'.format( + srcdir=srcdir, + c=c, + ), + ]) + proc = remote.run( + args=args, + stdout=tor.PIPE, + stdin=tor.PIPE, + stderr=log, + wait=False, + ) + log.info('waiting for a command from script') + while True: + l = proc.stdout.readline() + if not l or l == '': + break + log.debug('script command: {c}'.format(c=l)) + ll = l.strip() + cmd = ll.split(' ') + if cmd[0] == "done": + break + assert cmd[0] == 'restart', "script sent invalid command request to kill task" + # cmd should be: restart + # or to clear, just: restart + restart_daemon(ctx, config, cmd[1], cmd[2], *cmd[3:]) + proc.stdin.writelines(['restarted\n']) + proc.stdin.flush() + try: + proc.wait() + except tor.CommandFailedError: + raise Exception('restart task got non-zero exit status from script: {s}'.format(s=c)) + finally: + log.info('Finishing %s on %s...', task, role) + remote.run( + logger=log.getChild(role), + args=[ + 'rm', '-rf', '--', '{tdir}/restarts.list'.format(tdir=testdir), srcdir, + ], + ) diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py new file mode 100644 index 00000000000..8480380fbd6 --- /dev/null +++ b/qa/tasks/rgw.py @@ -0,0 +1,837 @@ +""" +rgw routines +""" +import argparse +import contextlib +import json +import logging +import os + +from cStringIO import StringIO + +from teuthology.orchestra import run +from teuthology import misc as teuthology +from teuthology import contextutil +from 
teuthology.orchestra.run import CommandFailedError +from util.rgw import rgwadmin +from util.rados import (rados, create_ec_pool, + create_replicated_pool, + create_cache_pool) + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def create_apache_dirs(ctx, config): + """ + Remotely create apache directories. Delete when finished. + """ + log.info('Creating apache directories...') + testdir = teuthology.get_testdir(ctx) + for client in config.iterkeys(): + ctx.cluster.only(client).run( + args=[ + 'mkdir', + '-p', + '{tdir}/apache/htdocs.{client}'.format(tdir=testdir, + client=client), + '{tdir}/apache/tmp.{client}/fastcgi_sock'.format( + tdir=testdir, + client=client), + run.Raw('&&'), + 'mkdir', + '{tdir}/archive/apache.{client}'.format(tdir=testdir, + client=client), + ], + ) + try: + yield + finally: + log.info('Cleaning up apache directories...') + for client in config.iterkeys(): + ctx.cluster.only(client).run( + args=[ + 'rm', + '-rf', + '{tdir}/apache/tmp.{client}'.format(tdir=testdir, + client=client), + run.Raw('&&'), + 'rmdir', + '{tdir}/apache/htdocs.{client}'.format(tdir=testdir, + client=client), + ], + ) + + for client in config.iterkeys(): + ctx.cluster.only(client).run( + args=[ + 'rmdir', + '{tdir}/apache'.format(tdir=testdir), + ], + check_status=False, # only need to remove once per host + ) + + +@contextlib.contextmanager +def ship_apache_configs(ctx, config, role_endpoints): + """ + Ship apache config and rgw.fgci to all clients. Clean up on termination + """ + assert isinstance(config, dict) + assert isinstance(role_endpoints, dict) + testdir = teuthology.get_testdir(ctx) + log.info('Shipping apache config and rgw.fcgi...') + src = os.path.join(os.path.dirname(__file__), 'apache.conf.template') + for client, conf in config.iteritems(): + (remote,) = ctx.cluster.only(client).remotes.keys() + system_type = teuthology.get_system_type(remote) + if not conf: + conf = {} + idle_timeout = conf.get('idle_timeout', ctx.rgw.default_idle_timeout) + if system_type == 'deb': + mod_path = '/usr/lib/apache2/modules' + print_continue = 'on' + user = 'www-data' + group = 'www-data' + apache24_modconfig = ''' + IncludeOptional /etc/apache2/mods-available/mpm_event.conf + IncludeOptional /etc/apache2/mods-available/mpm_event.load +''' + else: + mod_path = '/usr/lib64/httpd/modules' + print_continue = 'off' + user = 'apache' + group = 'apache' + apache24_modconfig = \ + 'IncludeOptional /etc/httpd/conf.modules.d/00-mpm.conf' + host, port = role_endpoints[client] + with file(src, 'rb') as f: + conf = f.read().format( + testdir=testdir, + mod_path=mod_path, + print_continue=print_continue, + host=host, + port=port, + client=client, + idle_timeout=idle_timeout, + user=user, + group=group, + apache24_modconfig=apache24_modconfig, + ) + teuthology.write_file( + remote=remote, + path='{tdir}/apache/apache.{client}.conf'.format( + tdir=testdir, + client=client), + data=conf, + ) + teuthology.write_file( + remote=remote, + path='{tdir}/apache/htdocs.{client}/rgw.fcgi'.format( + tdir=testdir, + client=client), + data="""#!/bin/sh +ulimit -c unlimited +exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring --rgw-socket-path {tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock + +""".format(tdir=testdir, client=client) + ) + remote.run( + args=[ + 'chmod', + 'a=rx', + '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir, + client=client), + ], + ) + try: + yield + finally: + log.info('Removing apache config...') + for client in config.iterkeys(): + 
ctx.cluster.only(client).run( + args=[ + 'rm', + '-f', + '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir, + client=client), + run.Raw('&&'), + 'rm', + '-f', + '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format( + tdir=testdir, + client=client), + ], + ) + + +@contextlib.contextmanager +def start_rgw(ctx, config): + """ + Start rgw on remote sites. + """ + log.info('Starting rgw...') + testdir = teuthology.get_testdir(ctx) + for client in config.iterkeys(): + (remote,) = ctx.cluster.only(client).remotes.iterkeys() + + client_config = config.get(client) + if client_config is None: + client_config = {} + log.info("rgw %s config is %s", client, client_config) + id_ = client.split('.', 1)[1] + log.info('client {client} is id {id}'.format(client=client, id=id_)) + cmd_prefix = [ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'daemon-helper', + 'term', + ] + + rgw_cmd = ['radosgw'] + + if ctx.rgw.frontend == 'apache': + rgw_cmd.extend([ + '--rgw-socket-path', + '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format( + tdir=testdir, + client=client, + ), + ]) + elif ctx.rgw.frontend == 'civetweb': + host, port = ctx.rgw.role_endpoints[client] + rgw_cmd.extend([ + '--rgw-frontends', + 'civetweb port={port}'.format(port=port), + ]) + + rgw_cmd.extend([ + '-n', client, + '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client), + '--log-file', + '/var/log/ceph/rgw.{client}.log'.format(client=client), + '--rgw_ops_log_socket_path', + '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir, + client=client), + '--foreground', + run.Raw('|'), + 'sudo', + 'tee', + '/var/log/ceph/rgw.{client}.stdout'.format(tdir=testdir, + client=client), + run.Raw('2>&1'), + ]) + + if client_config.get('valgrind'): + cmd_prefix = teuthology.get_valgrind_args( + testdir, + client, + cmd_prefix, + client_config.get('valgrind') + ) + + run_cmd = list(cmd_prefix) + run_cmd.extend(rgw_cmd) + + ctx.daemons.add_daemon( + remote, 'rgw', client, + args=run_cmd, + logger=log.getChild(client), + stdin=run.PIPE, + wait=False, + ) + + try: + yield + finally: + teuthology.stop_daemons_of_type(ctx, 'rgw') + for client in config.iterkeys(): + ctx.cluster.only(client).run( + args=[ + 'rm', + '-f', + '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir, + client=client), + ], + ) + + +@contextlib.contextmanager +def start_apache(ctx, config): + """ + Start apache on remote sites. + """ + log.info('Starting apache...') + testdir = teuthology.get_testdir(ctx) + apaches = {} + for client in config.iterkeys(): + (remote,) = ctx.cluster.only(client).remotes.keys() + system_type = teuthology.get_system_type(remote) + if system_type == 'deb': + apache_name = 'apache2' + else: + try: + remote.run( + args=[ + 'stat', + '/usr/sbin/httpd.worker', + ], + ) + apache_name = '/usr/sbin/httpd.worker' + except CommandFailedError: + apache_name = '/usr/sbin/httpd' + + proc = remote.run( + args=[ + 'adjust-ulimits', + 'daemon-helper', + 'kill', + apache_name, + '-X', + '-f', + '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir, + client=client), + ], + logger=log.getChild(client), + stdin=run.PIPE, + wait=False, + ) + apaches[client] = proc + + try: + yield + finally: + log.info('Stopping apache...') + for client, proc in apaches.iteritems(): + proc.stdin.close() + + run.wait(apaches.itervalues()) + + +def extract_user_info(client_config): + """ + Extract user info from the client config specified. Returns a dict + that includes system key information. 
+ """ + # test if there isn't a system user or if there isn't a name for that + # user, return None + if ('system user' not in client_config or + 'name' not in client_config['system user']): + return None + + user_info = dict() + user_info['system_key'] = dict( + user=client_config['system user']['name'], + access_key=client_config['system user']['access key'], + secret_key=client_config['system user']['secret key'], + ) + return user_info + + +def extract_zone_info(ctx, client, client_config): + """ + Get zone information. + :param client: dictionary of client information + :param client_config: dictionary of client configuration information + :returns: zone extracted from client and client_config information + """ + ceph_config = ctx.ceph.conf.get('global', {}) + ceph_config.update(ctx.ceph.conf.get('client', {})) + ceph_config.update(ctx.ceph.conf.get(client, {})) + for key in ['rgw zone', 'rgw region', 'rgw zone root pool']: + assert key in ceph_config, \ + 'ceph conf must contain {key} for {client}'.format(key=key, + client=client) + region = ceph_config['rgw region'] + zone = ceph_config['rgw zone'] + zone_info = dict() + for key in ['rgw control pool', 'rgw gc pool', 'rgw log pool', + 'rgw intent log pool', 'rgw usage log pool', + 'rgw user keys pool', 'rgw user email pool', + 'rgw user swift pool', 'rgw user uid pool', + 'rgw domain root']: + new_key = key.split(' ', 1)[1] + new_key = new_key.replace(' ', '_') + + if key in ceph_config: + value = ceph_config[key] + log.debug('{key} specified in ceph_config ({val})'.format( + key=key, val=value)) + zone_info[new_key] = value + else: + zone_info[new_key] = '.' + region + '.' + zone + '.' + new_key + + index_pool = '.' + region + '.' + zone + '.' + 'index_pool' + data_pool = '.' + region + '.' + zone + '.' + 'data_pool' + data_extra_pool = '.' + region + '.' + zone + '.' + 'data_extra_pool' + + zone_info['placement_pools'] = [{'key': 'default_placement', + 'val': {'index_pool': index_pool, + 'data_pool': data_pool, + 'data_extra_pool': data_extra_pool} + }] + + # these keys are meant for the zones argument in the region info. We + # insert them into zone_info with a different format and then remove them + # in the fill_in_endpoints() method + for key in ['rgw log meta', 'rgw log data']: + if key in ceph_config: + zone_info[key] = ceph_config[key] + + # these keys are meant for the zones argument in the region info. We + # insert them into zone_info with a different format and then remove them + # in the fill_in_endpoints() method + for key in ['rgw log meta', 'rgw log data']: + if key in ceph_config: + zone_info[key] = ceph_config[key] + + return region, zone, zone_info + + +def extract_region_info(region, region_info): + """ + Extract region information from the region_info parameter, using get + to set default values. + + :param region: name of the region + :param region_info: region information (in dictionary form). + :returns: dictionary of region information set from region_info, using + default values for missing fields. 
+ """ + assert isinstance(region_info['zones'], list) and region_info['zones'], \ + 'zones must be a non-empty list' + return dict( + name=region, + api_name=region_info.get('api name', region), + is_master=region_info.get('is master', False), + log_meta=region_info.get('log meta', False), + log_data=region_info.get('log data', False), + master_zone=region_info.get('master zone', region_info['zones'][0]), + placement_targets=region_info.get('placement targets', + [{'name': 'default_placement', + 'tags': []}]), + default_placement=region_info.get('default placement', + 'default_placement'), + ) + + +def assign_ports(ctx, config): + """ + Assign port numberst starting with port 7280. + """ + port = 7280 + role_endpoints = {} + for remote, roles_for_host in ctx.cluster.remotes.iteritems(): + for role in roles_for_host: + if role in config: + role_endpoints[role] = (remote.name.split('@')[1], port) + port += 1 + + return role_endpoints + + +def fill_in_endpoints(region_info, role_zones, role_endpoints): + """ + Iterate through the list of role_endpoints, filling in zone information + + :param region_info: region data + :param role_zones: region and zone information. + :param role_endpoints: endpoints being used + """ + for role, (host, port) in role_endpoints.iteritems(): + region, zone, zone_info, _ = role_zones[role] + host, port = role_endpoints[role] + endpoint = 'http://{host}:{port}/'.format(host=host, port=port) + # check if the region specified under client actually exists + # in region_info (it should, if properly configured). + # If not, throw a reasonable error + if region not in region_info: + raise Exception( + 'Region: {region} was specified but no corresponding' + ' entry was found under \'regions\''.format(region=region)) + + region_conf = region_info[region] + region_conf.setdefault('endpoints', []) + region_conf['endpoints'].append(endpoint) + + # this is the payload for the 'zones' field in the region field + zone_payload = dict() + zone_payload['endpoints'] = [endpoint] + zone_payload['name'] = zone + + # Pull the log meta and log data settings out of zone_info, if they + # exist, then pop them as they don't actually belong in the zone info + for key in ['rgw log meta', 'rgw log data']: + new_key = key.split(' ', 1)[1] + new_key = new_key.replace(' ', '_') + + if key in zone_info: + value = zone_info.pop(key) + else: + value = 'false' + + zone_payload[new_key] = value + + region_conf.setdefault('zones', []) + region_conf['zones'].append(zone_payload) + + +@contextlib.contextmanager +def configure_users(ctx, config, everywhere=False): + """ + Create users by remotely running rgwadmin commands using extracted + user information. + """ + log.info('Configuring users...') + + # extract the user info and append it to the payload tuple for the given + # client + for client, c_config in config.iteritems(): + if not c_config: + continue + user_info = extract_user_info(c_config) + if not user_info: + continue + + # For data sync the master zones and regions must have the + # system users of the secondary zones. To keep this simple, + # just create the system users on every client if regions are + # configured. 
+ clients_to_create_as = [client] + if everywhere: + clients_to_create_as = config.keys() + for client_name in clients_to_create_as: + log.debug('Creating user {user} on {client}'.format( + user=user_info['system_key']['user'], client=client)) + rgwadmin(ctx, client_name, + cmd=[ + 'user', 'create', + '--uid', user_info['system_key']['user'], + '--access-key', user_info['system_key']['access_key'], + '--secret', user_info['system_key']['secret_key'], + '--display-name', user_info['system_key']['user'], + '--system', + ], + check_status=True, + ) + + yield + + +@contextlib.contextmanager +def create_nonregion_pools(ctx, config, regions): + """Create replicated or erasure coded data pools for rgw.""" + if regions: + yield + return + + log.info('creating data pools') + for client in config.keys(): + (remote,) = ctx.cluster.only(client).remotes.iterkeys() + data_pool = '.rgw.buckets' + if ctx.rgw.ec_data_pool: + create_ec_pool(remote, data_pool, client, 64) + else: + create_replicated_pool(remote, data_pool, 64) + if ctx.rgw.cache_pools: + create_cache_pool(remote, data_pool, data_pool + '.cache', 64, + 64*1024*1024) + yield + + +@contextlib.contextmanager +def configure_regions_and_zones(ctx, config, regions, role_endpoints): + """ + Configure regions and zones from rados and rgw. + """ + if not regions: + log.debug( + 'In rgw.configure_regions_and_zones() and regions is None. ' + 'Bailing') + yield + return + + log.info('Configuring regions and zones...') + + log.debug('config is %r', config) + log.debug('regions are %r', regions) + log.debug('role_endpoints = %r', role_endpoints) + # extract the zone info + role_zones = dict([(client, extract_zone_info(ctx, client, c_config)) + for client, c_config in config.iteritems()]) + log.debug('roles_zones = %r', role_zones) + + # extract the user info and append it to the payload tuple for the given + # client + for client, c_config in config.iteritems(): + if not c_config: + user_info = None + else: + user_info = extract_user_info(c_config) + + (region, zone, zone_info) = role_zones[client] + role_zones[client] = (region, zone, zone_info, user_info) + + region_info = dict([ + (region_name, extract_region_info(region_name, r_config)) + for region_name, r_config in regions.iteritems()]) + + fill_in_endpoints(region_info, role_zones, role_endpoints) + + # clear out the old defaults + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + # removing these objects from .rgw.root and the per-zone root pools + # may or may not matter + rados(ctx, mon, + cmd=['-p', '.rgw.root', 'rm', 'region_info.default']) + rados(ctx, mon, + cmd=['-p', '.rgw.root', 'rm', 'zone_info.default']) + + for client in config.iterkeys(): + for role, (_, zone, zone_info, user_info) in role_zones.iteritems(): + rados(ctx, mon, + cmd=['-p', zone_info['domain_root'], + 'rm', 'region_info.default']) + rados(ctx, mon, + cmd=['-p', zone_info['domain_root'], + 'rm', 'zone_info.default']) + + (remote,) = ctx.cluster.only(role).remotes.keys() + for pool_info in zone_info['placement_pools']: + remote.run(args=['ceph', 'osd', 'pool', 'create', + pool_info['val']['index_pool'], '64', '64']) + if ctx.rgw.ec_data_pool: + create_ec_pool(remote, pool_info['val']['data_pool'], + zone, 64) + else: + create_replicated_pool( + remote, pool_info['val']['data_pool'], + 64) + + rgwadmin(ctx, client, + cmd=['-n', client, 'zone', 'set', '--rgw-zone', zone], + stdin=StringIO(json.dumps(dict( + zone_info.items() + user_info.items()))), + 
check_status=True) + + for region, info in region_info.iteritems(): + region_json = json.dumps(info) + log.debug('region info is: %s', region_json) + rgwadmin(ctx, client, + cmd=['-n', client, 'region', 'set'], + stdin=StringIO(region_json), + check_status=True) + if info['is_master']: + rgwadmin(ctx, client, + cmd=['-n', client, + 'region', 'default', + '--rgw-region', region], + check_status=True) + + rgwadmin(ctx, client, cmd=['-n', client, 'regionmap', 'update']) + yield + + +@contextlib.contextmanager +def task(ctx, config): + """ + Either use configure apache to run a rados gateway, or use the built-in + civetweb server. + Only one should be run per machine, since it uses a hard-coded port for + now. + + For example, to run rgw on all clients:: + + tasks: + - ceph: + - rgw: + + To only run on certain clients:: + + tasks: + - ceph: + - rgw: [client.0, client.3] + + or + + tasks: + - ceph: + - rgw: + client.0: + client.3: + + You can adjust the idle timeout for fastcgi (default is 30 seconds): + + tasks: + - ceph: + - rgw: + client.0: + idle_timeout: 90 + + To run radosgw through valgrind: + + tasks: + - ceph: + - rgw: + client.0: + valgrind: [--tool=memcheck] + client.3: + valgrind: [--tool=memcheck] + + To use civetweb instead of apache: + + tasks: + - ceph: + - rgw: + - client.0 + overrides: + rgw: + frontend: civetweb + + Note that without a modified fastcgi module e.g. with the default + one on CentOS, you must have rgw print continue = false in ceph.conf:: + + tasks: + - ceph: + conf: + global: + rgw print continue: false + - rgw: [client.0] + + To run rgws for multiple regions or zones, describe the regions + and their zones in a regions section. The endpoints will be + generated by this task. Each client must have a region, zone, + and pools assigned in ceph.conf:: + + tasks: + - install: + - ceph: + conf: + client.0: + rgw region: foo + rgw zone: foo-1 + rgw region root pool: .rgw.rroot.foo + rgw zone root pool: .rgw.zroot.foo + rgw log meta: true + rgw log data: true + client.1: + rgw region: bar + rgw zone: bar-master + rgw region root pool: .rgw.rroot.bar + rgw zone root pool: .rgw.zroot.bar + rgw log meta: true + rgw log data: true + client.2: + rgw region: bar + rgw zone: bar-secondary + rgw region root pool: .rgw.rroot.bar + rgw zone root pool: .rgw.zroot.bar-secondary + - rgw: + default_idle_timeout: 30 + ec-data-pool: true + regions: + foo: + api name: api_name # default: region name + is master: true # default: false + master zone: foo-1 # default: first zone + zones: [foo-1] + log meta: true + log data: true + placement targets: [target1, target2] # default: [] + default placement: target2 # default: '' + bar: + api name: bar-api + zones: [bar-master, bar-secondary] + client.0: + system user: + name: foo-system + access key: X2IYPSTY1072DDY1SJMC + secret key: YIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm + client.1: + system user: + name: bar1 + access key: Y2IYPSTY1072DDY1SJMC + secret key: XIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm + client.2: + system user: + name: bar2 + access key: Z2IYPSTY1072DDY1SJMC + secret key: ZIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm + """ + if config is None: + config = dict(('client.{id}'.format(id=id_), None) + for id_ in teuthology.all_roles_of_type( + ctx.cluster, 'client')) + elif isinstance(config, list): + config = dict((name, None) for name in config) + + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('rgw', {})) + + regions = {} + if 'regions' in config: + # separate region info so only clients 
are keys in config + regions = config['regions'] + del config['regions'] + + role_endpoints = assign_ports(ctx, config) + ctx.rgw = argparse.Namespace() + ctx.rgw.role_endpoints = role_endpoints + # stash the region info for later, since it was deleted from the config + # structure + ctx.rgw.regions = regions + + ctx.rgw.ec_data_pool = False + if 'ec-data-pool' in config: + ctx.rgw.ec_data_pool = bool(config['ec-data-pool']) + del config['ec-data-pool'] + ctx.rgw.default_idle_timeout = 30 + if 'default_idle_timeout' in config: + ctx.rgw.default_idle_timeout = int(config['default_idle_timeout']) + del config['default_idle_timeout'] + ctx.rgw.cache_pools = False + if 'cache-pools' in config: + ctx.rgw.cache_pools = bool(config['cache-pools']) + del config['cache-pools'] + + ctx.rgw.frontend = 'apache' + if 'frontend' in config: + ctx.rgw.frontend = config['frontend'] + del config['frontend'] + + subtasks = [ + lambda: configure_regions_and_zones( + ctx=ctx, + config=config, + regions=regions, + role_endpoints=role_endpoints, + ), + lambda: configure_users( + ctx=ctx, + config=config, + everywhere=bool(regions), + ), + lambda: create_nonregion_pools( + ctx=ctx, config=config, regions=regions), + ] + if ctx.rgw.frontend == 'apache': + subtasks.insert(0, lambda: create_apache_dirs(ctx=ctx, config=config)) + subtasks.extend([ + lambda: ship_apache_configs(ctx=ctx, config=config, + role_endpoints=role_endpoints), + lambda: start_rgw(ctx=ctx, config=config), + lambda: start_apache(ctx=ctx, config=config), + ]) + elif ctx.rgw.frontend == 'civetweb': + subtasks.extend([ + lambda: start_rgw(ctx=ctx, config=config), + ]) + else: + raise ValueError("frontend must be 'apache' or 'civetweb'") + + log.info("Using %s as radosgw frontend", ctx.rgw.frontend) + with contextutil.nested(*subtasks): + yield diff --git a/qa/tasks/rgw_logsocket.py b/qa/tasks/rgw_logsocket.py new file mode 100644 index 00000000000..6f49b00d8a4 --- /dev/null +++ b/qa/tasks/rgw_logsocket.py @@ -0,0 +1,161 @@ +""" +rgw s3tests logging wrappers +""" +from cStringIO import StringIO +from configobj import ConfigObj +import contextlib +import logging +import s3tests + +from teuthology import misc as teuthology +from teuthology import contextutil + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def download(ctx, config): + """ + Run s3tests download function + """ + return s3tests.download(ctx, config) + +def _config_user(s3tests_conf, section, user): + """ + Run s3tests user config function + """ + return s3tests._config_user(s3tests_conf, section, user) + +@contextlib.contextmanager +def create_users(ctx, config): + """ + Run s3tests user create function + """ + return s3tests.create_users(ctx, config) + +@contextlib.contextmanager +def configure(ctx, config): + """ + Run s3tests user configure function + """ + return s3tests.configure(ctx, config) + +@contextlib.contextmanager +def run_tests(ctx, config): + """ + Run remote netcat tests + """ + assert isinstance(config, dict) + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.iteritems(): + client_config['extra_args'] = [ + 's3tests.functional.test_s3:test_bucket_list_return_data', + ] +# args = [ +# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client), +# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir), +# '-w', +# '{tdir}/s3-tests'.format(tdir=testdir), +# '-v', +# 's3tests.functional.test_s3:test_bucket_list_return_data', +# ] +# if client_config is not None and 'extra_args' in 
client_config: +# args.extend(client_config['extra_args']) +# +# ctx.cluster.only(client).run( +# args=args, +# ) + + s3tests.run_tests(ctx, config) + + netcat_out = StringIO() + + for client, client_config in config.iteritems(): + ctx.cluster.only(client).run( + args = [ + 'netcat', + '-w', '5', + '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir), + ], + stdout = netcat_out, + ) + + out = netcat_out.getvalue() + + assert len(out) > 100 + + log.info('Received', out) + + yield + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run some s3-tests suite against rgw, verify opslog socket returns data + + Must restrict testing to a particular client:: + + tasks: + - ceph: + - rgw: [client.0] + - s3tests: [client.0] + + To pass extra arguments to nose (e.g. to run a certain test):: + + tasks: + - ceph: + - rgw: [client.0] + - s3tests: + client.0: + extra_args: ['test_s3:test_object_acl_grand_public_read'] + client.1: + extra_args: ['--exclude', 'test_100_continue'] + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task s3tests only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + overrides = ctx.config.get('overrides', {}) + # merge each client section, not the top level. + for (client, cconf) in config.iteritems(): + teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {})) + + log.debug('config is %s', config) + + s3tests_conf = {} + for client in clients: + s3tests_conf[client] = ConfigObj( + indent_type='', + infile={ + 'DEFAULT': + { + 'port' : 7280, + 'is_secure' : 'no', + }, + 'fixtures' : {}, + 's3 main' : {}, + 's3 alt' : {}, + } + ) + + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: create_users(ctx=ctx, config=dict( + clients=clients, + s3tests_conf=s3tests_conf, + )), + lambda: configure(ctx=ctx, config=dict( + clients=config, + s3tests_conf=s3tests_conf, + )), + lambda: run_tests(ctx=ctx, config=config), + ): + yield diff --git a/qa/tasks/s3readwrite.py b/qa/tasks/s3readwrite.py new file mode 100644 index 00000000000..9f1507ef816 --- /dev/null +++ b/qa/tasks/s3readwrite.py @@ -0,0 +1,346 @@ +""" +Run rgw s3 readwite tests +""" +from cStringIO import StringIO +import base64 +import contextlib +import logging +import os +import random +import string +import yaml + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.config import config as teuth_config +from teuthology.orchestra import run +from teuthology.orchestra.connection import split_user + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def download(ctx, config): + """ + Download the s3 tests from the git builder. + Remove downloaded s3 file upon exit. + + The context passed in should be identical to the context + passed in to the main task. 
+ """ + assert isinstance(config, dict) + log.info('Downloading s3-tests...') + testdir = teuthology.get_testdir(ctx) + for (client, cconf) in config.items(): + branch = cconf.get('force-branch', None) + if not branch: + branch = cconf.get('branch', 'master') + sha1 = cconf.get('sha1') + ctx.cluster.only(client).run( + args=[ + 'git', 'clone', + '-b', branch, + teuth_config.ceph_git_base_url + 's3-tests.git', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + if sha1 is not None: + ctx.cluster.only(client).run( + args=[ + 'cd', '{tdir}/s3-tests'.format(tdir=testdir), + run.Raw('&&'), + 'git', 'reset', '--hard', sha1, + ], + ) + try: + yield + finally: + log.info('Removing s3-tests...') + testdir = teuthology.get_testdir(ctx) + for client in config: + ctx.cluster.only(client).run( + args=[ + 'rm', + '-rf', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + + +def _config_user(s3tests_conf, section, user): + """ + Configure users for this section by stashing away keys, ids, and + email addresses. + """ + s3tests_conf[section].setdefault('user_id', user) + s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) + s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user)) + s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20))) + s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40))) + +@contextlib.contextmanager +def create_users(ctx, config): + """ + Create a default s3 user. + """ + assert isinstance(config, dict) + log.info('Creating rgw users...') + testdir = teuthology.get_testdir(ctx) + users = {'s3': 'foo'} + cached_client_user_names = dict() + for client in config['clients']: + cached_client_user_names[client] = dict() + s3tests_conf = config['s3tests_conf'][client] + s3tests_conf.setdefault('readwrite', {}) + s3tests_conf['readwrite'].setdefault('bucket', 'rwtest-' + client + '-{random}-') + s3tests_conf['readwrite'].setdefault('readers', 10) + s3tests_conf['readwrite'].setdefault('writers', 3) + s3tests_conf['readwrite'].setdefault('duration', 300) + s3tests_conf['readwrite'].setdefault('files', {}) + rwconf = s3tests_conf['readwrite'] + rwconf['files'].setdefault('num', 10) + rwconf['files'].setdefault('size', 2000) + rwconf['files'].setdefault('stddev', 500) + for section, user in users.iteritems(): + _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) + log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'], + client=client)) + + # stash the 'delete_user' flag along with user name for easier cleanup + delete_this_user = True + if 'delete_user' in s3tests_conf['s3']: + delete_this_user = s3tests_conf['s3']['delete_user'] + log.debug('delete_user set to {flag} for {client}'.format(flag=delete_this_user, client=client)) + cached_client_user_names[client][section+user] = (s3tests_conf[section]['user_id'], delete_this_user) + + # skip actual user creation if the create_user flag is set to false for this client + if 'create_user' in s3tests_conf['s3'] and s3tests_conf['s3']['create_user'] == False: + log.debug('create_user set to False, skipping user creation for {client}'.format(client=client)) + continue + else: + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'create', + '--uid', s3tests_conf[section]['user_id'], + '--display-name', 
s3tests_conf[section]['display_name'], + '--access-key', s3tests_conf[section]['access_key'], + '--secret', s3tests_conf[section]['secret_key'], + '--email', s3tests_conf[section]['email'], + ], + ) + try: + yield + finally: + for client in config['clients']: + for section, user in users.iteritems(): + #uid = '{user}.{client}'.format(user=user, client=client) + real_uid, delete_this_user = cached_client_user_names[client][section+user] + if delete_this_user: + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'rm', + '--uid', real_uid, + '--purge-data', + ], + ) + else: + log.debug('skipping delete for user {uid} on {client}'.format(uid=real_uid, client=client)) + +@contextlib.contextmanager +def configure(ctx, config): + """ + Configure the s3-tests. This includes the running of the + bootstrap code and the updating of local conf files. + """ + assert isinstance(config, dict) + log.info('Configuring s3-readwrite-tests...') + for client, properties in config['clients'].iteritems(): + s3tests_conf = config['s3tests_conf'][client] + if properties is not None and 'rgw_server' in properties: + host = None + for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + log.info('roles: ' + str(roles)) + log.info('target: ' + str(target)) + if properties['rgw_server'] in roles: + _, host = split_user(target) + assert host is not None, "Invalid client specified as the rgw_server" + s3tests_conf['s3']['host'] = host + else: + s3tests_conf['s3']['host'] = 'localhost' + + def_conf = s3tests_conf['DEFAULT'] + s3tests_conf['s3'].setdefault('port', def_conf['port']) + s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure']) + + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'cd', + '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)), + run.Raw('&&'), + './bootstrap', + ], + ) + conf_fp = StringIO() + conf = dict( + s3=s3tests_conf['s3'], + readwrite=s3tests_conf['readwrite'], + ) + yaml.safe_dump(conf, conf_fp, default_flow_style=False) + teuthology.write_file( + remote=remote, + path='{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=teuthology.get_testdir(ctx), client=client), + data=conf_fp.getvalue(), + ) + yield + + +@contextlib.contextmanager +def run_tests(ctx, config): + """ + Run the s3readwrite tests after everything is set up. + + :param ctx: Context passed to task + :param config: specific configuration information + """ + assert isinstance(config, dict) + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.iteritems(): + (remote,) = ctx.cluster.only(client).remotes.keys() + conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client)) + args = [ + '{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir), + ] + if client_config is not None and 'extra_args' in client_config: + args.extend(client_config['extra_args']) + + ctx.cluster.only(client).run( + args=args, + stdin=conf, + ) + yield + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run the s3tests-test-readwrite suite against rgw. 
+ + To run all tests on all clients:: + + tasks: + - ceph: + - rgw: + - s3readwrite: + + To restrict testing to particular clients:: + + tasks: + - ceph: + - rgw: [client.0] + - s3readwrite: [client.0] + + To run against a server on client.1:: + + tasks: + - ceph: + - rgw: [client.1] + - s3readwrite: + client.0: + rgw_server: client.1 + + To pass extra test arguments + + tasks: + - ceph: + - rgw: [client.0] + - s3readwrite: + client.0: + readwrite: + bucket: mybucket + readers: 10 + writers: 3 + duration: 600 + files: + num: 10 + size: 2000 + stddev: 500 + client.1: + ... + + To override s3 configuration + + tasks: + - ceph: + - rgw: [client.0] + - s3readwrite: + client.0: + s3: + user_id: myuserid + display_name: myname + email: my@email + access_key: myaccesskey + secret_key: mysecretkey + + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task s3tests only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + overrides = ctx.config.get('overrides', {}) + # merge each client section, not the top level. + for client in config.iterkeys(): + if not config[client]: + config[client] = {} + teuthology.deep_merge(config[client], overrides.get('s3readwrite', {})) + + log.debug('in s3readwrite, config is %s', config) + + s3tests_conf = {} + for client in clients: + if config[client] is None: + config[client] = {} + config[client].setdefault('s3', {}) + config[client].setdefault('readwrite', {}) + + s3tests_conf[client] = ({ + 'DEFAULT': + { + 'port' : 7280, + 'is_secure' : False, + }, + 'readwrite' : config[client]['readwrite'], + 's3' : config[client]['s3'], + }) + + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: create_users(ctx=ctx, config=dict( + clients=clients, + s3tests_conf=s3tests_conf, + )), + lambda: configure(ctx=ctx, config=dict( + clients=config, + s3tests_conf=s3tests_conf, + )), + lambda: run_tests(ctx=ctx, config=config), + ): + pass + yield diff --git a/qa/tasks/s3roundtrip.py b/qa/tasks/s3roundtrip.py new file mode 100644 index 00000000000..4c17144dbae --- /dev/null +++ b/qa/tasks/s3roundtrip.py @@ -0,0 +1,302 @@ +""" +Run rgw roundtrip message tests +""" +from cStringIO import StringIO +import base64 +import contextlib +import logging +import os +import random +import string +import yaml + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.config import config as teuth_config +from teuthology.orchestra import run +from teuthology.orchestra.connection import split_user + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def download(ctx, config): + """ + Download the s3 tests from the git builder. + Remove downloaded s3 file upon exit. + + The context passed in should be identical to the context + passed in to the main task. 
+ """ + assert isinstance(config, list) + log.info('Downloading s3-tests...') + testdir = teuthology.get_testdir(ctx) + for client in config: + ctx.cluster.only(client).run( + args=[ + 'git', 'clone', + teuth_config.ceph_git_base_url + 's3-tests.git', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + try: + yield + finally: + log.info('Removing s3-tests...') + for client in config: + ctx.cluster.only(client).run( + args=[ + 'rm', + '-rf', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + +def _config_user(s3tests_conf, section, user): + """ + Configure users for this section by stashing away keys, ids, and + email addresses. + """ + s3tests_conf[section].setdefault('user_id', user) + s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) + s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user)) + s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20))) + s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40))) + +@contextlib.contextmanager +def create_users(ctx, config): + """ + Create a default s3 user. + """ + assert isinstance(config, dict) + log.info('Creating rgw users...') + testdir = teuthology.get_testdir(ctx) + users = {'s3': 'foo'} + for client in config['clients']: + s3tests_conf = config['s3tests_conf'][client] + s3tests_conf.setdefault('roundtrip', {}) + s3tests_conf['roundtrip'].setdefault('bucket', 'rttest-' + client + '-{random}-') + s3tests_conf['roundtrip'].setdefault('readers', 10) + s3tests_conf['roundtrip'].setdefault('writers', 3) + s3tests_conf['roundtrip'].setdefault('duration', 300) + s3tests_conf['roundtrip'].setdefault('files', {}) + rtconf = s3tests_conf['roundtrip'] + rtconf['files'].setdefault('num', 10) + rtconf['files'].setdefault('size', 2000) + rtconf['files'].setdefault('stddev', 500) + for section, user in [('s3', 'foo')]: + _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'create', + '--uid', s3tests_conf[section]['user_id'], + '--display-name', s3tests_conf[section]['display_name'], + '--access-key', s3tests_conf[section]['access_key'], + '--secret', s3tests_conf[section]['secret_key'], + '--email', s3tests_conf[section]['email'], + ], + ) + try: + yield + finally: + for client in config['clients']: + for user in users.itervalues(): + uid = '{user}.{client}'.format(user=user, client=client) + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'rm', + '--uid', uid, + '--purge-data', + ], + ) + +@contextlib.contextmanager +def configure(ctx, config): + """ + Configure the s3-tests. This includes the running of the + bootstrap code and the updating of local conf files. 
+ """ + assert isinstance(config, dict) + log.info('Configuring s3-roundtrip-tests...') + testdir = teuthology.get_testdir(ctx) + for client, properties in config['clients'].iteritems(): + s3tests_conf = config['s3tests_conf'][client] + if properties is not None and 'rgw_server' in properties: + host = None + for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + log.info('roles: ' + str(roles)) + log.info('target: ' + str(target)) + if properties['rgw_server'] in roles: + _, host = split_user(target) + assert host is not None, "Invalid client specified as the rgw_server" + s3tests_conf['s3']['host'] = host + else: + s3tests_conf['s3']['host'] = 'localhost' + + def_conf = s3tests_conf['DEFAULT'] + s3tests_conf['s3'].setdefault('port', def_conf['port']) + s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure']) + + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'cd', + '{tdir}/s3-tests'.format(tdir=testdir), + run.Raw('&&'), + './bootstrap', + ], + ) + conf_fp = StringIO() + conf = dict( + s3=s3tests_conf['s3'], + roundtrip=s3tests_conf['roundtrip'], + ) + yaml.safe_dump(conf, conf_fp, default_flow_style=False) + teuthology.write_file( + remote=remote, + path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client), + data=conf_fp.getvalue(), + ) + yield + + +@contextlib.contextmanager +def run_tests(ctx, config): + """ + Run the s3 roundtrip after everything is set up. + + :param ctx: Context passed to task + :param config: specific configuration information + """ + assert isinstance(config, dict) + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.iteritems(): + (remote,) = ctx.cluster.only(client).remotes.keys() + conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client)) + args = [ + '{tdir}/s3-tests/virtualenv/bin/s3tests-test-roundtrip'.format(tdir=testdir), + ] + if client_config is not None and 'extra_args' in client_config: + args.extend(client_config['extra_args']) + + ctx.cluster.only(client).run( + args=args, + stdin=conf, + ) + yield + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run the s3tests-test-roundtrip suite against rgw. + + To run all tests on all clients:: + + tasks: + - ceph: + - rgw: + - s3roundtrip: + + To restrict testing to particular clients:: + + tasks: + - ceph: + - rgw: [client.0] + - s3roundtrip: [client.0] + + To run against a server on client.1:: + + tasks: + - ceph: + - rgw: [client.1] + - s3roundtrip: + client.0: + rgw_server: client.1 + + To pass extra test arguments + + tasks: + - ceph: + - rgw: [client.0] + - s3roundtrip: + client.0: + roundtrip: + bucket: mybucket + readers: 10 + writers: 3 + duration: 600 + files: + num: 10 + size: 2000 + stddev: 500 + client.1: + ... 
+ + To override s3 configuration + + tasks: + - ceph: + - rgw: [client.0] + - s3roundtrip: + client.0: + s3: + user_id: myuserid + display_name: myname + email: my@email + access_key: myaccesskey + secret_key: mysecretkey + + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task s3tests only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + s3tests_conf = {} + for client in clients: + if config[client] is None: + config[client] = {} + config[client].setdefault('s3', {}) + config[client].setdefault('roundtrip', {}) + + s3tests_conf[client] = ({ + 'DEFAULT': + { + 'port' : 7280, + 'is_secure' : False, + }, + 'roundtrip' : config[client]['roundtrip'], + 's3' : config[client]['s3'], + }) + + with contextutil.nested( + lambda: download(ctx=ctx, config=clients), + lambda: create_users(ctx=ctx, config=dict( + clients=clients, + s3tests_conf=s3tests_conf, + )), + lambda: configure(ctx=ctx, config=dict( + clients=config, + s3tests_conf=s3tests_conf, + )), + lambda: run_tests(ctx=ctx, config=config), + ): + pass + yield diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py new file mode 100644 index 00000000000..d0f6431dd5f --- /dev/null +++ b/qa/tasks/s3tests.py @@ -0,0 +1,442 @@ +""" +Run a set of s3 tests on rgw. +""" +from cStringIO import StringIO +from configobj import ConfigObj +import base64 +import contextlib +import logging +import os +import random +import string + +import util.rgw as rgw_utils + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.config import config as teuth_config +from teuthology.orchestra import run +from teuthology.orchestra.connection import split_user + +log = logging.getLogger(__name__) + +def extract_sync_client_data(ctx, client_name): + """ + Extract synchronized client rgw zone and rgw region information. 
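    For illustration only (the region and host names here are hypothetical
    and depend entirely on the rgw task configuration), a typical return
    value looks roughly like::

        ('region0', {'api_name': 'api1',
                     'is_master': True,
                     'host': 'gateway-host',
                     'port': 7280})

    with 'sync_agent_addr' and 'sync_agent_port' added to the dict when the
    zone is not in the master region.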
+ + :param ctx: Context passed to the s3tests task + :param name: Name of client that we are synching with + """ + return_region_name = None + return_dict = None + client = ctx.ceph.conf.get(client_name, None) + if client: + current_client_zone = client.get('rgw zone', None) + if current_client_zone: + (endpoint_host, endpoint_port) = ctx.rgw.role_endpoints.get(client_name, (None, None)) + # pull out the radosgw_agent stuff + regions = ctx.rgw.regions + for region in regions: + log.debug('jbuck, region is {region}'.format(region=region)) + region_data = ctx.rgw.regions[region] + log.debug('region data is {region}'.format(region=region_data)) + zones = region_data['zones'] + for zone in zones: + if current_client_zone in zone: + return_region_name = region + return_dict = dict() + return_dict['api_name'] = region_data['api name'] + return_dict['is_master'] = region_data['is master'] + return_dict['port'] = endpoint_port + return_dict['host'] = endpoint_host + + # The s3tests expect the sync_agent_[addr|port} to be + # set on the non-master node for some reason + if not region_data['is master']: + (rgwagent_host, rgwagent_port) = ctx.radosgw_agent.endpoint + (return_dict['sync_agent_addr'], _) = ctx.rgw.role_endpoints[rgwagent_host] + return_dict['sync_agent_port'] = rgwagent_port + + else: #if client_zone: + log.debug('No zone info for {host}'.format(host=client_name)) + else: # if client + log.debug('No ceph conf for {host}'.format(host=client_name)) + + return return_region_name, return_dict + +def update_conf_with_region_info(ctx, config, s3tests_conf): + """ + Scan for a client (passed in s3tests_conf) that is an s3agent + with which we can sync. Update information in local conf file + if such a client is found. + """ + for key in s3tests_conf.keys(): + # we'll assume that there's only one sync relationship (source / destination) with client.X + # as the key for now + + # Iterate through all of the radosgw_agent (rgwa) configs and see if a + # given client is involved in a relationship. 
+ # If a given client isn't, skip it + this_client_in_rgwa_config = False + for rgwa in ctx.radosgw_agent.config.keys(): + rgwa_data = ctx.radosgw_agent.config[rgwa] + + if key in rgwa_data['src'] or key in rgwa_data['dest']: + this_client_in_rgwa_config = True + log.debug('{client} is in an radosgw-agent sync relationship'.format(client=key)) + radosgw_sync_data = ctx.radosgw_agent.config[key] + break + if not this_client_in_rgwa_config: + log.debug('{client} is NOT in an radosgw-agent sync relationship'.format(client=key)) + continue + + source_client = radosgw_sync_data['src'] + dest_client = radosgw_sync_data['dest'] + + # #xtract the pertinent info for the source side + source_region_name, source_region_dict = extract_sync_client_data(ctx, source_client) + log.debug('\t{key} source_region {source_region} source_dict {source_dict}'.format + (key=key,source_region=source_region_name,source_dict=source_region_dict)) + + # The source *should* be the master region, but test anyway and then set it as the default region + if source_region_dict['is_master']: + log.debug('Setting {region} as default_region'.format(region=source_region_name)) + s3tests_conf[key]['fixtures'].setdefault('default_region', source_region_name) + + # Extract the pertinent info for the destination side + dest_region_name, dest_region_dict = extract_sync_client_data(ctx, dest_client) + log.debug('\t{key} dest_region {dest_region} dest_dict {dest_dict}'.format + (key=key,dest_region=dest_region_name,dest_dict=dest_region_dict)) + + # now add these regions to the s3tests_conf object + s3tests_conf[key]['region {region_name}'.format(region_name=source_region_name)] = source_region_dict + s3tests_conf[key]['region {region_name}'.format(region_name=dest_region_name)] = dest_region_dict + +@contextlib.contextmanager +def download(ctx, config): + """ + Download the s3 tests from the git builder. + Remove downloaded s3 file upon exit. + + The context passed in should be identical to the context + passed in to the main task. + """ + assert isinstance(config, dict) + log.info('Downloading s3-tests...') + testdir = teuthology.get_testdir(ctx) + for (client, cconf) in config.items(): + branch = cconf.get('force-branch', None) + if not branch: + ceph_branch = ctx.config.get('branch') + suite_branch = ctx.config.get('suite_branch', ceph_branch) + branch = cconf.get('branch', suite_branch) + if not branch: + raise ValueError( + "Could not determine what branch to use for s3tests!") + else: + log.info("Using branch '%s' for s3tests", branch) + sha1 = cconf.get('sha1') + ctx.cluster.only(client).run( + args=[ + 'git', 'clone', + '-b', branch, + teuth_config.ceph_git_base_url + 's3-tests.git', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + if sha1 is not None: + ctx.cluster.only(client).run( + args=[ + 'cd', '{tdir}/s3-tests'.format(tdir=testdir), + run.Raw('&&'), + 'git', 'reset', '--hard', sha1, + ], + ) + try: + yield + finally: + log.info('Removing s3-tests...') + testdir = teuthology.get_testdir(ctx) + for client in config: + ctx.cluster.only(client).run( + args=[ + 'rm', + '-rf', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + + +def _config_user(s3tests_conf, section, user): + """ + Configure users for this section by stashing away keys, ids, and + email addresses. + """ + s3tests_conf[section].setdefault('user_id', user) + s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) + s3tests_conf[section].setdefault('display_name', 'Mr. 
{user}'.format(user=user)) + s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20))) + s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40))) + + +@contextlib.contextmanager +def create_users(ctx, config): + """ + Create a main and an alternate s3 user. + """ + assert isinstance(config, dict) + log.info('Creating rgw users...') + testdir = teuthology.get_testdir(ctx) + users = {'s3 main': 'foo', 's3 alt': 'bar'} + for client in config['clients']: + s3tests_conf = config['s3tests_conf'][client] + s3tests_conf.setdefault('fixtures', {}) + s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-') + for section, user in users.iteritems(): + _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) + log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client)) + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'create', + '--uid', s3tests_conf[section]['user_id'], + '--display-name', s3tests_conf[section]['display_name'], + '--access-key', s3tests_conf[section]['access_key'], + '--secret', s3tests_conf[section]['secret_key'], + '--email', s3tests_conf[section]['email'], + ], + ) + try: + yield + finally: + for client in config['clients']: + for user in users.itervalues(): + uid = '{user}.{client}'.format(user=user, client=client) + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'rm', + '--uid', uid, + '--purge-data', + ], + ) + + +@contextlib.contextmanager +def configure(ctx, config): + """ + Configure the s3-tests. This includes the running of the + bootstrap code and the updating of local conf files. 
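    A rough sketch of the INI file this writes to
    {tdir}/archive/s3-tests.{client}.conf for client.0, assuming the task
    defaults and the users created by create_users (the randomly generated
    access_key/secret_key entries are left out)::

        [DEFAULT]
        host = localhost
        port = 7280
        is_secure = no

        [fixtures]
        bucket prefix = test-client.0-{random}-

        [s3 main]
        user_id = foo.client.0
        display_name = Mr. foo.client.0
        email = foo.client.0+test@test.test

        [s3 alt]
        user_id = bar.client.0
        display_name = Mr. bar.client.0
        email = bar.client.0+test@test.test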
+ """ + assert isinstance(config, dict) + log.info('Configuring s3-tests...') + testdir = teuthology.get_testdir(ctx) + for client, properties in config['clients'].iteritems(): + s3tests_conf = config['s3tests_conf'][client] + if properties is not None and 'rgw_server' in properties: + host = None + for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + log.info('roles: ' + str(roles)) + log.info('target: ' + str(target)) + if properties['rgw_server'] in roles: + _, host = split_user(target) + assert host is not None, "Invalid client specified as the rgw_server" + s3tests_conf['DEFAULT']['host'] = host + else: + s3tests_conf['DEFAULT']['host'] = 'localhost' + + if properties is not None and 'slow_backend' in properties: + s3tests_conf['fixtures']['slow backend'] = properties['slow_backend'] + + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'cd', + '{tdir}/s3-tests'.format(tdir=testdir), + run.Raw('&&'), + './bootstrap', + ], + ) + conf_fp = StringIO() + s3tests_conf.write(conf_fp) + teuthology.write_file( + remote=remote, + path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client), + data=conf_fp.getvalue(), + ) + + log.info('Configuring boto...') + boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template') + for client, properties in config['clients'].iteritems(): + with file(boto_src, 'rb') as f: + (remote,) = ctx.cluster.only(client).remotes.keys() + conf = f.read().format( + idle_timeout=config.get('idle_timeout', 30) + ) + teuthology.write_file( + remote=remote, + path='{tdir}/boto.cfg'.format(tdir=testdir), + data=conf, + ) + + try: + yield + + finally: + log.info('Cleaning up boto...') + for client, properties in config['clients'].iteritems(): + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'rm', + '{tdir}/boto.cfg'.format(tdir=testdir), + ], + ) + +@contextlib.contextmanager +def sync_users(ctx, config): + """ + Sync this user. + """ + assert isinstance(config, dict) + # do a full sync if this is a multi-region test + if rgw_utils.multi_region_enabled(ctx): + log.debug('Doing a full sync') + rgw_utils.radosgw_agent_sync_all(ctx) + else: + log.debug('Not a multi-region config; skipping the metadata sync') + + yield + +@contextlib.contextmanager +def run_tests(ctx, config): + """ + Run the s3tests after everything is set up. + + :param ctx: Context passed to task + :param config: specific configuration information + """ + assert isinstance(config, dict) + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.iteritems(): + args = [ + 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client), + 'BOTO_CONFIG={tdir}/boto.cfg'.format(tdir=testdir), + '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir), + '-w', + '{tdir}/s3-tests'.format(tdir=testdir), + '-v', + '-a', '!fails_on_rgw', + ] + if client_config is not None and 'extra_args' in client_config: + args.extend(client_config['extra_args']) + + ctx.cluster.only(client).run( + args=args, + label="s3 tests against rgw" + ) + yield + +@contextlib.contextmanager +def task(ctx, config): + """ + Run the s3-tests suite against rgw. 
+ + To run all tests on all clients:: + + tasks: + - ceph: + - rgw: + - s3tests: + + To restrict testing to particular clients:: + + tasks: + - ceph: + - rgw: [client.0] + - s3tests: [client.0] + + To run against a server on client.1 and increase the boto timeout to 10m:: + + tasks: + - ceph: + - rgw: [client.1] + - s3tests: + client.0: + rgw_server: client.1 + idle_timeout: 600 + + To pass extra arguments to nose (e.g. to run a certain test):: + + tasks: + - ceph: + - rgw: [client.0] + - s3tests: + client.0: + extra_args: ['test_s3:test_object_acl_grand_public_read'] + client.1: + extra_args: ['--exclude', 'test_100_continue'] + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task s3tests only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + overrides = ctx.config.get('overrides', {}) + # merge each client section, not the top level. + for client in config.iterkeys(): + if not config[client]: + config[client] = {} + teuthology.deep_merge(config[client], overrides.get('s3tests', {})) + + log.debug('s3tests config is %s', config) + + s3tests_conf = {} + for client in clients: + s3tests_conf[client] = ConfigObj( + indent_type='', + infile={ + 'DEFAULT': + { + 'port' : 7280, + 'is_secure' : 'no', + }, + 'fixtures' : {}, + 's3 main' : {}, + 's3 alt' : {}, + } + ) + + # Only attempt to add in the region info if there's a radosgw_agent configured + if hasattr(ctx, 'radosgw_agent'): + update_conf_with_region_info(ctx, config, s3tests_conf) + + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: create_users(ctx=ctx, config=dict( + clients=clients, + s3tests_conf=s3tests_conf, + )), + lambda: sync_users(ctx=ctx, config=config), + lambda: configure(ctx=ctx, config=dict( + clients=config, + s3tests_conf=s3tests_conf, + )), + lambda: run_tests(ctx=ctx, config=config), + ): + pass + yield diff --git a/qa/tasks/samba.py b/qa/tasks/samba.py new file mode 100644 index 00000000000..d79bb2a05e9 --- /dev/null +++ b/qa/tasks/samba.py @@ -0,0 +1,243 @@ +""" +Samba +""" +import contextlib +import logging +import sys + +from teuthology import misc as teuthology +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def get_sambas(ctx, roles): + """ + Scan for roles that are samba. Yield the id of the the samba role + (samba.0, samba.1...) and the associated remote site + + :param ctx: Context + :param roles: roles for this test (extracted from yaml files) + """ + for role in roles: + assert isinstance(role, basestring) + PREFIX = 'samba.' + assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + yield (id_, remote) + +@contextlib.contextmanager +def task(ctx, config): + """ + Setup samba smbd with ceph vfs module. This task assumes the samba + package has already been installed via the install task. + + The config is optional and defaults to starting samba on all nodes. + If a config is given, it is expected to be a list of + samba nodes to start smbd servers on. 
+ + Example that starts smbd on all samba nodes:: + + tasks: + - install: + - install: + project: samba + extra_packages: ['samba'] + - ceph: + - samba: + - interactive: + + Example that starts smbd on just one of the samba nodes and cifs on the other:: + + tasks: + - samba: [samba.0] + - cifs: [samba.1] + + An optional backend can be specified, and requires a path which smbd will + use as the backend storage location: + + roles: + - [osd.0, osd.1, osd.2, mon.0, mon.1, mon.2, mds.a] + - [client.0, samba.0] + + tasks: + - ceph: + - ceph-fuse: [client.0] + - samba: + samba.0: + cephfuse: "{testdir}/mnt.0" + + This mounts ceph to {testdir}/mnt.0 using fuse, and starts smbd with + a UNC of //localhost/cephfuse. Access through that UNC will be on + the ceph fuse mount point. + + If no arguments are specified in the samba + role, the default behavior is to enable the ceph UNC //localhost/ceph + and use the ceph vfs module as the smbd backend. + + :param ctx: Context + :param config: Configuration + """ + log.info("Setting up smbd with ceph vfs...") + assert config is None or isinstance(config, list) or isinstance(config, dict), \ + "task samba got invalid config" + + if config is None: + config = dict(('samba.{id}'.format(id=id_), None) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')) + elif isinstance(config, list): + config = dict((name, None) for name in config) + + samba_servers = list(get_sambas(ctx=ctx, roles=config.keys())) + + testdir = teuthology.get_testdir(ctx) + + from tasks.ceph import DaemonGroup + if not hasattr(ctx, 'daemons'): + ctx.daemons = DaemonGroup() + + for id_, remote in samba_servers: + + rolestr = "samba.{id_}".format(id_=id_) + + confextras = """vfs objects = ceph + ceph:config_file = /etc/ceph/ceph.conf""" + + unc = "ceph" + backend = "/" + + if config[rolestr] is not None: + # verify that there's just one parameter in role + if len(config[rolestr]) != 1: + log.error("samba config for role samba.{id_} must have only one parameter".format(id_=id_)) + raise Exception('invalid config') + confextras = "" + (unc, backendstr) = config[rolestr].items()[0] + backend = backendstr.format(testdir=testdir) + + # on first samba role, set ownership and permissions of ceph root + # so that samba tests succeed + if config[rolestr] is None and id_ == samba_servers[0][0]: + remote.run( + args=[ + 'mkdir', '-p', '/tmp/cmnt', run.Raw('&&'), + 'sudo', 'ceph-fuse', '/tmp/cmnt', run.Raw('&&'), + 'sudo', 'chown', 'ubuntu:ubuntu', '/tmp/cmnt/', run.Raw('&&'), + 'sudo', 'chmod', '1777', '/tmp/cmnt/', run.Raw('&&'), + 'sudo', 'umount', '/tmp/cmnt/', run.Raw('&&'), + 'rm', '-rf', '/tmp/cmnt', + ], + ) + else: + remote.run( + args=[ + 'sudo', 'chown', 'ubuntu:ubuntu', backend, run.Raw('&&'), + 'sudo', 'chmod', '1777', backend, + ], + ) + + teuthology.sudo_write_file(remote, "/usr/local/samba/etc/smb.conf", """ +[global] + workgroup = WORKGROUP + netbios name = DOMAIN + +[{unc}] + path = {backend} + {extras} + writeable = yes + valid users = ubuntu +""".format(extras=confextras, unc=unc, backend=backend)) + + # create ubuntu user + remote.run( + args=[ + 'sudo', '/usr/local/samba/bin/smbpasswd', '-e', 'ubuntu', + run.Raw('||'), + 'printf', run.Raw('"ubuntu\nubuntu\n"'), + run.Raw('|'), + 'sudo', '/usr/local/samba/bin/smbpasswd', '-s', '-a', 'ubuntu' + ]) + + smbd_cmd = [ + 'sudo', + 'daemon-helper', + 'term', + 'nostdin', + '/usr/local/samba/sbin/smbd', + '-F', + ] + ctx.daemons.add_daemon(remote, 'smbd', id_, + args=smbd_cmd, + logger=log.getChild("smbd.{id_}".format(id_=id_)), + 
stdin=run.PIPE, + wait=False, + ) + + # let smbd initialize, probably a better way... + import time + seconds_to_sleep = 100 + log.info('Sleeping for %s seconds...' % seconds_to_sleep) + time.sleep(seconds_to_sleep) + log.info('Sleeping stopped...') + + try: + yield + finally: + log.info('Stopping smbd processes...') + exc_info = (None, None, None) + for d in ctx.daemons.iter_daemons_of_role('smbd'): + try: + d.stop() + except (run.CommandFailedError, + run.CommandCrashedError, + run.ConnectionLostError): + exc_info = sys.exc_info() + log.exception('Saw exception from %s.%s', d.role, d.id_) + if exc_info != (None, None, None): + raise exc_info[0], exc_info[1], exc_info[2] + + for id_, remote in samba_servers: + remote.run( + args=[ + 'sudo', + 'rm', '-rf', + '/usr/local/samba/etc/smb.conf', + '/usr/local/samba/private/*', + '/usr/local/samba/var/run/', + '/usr/local/samba/var/locks', + '/usr/local/samba/var/lock', + ], + ) + # make sure daemons are gone + try: + remote.run( + args=[ + 'while', + 'sudo', 'killall', '-9', 'smbd', + run.Raw(';'), + 'do', 'sleep', '1', + run.Raw(';'), + 'done', + ], + ) + + remote.run( + args=[ + 'sudo', + 'lsof', + backend, + ], + check_status=False + ) + remote.run( + args=[ + 'sudo', + 'fuser', + '-M', + backend, + ], + check_status=False + ) + except Exception: + log.exception("Saw exception") + pass diff --git a/qa/tasks/scrub.py b/qa/tasks/scrub.py new file mode 100644 index 00000000000..7a25300a677 --- /dev/null +++ b/qa/tasks/scrub.py @@ -0,0 +1,117 @@ +""" +Scrub osds +""" +import contextlib +import gevent +import logging +import random +import time + +import ceph_manager +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run scrub periodically. Randomly chooses an OSD to scrub. + + The config should be as follows: + + scrub: + frequency: + deep: + + example: + + tasks: + - ceph: + - scrub: + frequency: 30 + deep: 0 + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'scrub task only accepts a dict for configuration' + + log.info('Beginning scrub...') + + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + while len(manager.get_osd_status()['up']) < num_osds: + manager.sleep(10) + + scrub_proc = Scrubber( + manager, + config, + ) + try: + yield + finally: + log.info('joining scrub') + scrub_proc.do_join() + +class Scrubber: + """ + Scrubbing is actually performed during initialzation + """ + def __init__(self, manager, config): + """ + Spawn scrubbing thread upon completion. 
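        Each pass of the background thread picks a random up OSD and, in
        effect, issues one of the following (deep-scrub when the 'deep'
        option is set), then sleeps for 'frequency' seconds::

            ceph osd scrub <osd-id>
            ceph osd deep-scrub <osd-id>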
+ """ + self.ceph_manager = manager + self.ceph_manager.wait_for_clean() + + osd_status = self.ceph_manager.get_osd_status() + self.osds = osd_status['up'] + + self.config = config + if self.config is None: + self.config = dict() + + else: + def tmp(x): + """Local display""" + print x + self.log = tmp + + self.stopping = False + + log.info("spawning thread") + + self.thread = gevent.spawn(self.do_scrub) + + def do_join(self): + """Scrubbing thread finished""" + self.stopping = True + self.thread.get() + + def do_scrub(self): + """Perform the scrub operation""" + frequency = self.config.get("frequency", 30) + deep = self.config.get("deep", 0) + + log.info("stopping %s" % self.stopping) + + while not self.stopping: + osd = str(random.choice(self.osds)) + + if deep: + cmd = 'deep-scrub' + else: + cmd = 'scrub' + + log.info('%sbing %s' % (cmd, osd)) + self.ceph_manager.raw_cluster_cmd('osd', cmd, osd) + + time.sleep(frequency) diff --git a/qa/tasks/scrub_test.py b/qa/tasks/scrub_test.py new file mode 100644 index 00000000000..3443ae9f45e --- /dev/null +++ b/qa/tasks/scrub_test.py @@ -0,0 +1,199 @@ +"""Scrub testing""" +from cStringIO import StringIO + +import logging +import os +import time + +import ceph_manager +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test [deep] scrub + + tasks: + - chef: + - install: + - ceph: + log-whitelist: + - '!= known digest' + - '!= known omap_digest' + - deep-scrub 0 missing, 1 inconsistent objects + - deep-scrub 1 errors + - repair 0 missing, 1 inconsistent objects + - repair 1 errors, 1 fixed + - scrub_test: + + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'scrub_test task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + log.info('num_osds is %s' % num_osds) + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < num_osds: + time.sleep(10) + + for i in range(num_osds): + manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'flush_pg_stats') + manager.wait_for_clean() + + # write some data + p = manager.do_rados(mon, ['-p', 'rbd', 'bench', '--no-cleanup', '1', 'write', '-b', '4096']) + err = p.exitstatus + log.info('err is %d' % err) + + # wait for some PG to have data that we can mess with + victim = None + osd = None + while victim is None: + stats = manager.get_pg_stats() + for pg in stats: + size = pg['stat_sum']['num_bytes'] + if size > 0: + victim = pg['pgid'] + osd = pg['acting'][0] + break + + if victim is None: + time.sleep(3) + + log.info('messing with PG %s on osd %d' % (victim, osd)) + + (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys() + data_path = os.path.join( + '/var/lib/ceph/osd', + 'ceph-{id}'.format(id=osd), + 'current', + '{pg}_head'.format(pg=victim) + ) + + # fuzz time + ls_fp = StringIO() + osd_remote.run( + args=[ 'ls', data_path ], + stdout=ls_fp, + ) + ls_out = ls_fp.getvalue() + ls_fp.close() + + # find an object file we can mess with + osdfilename = None + for line in ls_out.split('\n'): + if 'object' in line: + osdfilename = line + break + assert osdfilename is not None + + # Get actual object name from osd stored filename + tmp=osdfilename.split('__') + objname=tmp[0] + objname=objname.replace('\u', '_') + log.info('fuzzing %s' % objname) + + # put a single \0 
at the beginning of the file + osd_remote.run( + args=[ 'sudo', 'dd', + 'if=/dev/zero', + 'of=%s' % os.path.join(data_path, osdfilename), + 'bs=1', 'count=1', 'conv=notrunc' + ] + ) + + # scrub, verify inconsistent + manager.raw_cluster_cmd('pg', 'deep-scrub', victim) + # Give deep-scrub a chance to start + time.sleep(60) + + while True: + stats = manager.get_single_pg_stats(victim) + state = stats['state'] + + # wait for the scrub to finish + if 'scrubbing' in state: + time.sleep(3) + continue + + inconsistent = stats['state'].find('+inconsistent') != -1 + assert inconsistent + break + + + # repair, verify no longer inconsistent + manager.raw_cluster_cmd('pg', 'repair', victim) + # Give repair a chance to start + time.sleep(60) + + while True: + stats = manager.get_single_pg_stats(victim) + state = stats['state'] + + # wait for the scrub to finish + if 'scrubbing' in state: + time.sleep(3) + continue + + inconsistent = stats['state'].find('+inconsistent') != -1 + assert not inconsistent + break + + # Test deep-scrub with various omap modifications + manager.do_rados(mon, ['-p', 'rbd', 'setomapval', objname, 'key', 'val']) + manager.do_rados(mon, ['-p', 'rbd', 'setomapheader', objname, 'hdr']) + + # Modify omap on specific osd + log.info('fuzzing omap of %s' % objname) + manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'key']); + manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname, 'badkey', 'badval']); + manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'badhdr']); + + # scrub, verify inconsistent + manager.raw_cluster_cmd('pg', 'deep-scrub', victim) + # Give deep-scrub a chance to start + time.sleep(60) + + while True: + stats = manager.get_single_pg_stats(victim) + state = stats['state'] + + # wait for the scrub to finish + if 'scrubbing' in state: + time.sleep(3) + continue + + inconsistent = stats['state'].find('+inconsistent') != -1 + assert inconsistent + break + + # repair, verify no longer inconsistent + manager.raw_cluster_cmd('pg', 'repair', victim) + # Give repair a chance to start + time.sleep(60) + + while True: + stats = manager.get_single_pg_stats(victim) + state = stats['state'] + + # wait for the scrub to finish + if 'scrubbing' in state: + time.sleep(3) + continue + + inconsistent = stats['state'].find('+inconsistent') != -1 + assert not inconsistent + break + + log.info('test successful!') diff --git a/qa/tasks/test/__init__.py b/qa/tasks/test/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/tasks/test/test_devstack.py b/qa/tasks/test/test_devstack.py new file mode 100644 index 00000000000..117b3076818 --- /dev/null +++ b/qa/tasks/test/test_devstack.py @@ -0,0 +1,48 @@ +from textwrap import dedent + +from .. 
import devstack + + +class TestDevstack(object): + def test_parse_os_table(self): + table_str = dedent(""" + +---------------------+--------------------------------------+ + | Property | Value | + +---------------------+--------------------------------------+ + | attachments | [] | + | availability_zone | nova | + | bootable | false | + | created_at | 2014-02-21T17:14:47.548361 | + | display_description | None | + | display_name | NAME | + | id | ffdbd1bb-60dc-4d95-acfe-88774c09ad3e | + | metadata | {} | + | size | 1 | + | snapshot_id | None | + | source_volid | None | + | status | creating | + | volume_type | None | + +---------------------+--------------------------------------+ + """).strip() + expected = { + 'Property': 'Value', + 'attachments': '[]', + 'availability_zone': 'nova', + 'bootable': 'false', + 'created_at': '2014-02-21T17:14:47.548361', + 'display_description': 'None', + 'display_name': 'NAME', + 'id': 'ffdbd1bb-60dc-4d95-acfe-88774c09ad3e', + 'metadata': '{}', + 'size': '1', + 'snapshot_id': 'None', + 'source_volid': 'None', + 'status': 'creating', + 'volume_type': 'None'} + + vol_info = devstack.parse_os_table(table_str) + assert vol_info == expected + + + + diff --git a/qa/tasks/tgt.py b/qa/tasks/tgt.py new file mode 100644 index 00000000000..c2b322e0829 --- /dev/null +++ b/qa/tasks/tgt.py @@ -0,0 +1,177 @@ +""" +Task to handle tgt + +Assumptions made: + The ceph-extras tgt package may need to get installed. + The open-iscsi package needs to get installed. +""" +import logging +import contextlib + +from teuthology import misc as teuthology +from teuthology import contextutil + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def start_tgt_remotes(ctx, start_tgtd): + """ + This subtask starts up a tgtd on the clients specified + """ + remotes = ctx.cluster.only(teuthology.is_type('client')).remotes + tgtd_list = [] + for rem, roles in remotes.iteritems(): + for _id in roles: + if _id in start_tgtd: + if not rem in tgtd_list: + tgtd_list.append(rem) + size = ctx.config.get('image_size', 10240) + rem.run( + args=[ + 'rbd', + 'create', + 'iscsi-image', + '--size', + str(size), + ]) + rem.run( + args=[ + 'sudo', + 'tgtadm', + '--lld', + 'iscsi', + '--mode', + 'target', + '--op', + 'new', + '--tid', + '1', + '--targetname', + 'rbd', + ]) + rem.run( + args=[ + 'sudo', + 'tgtadm', + '--lld', + 'iscsi', + '--mode', + 'logicalunit', + '--op', + 'new', + '--tid', + '1', + '--lun', + '1', + '--backing-store', + 'iscsi-image', + '--bstype', + 'rbd', + ]) + rem.run( + args=[ + 'sudo', + 'tgtadm', + '--lld', + 'iscsi', + '--op', + 'bind', + '--mode', + 'target', + '--tid', + '1', + '-I', + 'ALL', + ]) + try: + yield + + finally: + for rem in tgtd_list: + rem.run( + args=[ + 'sudo', + 'tgtadm', + '--lld', + 'iscsi', + '--mode', + 'target', + '--op', + 'delete', + '--force', + '--tid', + '1', + ]) + rem.run( + args=[ + 'rbd', + 'snap', + 'purge', + 'iscsi-image', + ]) + rem.run( + args=[ + 'sudo', + 'rbd', + 'rm', + 'iscsi-image', + ]) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Start up tgt. + + To start on on all clients:: + + tasks: + - ceph: + - tgt: + + To start on certain clients:: + + tasks: + - ceph: + - tgt: [client.0, client.3] + + or + + tasks: + - ceph: + - tgt: + client.0: + client.3: + + An image blocksize size can also be specified:: + + tasks: + - ceph: + - tgt: + image_size = 20480 + + The general flow of things here is: + 1. Find clients on which tgt is supposed to run (start_tgtd) + 2. 
Remotely start up tgt daemon + On cleanup: + 3. Stop tgt daemon + + The iscsi administration is handled by the iscsi task. + """ + if config: + config = {key : val for key, val in config.items() + if key.startswith('client')} + # config at this point should only contain keys starting with 'client' + start_tgtd = [] + remotes = ctx.cluster.only(teuthology.is_type('client')).remotes + log.info(remotes) + if not config: + start_tgtd = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + else: + start_tgtd = config + log.info(start_tgtd) + with contextutil.nested( + lambda: start_tgt_remotes(ctx=ctx, start_tgtd=start_tgtd),): + yield diff --git a/qa/tasks/thrashosds.py b/qa/tasks/thrashosds.py new file mode 100644 index 00000000000..c20a6457c69 --- /dev/null +++ b/qa/tasks/thrashosds.py @@ -0,0 +1,160 @@ +""" +Thrash -- Simulate random osd failures. +""" +import contextlib +import logging +import ceph_manager +from teuthology import misc as teuthology + + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + "Thrash" the OSDs by randomly marking them out/down (and then back + in) until the task is ended. This loops, and every op_delay + seconds it randomly chooses to add or remove an OSD (even odds) + unless there are fewer than min_out OSDs out of the cluster, or + more than min_in OSDs in the cluster. + + All commands are run on mon0 and it stops when __exit__ is called. + + The config is optional, and is a dict containing some or all of: + + min_in: (default 3) the minimum number of OSDs to keep in the + cluster + + min_out: (default 0) the minimum number of OSDs to keep out of the + cluster + + op_delay: (5) the length of time to sleep between changing an + OSD's status + + min_dead: (0) minimum number of osds to leave down/dead. + + max_dead: (0) maximum number of osds to leave down/dead before waiting + for clean. This should probably be num_replicas - 1. + + clean_interval: (60) the approximate length of time to loop before + waiting until the cluster goes clean. (In reality this is used + to probabilistically choose when to wait, and the method used + makes it closer to -- but not identical to -- the half-life.) + + scrub_interval: (-1) the approximate length of time to loop before + waiting until a scrub is performed while cleaning. (In reality + this is used to probabilistically choose when to wait, and it + only applies to the cases where cleaning is being performed). + -1 is used to indicate that no scrubbing will be done. + + chance_down: (0.4) the probability that the thrasher will mark an + OSD down rather than marking it out. (The thrasher will not + consider that OSD out of the cluster, since presently an OSD + wrongly marked down will mark itself back up again.) This value + can be either an integer (eg, 75) or a float probability (eg + 0.75). + + chance_test_min_size: (0) chance to run test_pool_min_size, + which: + - kills all but one osd + - waits + - kills that osd + - revives all other osds + - verifies that the osds fully recover + + timeout: (360) the number of seconds to wait for the cluster + to become clean after each cluster change. If this doesn't + happen within the timeout, an exception will be raised. 
+ + revive_timeout: (150) number of seconds to wait for an osd asok to + appear after attempting to revive the osd + + thrash_primary_affinity: (true) randomly adjust primary-affinity + + chance_pgnum_grow: (0) chance to increase a pool's size + chance_pgpnum_fix: (0) chance to adjust pgpnum to pg for a pool + pool_grow_by: (10) amount to increase pgnum by + max_pgs_per_pool_osd: (1200) don't expand pools past this size per osd + + pause_short: (3) duration of short pause + pause_long: (80) duration of long pause + pause_check_after: (50) assert osd down after this long + chance_inject_pause_short: (1) chance of injecting short stall + chance_inject_pause_long: (0) chance of injecting long stall + + clean_wait: (0) duration to wait before resuming thrashing once clean + + powercycle: (false) whether to power cycle the node instead + of just the osd process. Note that this assumes that a single + osd is the only important process on the node. + + chance_test_backfill_full: (0) chance to simulate full disks stopping + backfill + + chance_test_map_discontinuity: (0) chance to test map discontinuity + map_discontinuity_sleep_time: (40) time to wait for map trims + + ceph_objectstore_tool: (true) whether to export/import a pg while an osd is down + chance_move_pg: (1.0) chance of moving a pg if more than 1 osd is down (default 100%) + + example: + + tasks: + - ceph: + - thrashosds: + chance_down: 10 + op_delay: 3 + min_in: 1 + timeout: 600 + - interactive: + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'thrashosds task only accepts a dict for configuration' + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('thrashosds', {})) + + if 'powercycle' in config: + + # sync everyone first to avoid collateral damage to / etc. 
+ log.info('Doing preliminary sync to avoid collateral damage...') + ctx.cluster.run(args=['sync']) + + if 'ipmi_user' in ctx.teuthology_config: + for remote in ctx.cluster.remotes.keys(): + log.debug('checking console status of %s' % remote.shortname) + if not remote.console.check_status(): + log.warn('Failed to get console status for %s', + remote.shortname) + + # check that all osd remotes have a valid console + osds = ctx.cluster.only(teuthology.is_type('osd')) + for remote in osds.remotes.keys(): + if not remote.console.has_ipmi_credentials: + raise Exception( + 'IPMI console required for powercycling, ' + 'but not available on osd role: {r}'.format( + r=remote.name)) + + log.info('Beginning thrashosds...') + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + config=config, + logger=log.getChild('ceph_manager'), + ) + ctx.manager = manager + thrash_proc = ceph_manager.Thrasher( + manager, + config, + logger=log.getChild('thrasher') + ) + try: + yield + finally: + log.info('joining thrashosds') + thrash_proc.do_join() + manager.wait_for_recovery(config.get('timeout', 360)) diff --git a/qa/tasks/userdata_setup.yaml b/qa/tasks/userdata_setup.yaml new file mode 100644 index 00000000000..d39695bef0f --- /dev/null +++ b/qa/tasks/userdata_setup.yaml @@ -0,0 +1,25 @@ +#cloud-config-archive + +- type: text/cloud-config + content: | + output: + all: '| tee -a /var/log/cloud-init-output.log' + +# allow passwordless access for debugging +- | + #!/bin/bash + exec passwd -d ubuntu + +- | + #!/bin/bash + + # mount a NFS share for storing logs + apt-get update + apt-get -y install nfs-common + mkdir /mnt/log + # 10.0.2.2 is the host + mount -v -t nfs -o proto=tcp 10.0.2.2:{mnt_dir} /mnt/log + + # mount the iso image that has the test script + mkdir /mnt/cdrom + mount -t auto /dev/cdrom /mnt/cdrom diff --git a/qa/tasks/userdata_teardown.yaml b/qa/tasks/userdata_teardown.yaml new file mode 100644 index 00000000000..7f3d64ff742 --- /dev/null +++ b/qa/tasks/userdata_teardown.yaml @@ -0,0 +1,11 @@ +- | + #!/bin/bash + cp /var/log/cloud-init-output.log /mnt/log + +- | + #!/bin/bash + umount /mnt/log + +- | + #!/bin/bash + shutdown -h -P now diff --git a/qa/tasks/util/__init__.py b/qa/tasks/util/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/qa/tasks/util/kclient.py b/qa/tasks/util/kclient.py new file mode 100644 index 00000000000..c6a259fc755 --- /dev/null +++ b/qa/tasks/util/kclient.py @@ -0,0 +1,22 @@ +from teuthology.misc import get_testdir +from teuthology.orchestra import run + + +def write_secret_file(ctx, remote, role, keyring, filename): + """ + Stash the kerying in the filename specified. 
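    A hypothetical call (role and paths are illustrative only)::

        write_secret_file(ctx, remote, 'client.0',
                          '/etc/ceph/ceph.client.0.keyring',
                          '/tmp/client.0.secret')

    runs ceph-authtool --name=client.0 --print-key against the keyring and
    redirects the key into the target file, typically for use as a kernel
    client mount secret.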
+ """ + testdir = get_testdir(ctx) + remote.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph-authtool', + '--name={role}'.format(role=role), + '--print-key', + keyring, + run.Raw('>'), + filename, + ], + ) diff --git a/qa/tasks/util/rados.py b/qa/tasks/util/rados.py new file mode 100644 index 00000000000..f6a806c95db --- /dev/null +++ b/qa/tasks/util/rados.py @@ -0,0 +1,50 @@ +import logging + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def rados(ctx, remote, cmd, wait=True, check_status=False): + testdir = teuthology.get_testdir(ctx) + log.info("rados %s" % ' '.join(cmd)) + pre = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rados', + ]; + pre.extend(cmd) + proc = remote.run( + args=pre, + check_status=check_status, + wait=wait, + ) + if wait: + return proc.exitstatus + else: + return proc + +def create_ec_pool(remote, name, profile_name, pgnum, m=1, k=2): + remote.run(args=[ + 'ceph', 'osd', 'erasure-code-profile', 'set', + profile_name, 'm=' + str(m), 'k=' + str(k), + 'ruleset-failure-domain=osd', + ]) + remote.run(args=[ + 'ceph', 'osd', 'pool', 'create', name, + str(pgnum), str(pgnum), 'erasure', profile_name, + ]) + +def create_replicated_pool(remote, name, pgnum): + remote.run(args=[ + 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum), + ]) + +def create_cache_pool(remote, base_name, cache_name, pgnum, size): + remote.run(args=[ + 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum) + ]) + remote.run(args=[ + 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name, + str(size), + ]) diff --git a/qa/tasks/util/rgw.py b/qa/tasks/util/rgw.py new file mode 100644 index 00000000000..e5fba9f82f9 --- /dev/null +++ b/qa/tasks/util/rgw.py @@ -0,0 +1,171 @@ +from cStringIO import StringIO +import logging +import json +import requests +from urlparse import urlparse + +from teuthology.orchestra.connection import split_user +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +# simple test to indicate if multi-region testing should occur +def multi_region_enabled(ctx): + # this is populated by the radosgw-agent task, seems reasonable to + # use that as an indicator that we're testing multi-region sync + return 'radosgw_agent' in ctx + +def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False): + log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd)) + testdir = teuthology.get_testdir(ctx) + pre = [ + 'adjust-ulimits', + 'ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin'.format(tdir=testdir), + '--log-to-stderr', + '--format', 'json', + '-n', client, + ] + pre.extend(cmd) + log.info('rgwadmin: cmd=%s' % pre) + (remote,) = ctx.cluster.only(client).remotes.iterkeys() + proc = remote.run( + args=pre, + check_status=check_status, + stdout=StringIO(), + stderr=StringIO(), + stdin=stdin, + ) + r = proc.exitstatus + out = proc.stdout.getvalue() + j = None + if not r and out != '': + try: + j = json.loads(out) + log.info(' json result: %s' % j) + except ValueError: + j = out + log.info(' raw result: %s' % j) + return (r, j) + +def get_user_summary(out, user): + """Extract the summary for a given user""" + user_summary = None + for summary in out['summary']: + if summary.get('user') == user: + user_summary = summary + + if not user_summary: + raise AssertionError('No summary info found for user: %s' % user) + + return user_summary + 
+def get_user_successful_ops(out, user): + summary = out['summary'] + if len(summary) == 0: + return 0 + return get_user_summary(out, user)['total']['successful_ops'] + +def get_zone_host_and_port(ctx, client, zone): + _, region_map = rgwadmin(ctx, client, check_status=True, + cmd=['-n', client, 'region-map', 'get']) + regions = region_map['regions'] + for region in regions: + for zone_info in region['val']['zones']: + if zone_info['name'] == zone: + endpoint = urlparse(zone_info['endpoints'][0]) + host, port = endpoint.hostname, endpoint.port + if port is None: + port = 80 + return host, port + assert False, 'no endpoint for zone {zone} found'.format(zone=zone) + +def get_master_zone(ctx, client): + _, region_map = rgwadmin(ctx, client, check_status=True, + cmd=['-n', client, 'region-map', 'get']) + regions = region_map['regions'] + for region in regions: + is_master = (region['val']['is_master'] == "true") + log.info('region={r} is_master={ism}'.format(r=region, ism=is_master)) + if not is_master: + continue + master_zone = region['val']['master_zone'] + log.info('master_zone=%s' % master_zone) + for zone_info in region['val']['zones']: + if zone_info['name'] == master_zone: + return master_zone + log.info('couldn\'t find master zone') + return None + +def get_master_client(ctx, clients): + master_zone = get_master_zone(ctx, clients[0]) # can use any client for this as long as system configured correctly + if not master_zone: + return None + + for client in clients: + zone = zone_for_client(ctx, client) + if zone == master_zone: + return client + + return None + +def get_zone_system_keys(ctx, client, zone): + _, zone_info = rgwadmin(ctx, client, check_status=True, + cmd=['-n', client, + 'zone', 'get', '--rgw-zone', zone]) + system_key = zone_info['system_key'] + return system_key['access_key'], system_key['secret_key'] + +def zone_for_client(ctx, client): + ceph_config = ctx.ceph.conf.get('global', {}) + ceph_config.update(ctx.ceph.conf.get('client', {})) + ceph_config.update(ctx.ceph.conf.get(client, {})) + return ceph_config.get('rgw zone') + +def region_for_client(ctx, client): + ceph_config = ctx.ceph.conf.get('global', {}) + ceph_config.update(ctx.ceph.conf.get('client', {})) + ceph_config.update(ctx.ceph.conf.get(client, {})) + return ceph_config.get('rgw region') + +def radosgw_data_log_window(ctx, client): + ceph_config = ctx.ceph.conf.get('global', {}) + ceph_config.update(ctx.ceph.conf.get('client', {})) + ceph_config.update(ctx.ceph.conf.get(client, {})) + return ceph_config.get('rgw data log window', 30) + +def radosgw_agent_sync_data(ctx, agent_host, agent_port, full=False): + log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port)) + method = "full" if full else "incremental" + return requests.post('http://{addr}:{port}/data/{method}'.format(addr = agent_host, port = agent_port, method = method)) + +def radosgw_agent_sync_metadata(ctx, agent_host, agent_port, full=False): + log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port)) + method = "full" if full else "incremental" + return requests.post('http://{addr}:{port}/metadata/{method}'.format(addr = agent_host, port = agent_port, method = method)) + +def radosgw_agent_sync_all(ctx, full=False, data=False): + if ctx.radosgw_agent.procs: + for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): + zone_for_client(ctx, agent_client) + sync_host, sync_port = get_sync_agent(ctx, agent_client) + log.debug('doing a sync via {host1}'.format(host1=sync_host)) + radosgw_agent_sync_metadata(ctx, 
sync_host, sync_port, full) + if (data): + radosgw_agent_sync_data(ctx, sync_host, sync_port, full) + +def host_for_role(ctx, role): + for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + if role in roles: + _, host = split_user(target) + return host + +def get_sync_agent(ctx, source): + for task in ctx.config['tasks']: + if 'radosgw-agent' not in task: + continue + for client, conf in task['radosgw-agent'].iteritems(): + if conf['src'] == source: + return host_for_role(ctx, source), conf.get('port', 8000) + return None, None diff --git a/qa/tasks/watch_notify_stress.py b/qa/tasks/watch_notify_stress.py new file mode 100644 index 00000000000..6db313fea6d --- /dev/null +++ b/qa/tasks/watch_notify_stress.py @@ -0,0 +1,69 @@ +""" +test_stress_watch task +""" +import contextlib +import logging +import proc_thrasher + +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run test_stress_watch + + The config should be as follows: + + test_stress_watch: + clients: [client list] + + example: + + tasks: + - ceph: + - test_stress_watch: + clients: [client.0] + - interactive: + """ + log.info('Beginning test_stress_watch...') + assert isinstance(config, dict), \ + "please list clients to run on" + testwatch = {} + + remotes = [] + + for role in config.get('clients', ['client.0']): + assert isinstance(role, basestring) + PREFIX = 'client.' + assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remotes.append(remote) + + args =['CEPH_CLIENT_ID={id_}'.format(id_=id_), + 'CEPH_ARGS="{flags}"'.format(flags=config.get('flags', '')), + 'daemon-helper', + 'kill', + 'multi_stress_watch foo foo' + ] + + log.info("args are %s" % (args,)) + + proc = proc_thrasher.ProcThrasher({}, remote, + args=[run.Raw(i) for i in args], + logger=log.getChild('testwatch.{id}'.format(id=id_)), + stdin=run.PIPE, + wait=False + ) + proc.start() + testwatch[id_] = proc + + try: + yield + finally: + log.info('joining watch_notify_stress') + for i in testwatch.itervalues(): + i.join() diff --git a/qa/tasks/workunit.py b/qa/tasks/workunit.py new file mode 100644 index 00000000000..548fc5898b8 --- /dev/null +++ b/qa/tasks/workunit.py @@ -0,0 +1,374 @@ +""" +Workunit task -- Run ceph on sets of specific clients +""" +import logging +import pipes +import os + +from teuthology import misc +from teuthology.orchestra.run import CommandFailedError +from teuthology.parallel import parallel +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +CLIENT_PREFIX = 'client.' + + +def task(ctx, config): + """ + Run ceph on all workunits found under the specified path. + + For example:: + + tasks: + - ceph: + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: [direct_io, xattrs.sh] + client.1: [snaps] + branch: foo + + You can also run a list of workunits on all clients: + tasks: + - ceph: + - ceph-fuse: + - workunit: + tag: v0.47 + clients: + all: [direct_io, xattrs.sh, snaps] + + If you have an "all" section it will run all the workunits + on each client simultaneously, AFTER running any workunits specified + for individual clients. (This prevents unintended simultaneous runs.) + + To customize tests, you can specify environment variables as a dict. 
You + can also specify a time limit for each work unit (defaults to 3h): + + tasks: + - ceph: + - ceph-fuse: + - workunit: + sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6 + clients: + all: [snaps] + env: + FOO: bar + BAZ: quux + timeout: 3h + + :param ctx: Context + :param config: Configuration + """ + assert isinstance(config, dict) + assert isinstance(config.get('clients'), dict), \ + 'configuration must contain a dictionary of clients' + + overrides = ctx.config.get('overrides', {}) + misc.deep_merge(config, overrides.get('workunit', {})) + + refspec = config.get('branch') + if refspec is None: + refspec = config.get('sha1') + if refspec is None: + refspec = config.get('tag') + if refspec is None: + refspec = 'HEAD' + + timeout = config.get('timeout', '3h') + + log.info('Pulling workunits from ref %s', refspec) + + created_mountpoint = {} + + if config.get('env') is not None: + assert isinstance(config['env'], dict), 'env must be a dictionary' + clients = config['clients'] + + # Create scratch dirs for any non-all workunits + log.info('Making a separate scratch dir for every client...') + for role in clients.iterkeys(): + assert isinstance(role, basestring) + if role == "all": + continue + + assert role.startswith(CLIENT_PREFIX) + created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir')) + created_mountpoint[role] = created_mnt_dir + + # Execute any non-all workunits + with parallel() as p: + for role, tests in clients.iteritems(): + if role != "all": + p.spawn(_run_tests, ctx, refspec, role, tests, + config.get('env'), timeout=timeout) + + # Clean up dirs from any non-all workunits + for role, created in created_mountpoint.items(): + _delete_dir(ctx, role, created) + + # Execute any 'all' workunits + if 'all' in clients: + all_tasks = clients["all"] + _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'), + config.get('subdir'), timeout=timeout) + + +def _delete_dir(ctx, role, created_mountpoint): + """ + Delete file used by this role, and delete the directory that this + role appeared in. + + :param ctx: Context + :param role: "role.#" where # is used for the role id. + """ + testdir = misc.get_testdir(ctx) + id_ = role[len(CLIENT_PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + # Is there any reason why this is not: join(mnt, role) ? + client = os.path.join(mnt, 'client.{id}'.format(id=id_)) + + # Remove the directory inside the mount where the workunit ran + remote.run( + args=[ + 'sudo', + 'rm', + '-rf', + '--', + client, + ], + ) + log.info("Deleted dir {dir}".format(dir=client)) + + # If the mount was an artificially created dir, delete that too + if created_mountpoint: + remote.run( + args=[ + 'rmdir', + '--', + mnt, + ], + ) + log.info("Deleted artificial mount point {dir}".format(dir=client)) + + +def _make_scratch_dir(ctx, role, subdir): + """ + Make scratch directories for this role. This also makes the mount + point if that directory does not exist. + + :param ctx: Context + :param role: "role.#" where # is used for the role id. + :param subdir: use this subdir (False if not used) + """ + created_mountpoint = False + id_ = role[len(CLIENT_PREFIX):] + log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role)) + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + dir_owner = remote.user + mnt = os.path.join(misc.get_testdir(ctx), 'mnt.{id}'.format(id=id_)) + # if neither kclient nor ceph-fuse are required for a workunit, + # mnt may not exist. 
Stat and create the directory if it doesn't. + try: + remote.run( + args=[ + 'stat', + '--', + mnt, + ], + ) + log.info('Did not need to create dir {dir}'.format(dir=mnt)) + except CommandFailedError: + remote.run( + args=[ + 'mkdir', + '--', + mnt, + ], + ) + log.info('Created dir {dir}'.format(dir=mnt)) + created_mountpoint = True + + if not subdir: + subdir = 'client.{id}'.format(id=id_) + + if created_mountpoint: + remote.run( + args=[ + 'cd', + '--', + mnt, + run.Raw('&&'), + 'mkdir', + '--', + subdir, + ], + ) + else: + remote.run( + args=[ + # cd first so this will fail if the mount point does + # not exist; pure install -d will silently do the + # wrong thing + 'cd', + '--', + mnt, + run.Raw('&&'), + 'sudo', + 'install', + '-d', + '-m', '0755', + '--owner={user}'.format(user=dir_owner), + '--', + subdir, + ], + ) + + return created_mountpoint + + +def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None): + """ + Make a scratch directory for each client in the cluster, and then for each + test spawn _run_tests() for each role. + + See run_tests() for parameter documentation. + """ + client_generator = misc.all_roles_of_type(ctx.cluster, 'client') + client_remotes = list() + + created_mountpoint = {} + for client in client_generator: + (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys() + client_remotes.append((client_remote, 'client.{id}'.format(id=client))) + created_mountpoint[client] = _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir) + + for unit in tests: + with parallel() as p: + for remote, role in client_remotes: + p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir, + timeout=timeout) + + # cleanup the generated client directories + client_generator = misc.all_roles_of_type(ctx.cluster, 'client') + for client in client_generator: + _delete_dir(ctx, 'client.{id}'.format(id=client), created_mountpoint[client]) + + +def _run_tests(ctx, refspec, role, tests, env, subdir=None, timeout=None): + """ + Run the individual test. Create a scratch directory and then extract the + workunits from git. Make the executables, and then run the tests. + Clean up (remove files created) after the tests are finished. + + :param ctx: Context + :param refspec: branch, sha1, or version tag used to identify this + build + :param tests: specific tests specified. + :param env: environment set in yaml file. Could be None. + :param subdir: subdirectory set in yaml file. Could be None + :param timeout: If present, use the 'timeout' command on the remote host + to limit execution time. Must be specified by a number + followed by 's' for seconds, 'm' for minutes, 'h' for + hours, or 'd' for days. If '0' or anything that evaluates + to False is passed, the 'timeout' command is not used. 
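                   For example (illustrative), with the default of '3h' each
                   workunit command ends up wrapped as::

                       timeout 3h <path-to-workunit-script>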
+ """ + testdir = misc.get_testdir(ctx) + assert isinstance(role, basestring) + assert role.startswith(CLIENT_PREFIX) + id_ = role[len(CLIENT_PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.iterkeys() + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + # subdir so we can remove and recreate this a lot without sudo + if subdir is None: + scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp') + else: + scratch_tmp = os.path.join(mnt, subdir) + srcdir = '{tdir}/workunit.{role}'.format(tdir=testdir, role=role) + + remote.run( + logger=log.getChild(role), + args=[ + 'mkdir', '--', srcdir, + run.Raw('&&'), + 'git', + 'archive', + '--remote=git://git.ceph.com/ceph.git', + '%s:qa/workunits' % refspec, + run.Raw('|'), + 'tar', + '-C', srcdir, + '-x', + '-f-', + run.Raw('&&'), + 'cd', '--', srcdir, + run.Raw('&&'), + 'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi', + run.Raw('&&'), + 'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir), + run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)), + ], + ) + + workunits = sorted(misc.get_file( + remote, + '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)).split('\0')) + assert workunits + + try: + assert isinstance(tests, list) + for spec in tests: + log.info('Running workunits matching %s on %s...', spec, role) + prefix = '{spec}/'.format(spec=spec) + to_run = [w for w in workunits if w == spec or w.startswith(prefix)] + if not to_run: + raise RuntimeError('Spec did not match any workunits: {spec!r}'.format(spec=spec)) + for workunit in to_run: + log.info('Running workunit %s...', workunit) + args = [ + 'mkdir', '-p', '--', scratch_tmp, + run.Raw('&&'), + 'cd', '--', scratch_tmp, + run.Raw('&&'), + run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'), + run.Raw('CEPH_REF={ref}'.format(ref=refspec)), + run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)), + run.Raw('CEPH_ID="{id}"'.format(id=id_)), + run.Raw('PATH=$PATH:/usr/sbin') + ] + if env is not None: + for var, val in env.iteritems(): + quoted_val = pipes.quote(val) + env_arg = '{var}={val}'.format(var=var, val=quoted_val) + args.append(run.Raw(env_arg)) + args.extend([ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir)]) + if timeout and timeout != '0': + args.extend(['timeout', timeout]) + args.extend([ + '{srcdir}/{workunit}'.format( + srcdir=srcdir, + workunit=workunit, + ), + ]) + remote.run( + logger=log.getChild(role), + args=args, + label="workunit test {workunit}".format(workunit=workunit) + ) + remote.run( + logger=log.getChild(role), + args=['sudo', 'rm', '-rf', '--', scratch_tmp], + ) + finally: + log.info('Stopping %s on %s...', tests, role) + remote.run( + logger=log.getChild(role), + args=[ + 'rm', '-rf', '--', '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role), srcdir, + ], + ) diff --git a/qa/tox.ini b/qa/tox.ini new file mode 100644 index 00000000000..14399d0a302 --- /dev/null +++ b/qa/tox.ini @@ -0,0 +1,8 @@ +[tox] +envlist = flake8 +skipsdist = True + +[testenv:flake8] +deps= + flake8 +commands=flake8 --select=F ceph-qa-suite diff --git a/rgw_pool_type/erasure-coded.yaml b/rgw_pool_type/erasure-coded.yaml deleted file mode 100644 index 7c99b7f85c8..00000000000 --- a/rgw_pool_type/erasure-coded.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - rgw: - ec-data-pool: true - s3tests: - slow_backend: true diff --git a/rgw_pool_type/replicated.yaml b/rgw_pool_type/replicated.yaml deleted file mode 100644 index c91709eaae7..00000000000 --- 
a/rgw_pool_type/replicated.yaml +++ /dev/null @@ -1,3 +0,0 @@ -overrides: - rgw: - ec-data-pool: false diff --git a/suites/% b/suites/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/big/rados-thrash/% b/suites/big/rados-thrash/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/big/rados-thrash/ceph/ceph.yaml b/suites/big/rados-thrash/ceph/ceph.yaml deleted file mode 100644 index 2030acb9083..00000000000 --- a/suites/big/rados-thrash/ceph/ceph.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install: -- ceph: diff --git a/suites/big/rados-thrash/clusters/big.yaml b/suites/big/rados-thrash/clusters/big.yaml deleted file mode 100644 index 18197ad8571..00000000000 --- a/suites/big/rados-thrash/clusters/big.yaml +++ /dev/null @@ -1,68 +0,0 @@ -roles: -- [osd.0, osd.1, osd.2, client.0, mon.a] -- [osd.3, osd.4, osd.5, client.1, mon.b] -- [osd.6, osd.7, osd.8, client.2, mon.c] -- [osd.9, osd.10, osd.11, client.3, mon.d] -- [osd.12, osd.13, osd.14, client.4, mon.e] -- [osd.15, osd.16, osd.17, client.5] -- [osd.18, osd.19, osd.20, client.6] -- [osd.21, osd.22, osd.23, client.7] -- [osd.24, osd.25, osd.26, client.8] -- [osd.27, osd.28, osd.29, client.9] -- [osd.30, osd.31, osd.32, client.10] -- [osd.33, osd.34, osd.35, client.11] -- [osd.36, osd.37, osd.38, client.12] -- [osd.39, osd.40, osd.41, client.13] -- [osd.42, osd.43, osd.44, client.14] -- [osd.45, osd.46, osd.47, client.15] -- [osd.48, osd.49, osd.50, client.16] -- [osd.51, osd.52, osd.53, client.17] -- [osd.54, osd.55, osd.56, client.18] -- [osd.57, osd.58, osd.59, client.19] -- [osd.60, osd.61, osd.62, client.20] -- [osd.63, osd.64, osd.65, client.21] -- [osd.66, osd.67, osd.68, client.22] -- [osd.69, osd.70, osd.71, client.23] -- [osd.72, osd.73, osd.74, client.24] -- [osd.75, osd.76, osd.77, client.25] -- [osd.78, osd.79, osd.80, client.26] -- [osd.81, osd.82, osd.83, client.27] -- [osd.84, osd.85, osd.86, client.28] -- [osd.87, osd.88, osd.89, client.29] -- [osd.90, osd.91, osd.92, client.30] -- [osd.93, osd.94, osd.95, client.31] -- [osd.96, osd.97, osd.98, client.32] -- [osd.99, osd.100, osd.101, client.33] -- [osd.102, osd.103, osd.104, client.34] -- [osd.105, osd.106, osd.107, client.35] -- [osd.108, osd.109, osd.110, client.36] -- [osd.111, osd.112, osd.113, client.37] -- [osd.114, osd.115, osd.116, client.38] -- [osd.117, osd.118, osd.119, client.39] -- [osd.120, osd.121, osd.122, client.40] -- [osd.123, osd.124, osd.125, client.41] -- [osd.126, osd.127, osd.128, client.42] -- [osd.129, osd.130, osd.131, client.43] -- [osd.132, osd.133, osd.134, client.44] -- [osd.135, osd.136, osd.137, client.45] -- [osd.138, osd.139, osd.140, client.46] -- [osd.141, osd.142, osd.143, client.47] -- [osd.144, osd.145, osd.146, client.48] -- [osd.147, osd.148, osd.149, client.49] -- [osd.150, osd.151, osd.152, client.50] -#- [osd.153, osd.154, osd.155, client.51] -#- [osd.156, osd.157, osd.158, client.52] -#- [osd.159, osd.160, osd.161, client.53] -#- [osd.162, osd.163, osd.164, client.54] -#- [osd.165, osd.166, osd.167, client.55] -#- [osd.168, osd.169, osd.170, client.56] -#- [osd.171, osd.172, osd.173, client.57] -#- [osd.174, osd.175, osd.176, client.58] -#- [osd.177, osd.178, osd.179, client.59] -#- [osd.180, osd.181, osd.182, client.60] -#- [osd.183, osd.184, osd.185, client.61] -#- [osd.186, osd.187, osd.188, client.62] -#- [osd.189, osd.190, osd.191, client.63] -#- [osd.192, osd.193, osd.194, client.64] -#- [osd.195, osd.196, osd.197, client.65] -#- [osd.198, osd.199, osd.200, client.66] 
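(Editor's note: the cluster fragments removed above and below follow teuthology's layout convention: each entry under roles: describes one test node, and every daemon or client role listed in that entry is colocated on that node. A minimal, purely illustrative sketch of such a fragment, with hypothetical hosts and role names not copied from any file in this commit, would look like:

    roles:
    - [mon.a, osd.0, osd.1, osd.2]   # node 1: a monitor plus three OSDs
    - [mds.a, osd.3, osd.4, osd.5]   # node 2: an MDS plus three OSDs
    - [client.0]                     # node 3: a bare client that runs the workunits

Scaling a suite up or down is then just a matter of selecting a different fragment, such as the small.yaml, medium.yaml, or big.yaml files in this directory, with more or fewer of these node entries.)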
diff --git a/suites/big/rados-thrash/clusters/medium.yaml b/suites/big/rados-thrash/clusters/medium.yaml deleted file mode 100644 index 48b66dd5ca3..00000000000 --- a/suites/big/rados-thrash/clusters/medium.yaml +++ /dev/null @@ -1,22 +0,0 @@ -roles: -- [osd.0, osd.1, osd.2, client.0, mon.a] -- [osd.3, osd.4, osd.5, client.1, mon.b] -- [osd.6, osd.7, osd.8, client.2, mon.c] -- [osd.9, osd.10, osd.11, client.3, mon.d] -- [osd.12, osd.13, osd.14, client.4, mon.e] -- [osd.15, osd.16, osd.17, client.5] -- [osd.18, osd.19, osd.20, client.6] -- [osd.21, osd.22, osd.23, client.7] -- [osd.24, osd.25, osd.26, client.8] -- [osd.27, osd.28, osd.29, client.9] -- [osd.30, osd.31, osd.32, client.10] -- [osd.33, osd.34, osd.35, client.11] -- [osd.36, osd.37, osd.38, client.12] -- [osd.39, osd.40, osd.41, client.13] -- [osd.42, osd.43, osd.44, client.14] -- [osd.45, osd.46, osd.47, client.15] -- [osd.48, osd.49, osd.50, client.16] -- [osd.51, osd.52, osd.53, client.17] -- [osd.54, osd.55, osd.56, client.18] -- [osd.57, osd.58, osd.59, client.19] -- [osd.60, osd.61, osd.62, client.20] diff --git a/suites/big/rados-thrash/clusters/small.yaml b/suites/big/rados-thrash/clusters/small.yaml deleted file mode 100644 index b5a79906c69..00000000000 --- a/suites/big/rados-thrash/clusters/small.yaml +++ /dev/null @@ -1,6 +0,0 @@ -roles: -- [osd.0, osd.1, osd.2, client.0, mon.a] -- [osd.3, osd.4, osd.5, client.1, mon.b] -- [osd.6, osd.7, osd.8, client.2, mon.c] -- [osd.9, osd.10, osd.11, client.3, mon.d] -- [osd.12, osd.13, osd.14, client.4, mon.e] diff --git a/suites/big/rados-thrash/fs/btrfs.yaml b/suites/big/rados-thrash/fs/btrfs.yaml deleted file mode 100644 index 0b3f6fac7a5..00000000000 --- a/suites/big/rados-thrash/fs/btrfs.yaml +++ /dev/null @@ -1,7 +0,0 @@ -overrides: - ceph: - fs: btrfs - conf: - osd: - osd sloppy crc: true - osd op thread timeout: 60 diff --git a/suites/big/rados-thrash/fs/xfs.yaml b/suites/big/rados-thrash/fs/xfs.yaml deleted file mode 100644 index b4a82911a2f..00000000000 --- a/suites/big/rados-thrash/fs/xfs.yaml +++ /dev/null @@ -1,6 +0,0 @@ -overrides: - ceph: - fs: xfs - conf: - osd: - osd sloppy crc: true diff --git a/suites/big/rados-thrash/thrashers/default.yaml b/suites/big/rados-thrash/thrashers/default.yaml deleted file mode 100644 index d67ff20a693..00000000000 --- a/suites/big/rados-thrash/thrashers/default.yaml +++ /dev/null @@ -1,10 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 diff --git a/suites/big/rados-thrash/workloads/snaps-few-objects.yaml b/suites/big/rados-thrash/workloads/snaps-few-objects.yaml deleted file mode 100644 index b73bb6781dc..00000000000 --- a/suites/big/rados-thrash/workloads/snaps-few-objects.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- rados: - ops: 4000 - max_seconds: 3600 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - copy_from: 50 diff --git a/suites/ceph-deploy/fs/% b/suites/ceph-deploy/fs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/ceph-deploy/fs/distros b/suites/ceph-deploy/fs/distros deleted file mode 120000 index c5d59352cb5..00000000000 --- a/suites/ceph-deploy/fs/distros +++ /dev/null @@ -1 +0,0 @@ -../../../distros/supported \ No newline at end of file diff --git a/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_blogbench.yaml 
b/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_blogbench.yaml deleted file mode 100644 index ee35e1a56c8..00000000000 --- a/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_blogbench.yaml +++ /dev/null @@ -1,35 +0,0 @@ -overrides: - ceph-deploy: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.3 - - osd.4 - - osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- ceph-fuse: -- workunit: - clients: - client.0: - - suites/blogbench.sh -exclude_arch: armv7l diff --git a/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_dbench.yaml b/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_dbench.yaml deleted file mode 100644 index 58f7a5456ae..00000000000 --- a/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_dbench.yaml +++ /dev/null @@ -1,35 +0,0 @@ -overrides: - ceph-deploy: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.4 - - osd.3 - - osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- ceph-fuse: -- workunit: - clients: - client.0: - - suites/dbench.sh -exclude_arch: armv7l diff --git a/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_fsstress.yaml b/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_fsstress.yaml deleted file mode 100644 index b912ffedf7b..00000000000 --- a/suites/ceph-deploy/fs/tasks/cfuse_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,35 +0,0 @@ -overrides: - ceph-deploy: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.3 - - osd.4 - - osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- ceph-fuse: -- workunit: - clients: - client.0: - - suites/fsstress.sh -exclude_arch: armv7l diff --git a/suites/ceph-deploy/rados/% b/suites/ceph-deploy/rados/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/ceph-deploy/rados/distros b/suites/ceph-deploy/rados/distros deleted file mode 120000 index c5d59352cb5..00000000000 --- a/suites/ceph-deploy/rados/distros +++ /dev/null @@ -1 +0,0 @@ -../../../distros/supported \ No newline at end of file diff --git a/suites/ceph-deploy/rados/tasks/rados_api_tests.yaml b/suites/ceph-deploy/rados/tasks/rados_api_tests.yaml deleted file mode 100644 index 32baa730f73..00000000000 --- a/suites/ceph-deploy/rados/tasks/rados_api_tests.yaml +++ /dev/null @@ -1,33 +0,0 @@ -overrides: - ceph-deploy: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.3 - - osd.4 - - osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- workunit: - clients: - client.0: - - rados/test.sh diff --git a/suites/ceph-deploy/rados/tasks/rados_python.yaml b/suites/ceph-deploy/rados/tasks/rados_python.yaml deleted file mode 100644 index 634ece27633..00000000000 --- a/suites/ceph-deploy/rados/tasks/rados_python.yaml +++ /dev/null @@ -1,34 +0,0 @@ -overrides: - ceph-deploy: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.3 - - osd.4 - - 
osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- workunit: - clients: - client.0: - - rados/test_python.sh - diff --git a/suites/ceph-deploy/rados/tasks/rados_workunit_loadgen_big.yaml b/suites/ceph-deploy/rados/tasks/rados_workunit_loadgen_big.yaml deleted file mode 100644 index 9f3140393b8..00000000000 --- a/suites/ceph-deploy/rados/tasks/rados_workunit_loadgen_big.yaml +++ /dev/null @@ -1,34 +0,0 @@ -overrides: - ceph-deploy: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.3 - - osd.4 - - osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- workunit: - clients: - all: - - rados/load-gen-big.sh - diff --git a/suites/ceph-deploy/rbd/% b/suites/ceph-deploy/rbd/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/ceph-deploy/rbd/distros b/suites/ceph-deploy/rbd/distros deleted file mode 120000 index c5d59352cb5..00000000000 --- a/suites/ceph-deploy/rbd/distros +++ /dev/null @@ -1 +0,0 @@ -../../../distros/supported \ No newline at end of file diff --git a/suites/ceph-deploy/rbd/tasks/rbd_api_tests_old_format.yaml b/suites/ceph-deploy/rbd/tasks/rbd_api_tests_old_format.yaml deleted file mode 100644 index 1333358a8ef..00000000000 --- a/suites/ceph-deploy/rbd/tasks/rbd_api_tests_old_format.yaml +++ /dev/null @@ -1,33 +0,0 @@ -overrides: - ceph-deploy: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.3 - - osd.4 - - osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- workunit: - clients: - client.0: - - rbd/test_librbd.sh diff --git a/suites/ceph-deploy/rbd/tasks/rbd_cli_tests.yaml b/suites/ceph-deploy/rbd/tasks/rbd_cli_tests.yaml deleted file mode 100644 index 27eb5299123..00000000000 --- a/suites/ceph-deploy/rbd/tasks/rbd_cli_tests.yaml +++ /dev/null @@ -1,33 +0,0 @@ -overrides: - ceph-deploy: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.3 - - osd.4 - - osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- workunit: - clients: - client.0: - - rbd/run_cli_tests.sh diff --git a/suites/ceph-deploy/rbd/tasks/rbd_cls_test.yaml b/suites/ceph-deploy/rbd/tasks/rbd_cls_test.yaml deleted file mode 100644 index 1abca13698c..00000000000 --- a/suites/ceph-deploy/rbd/tasks/rbd_cls_test.yaml +++ /dev/null @@ -1,33 +0,0 @@ -overrides: - ceph-deploy: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.3 - - osd.4 - - osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- workunit: - clients: - client.0: - - cls/test_cls_rbd.sh diff --git a/suites/ceph-deploy/rbd/tasks/rbd_python_api_tests.yaml b/suites/ceph-deploy/rbd/tasks/rbd_python_api_tests.yaml deleted file mode 100644 index 9c663f53864..00000000000 --- a/suites/ceph-deploy/rbd/tasks/rbd_python_api_tests.yaml +++ /dev/null @@ -1,33 +0,0 @@ -overrides: - ceph-deploy: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 
-roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.3 - - osd.4 - - osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- workunit: - clients: - client.0: - - rbd/test_librbd_python.sh diff --git a/suites/ceph-deploy/singleton/% b/suites/ceph-deploy/singleton/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/ceph-deploy/singleton/all/basic-test.yaml b/suites/ceph-deploy/singleton/all/basic-test.yaml deleted file mode 100644 index 14711d05856..00000000000 --- a/suites/ceph-deploy/singleton/all/basic-test.yaml +++ /dev/null @@ -1,35 +0,0 @@ -overrides: - ceph: - conf: - global: - debug ms: 1 - osd: - debug osd: 10 - mon: - debug mon: 10 -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - osd.3 - - osd.4 - - osd.5 -- - mon.c - - osd.6 - - osd.7 - - osd.8 -- - client.0 - -tasks: -- install: - extras: yes -- ssh_keys: -- ceph-deploy: -- workunit: - clients: - client.0: - - suites/blogbench.sh - diff --git a/suites/ceph-deploy/singleton/distros b/suites/ceph-deploy/singleton/distros deleted file mode 120000 index c5d59352cb5..00000000000 --- a/suites/ceph-deploy/singleton/distros +++ /dev/null @@ -1 +0,0 @@ -../../../distros/supported \ No newline at end of file diff --git a/suites/clusters/samba-basic.yaml b/suites/clusters/samba-basic.yaml deleted file mode 100644 index caced4a26d1..00000000000 --- a/suites/clusters/samba-basic.yaml +++ /dev/null @@ -1,3 +0,0 @@ -roles: -- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1] -- [samba.0, client.0, client.1] diff --git a/suites/debug/mds_client.yaml b/suites/debug/mds_client.yaml deleted file mode 120000 index 2550b024ded..00000000000 --- a/suites/debug/mds_client.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../debug/mds_client.yaml \ No newline at end of file diff --git a/suites/dummy/% b/suites/dummy/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/dummy/all/nop.yaml b/suites/dummy/all/nop.yaml deleted file mode 100644 index e027e553395..00000000000 --- a/suites/dummy/all/nop.yaml +++ /dev/null @@ -1,6 +0,0 @@ -roles: - - [mon.a, mds.a, osd.0, osd.1, client.0] - -tasks: - - nop: - diff --git a/suites/experimental/multimds/% b/suites/experimental/multimds/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/experimental/multimds/clusters/7-multimds.yaml b/suites/experimental/multimds/clusters/7-multimds.yaml deleted file mode 100644 index 17cfd7b3d79..00000000000 --- a/suites/experimental/multimds/clusters/7-multimds.yaml +++ /dev/null @@ -1,8 +0,0 @@ -roles: -- [mon.a, mds.a, mds.a-s] -- [mon.b, mds.b, mds.b-s] -- [mon.c, mds.c, mds.c-s] -- [osd.0] -- [osd.1] -- [osd.2] -- [client.0] diff --git a/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml b/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml deleted file mode 100644 index bee01a83586..00000000000 --- a/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml +++ /dev/null @@ -1,15 +0,0 @@ -tasks: -- install: -- ceph: - conf: - mds: - mds thrash exports: 1 - mds debug subtrees: 1 - mds debug scatterstat: 1 - mds verify scatter: 1 -- ceph-fuse: -- workunit: - clients: - client.0: - - suites/fsstress.sh - diff --git a/suites/fs/basic/% b/suites/fs/basic/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/fs/basic/clusters/fixed-3.yaml b/suites/fs/basic/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- 
a/suites/fs/basic/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/fs/basic/debug/mds_client.yaml b/suites/fs/basic/debug/mds_client.yaml deleted file mode 120000 index 335c1cafed7..00000000000 --- a/suites/fs/basic/debug/mds_client.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/suites/fs/basic/fs/btrfs.yaml b/suites/fs/basic/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/fs/basic/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/fs/basic/inline/no.yaml b/suites/fs/basic/inline/no.yaml deleted file mode 100644 index 2030acb9083..00000000000 --- a/suites/fs/basic/inline/no.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install: -- ceph: diff --git a/suites/fs/basic/inline/yes.yaml b/suites/fs/basic/inline/yes.yaml deleted file mode 100644 index 72a285c590f..00000000000 --- a/suites/fs/basic/inline/yes.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: -- ceph: -- exec: - client.0: - - ceph mds set inline_data true --yes-i-really-mean-it diff --git a/suites/fs/basic/overrides/whitelist_wrongly_marked_down.yaml b/suites/fs/basic/overrides/whitelist_wrongly_marked_down.yaml deleted file mode 120000 index 08f746bf894..00000000000 --- a/suites/fs/basic/overrides/whitelist_wrongly_marked_down.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/suites/fs/basic/tasks/cfuse_workunit_kernel_untar_build.yaml b/suites/fs/basic/tasks/cfuse_workunit_kernel_untar_build.yaml deleted file mode 100644 index 3e99204debb..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_kernel_untar_build.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - kernel_untar_build.sh diff --git a/suites/fs/basic/tasks/cfuse_workunit_misc.yaml b/suites/fs/basic/tasks/cfuse_workunit_misc.yaml deleted file mode 100644 index 683d3f592c2..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_misc.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - timeout: 6h - clients: - all: - - fs/misc diff --git a/suites/fs/basic/tasks/cfuse_workunit_misc_test_o_trunc.yaml b/suites/fs/basic/tasks/cfuse_workunit_misc_test_o_trunc.yaml deleted file mode 100644 index c9720a2fd48..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_misc_test_o_trunc.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - fs/test_o_trunc.sh diff --git a/suites/fs/basic/tasks/cfuse_workunit_suites_blogbench.yaml b/suites/fs/basic/tasks/cfuse_workunit_suites_blogbench.yaml deleted file mode 100644 index 09898e16bda..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_suites_blogbench.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/fs/basic/tasks/cfuse_workunit_suites_dbench.yaml b/suites/fs/basic/tasks/cfuse_workunit_suites_dbench.yaml deleted file mode 100644 index ad96b4c5e7f..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_suites_dbench.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/dbench.sh diff --git a/suites/fs/basic/tasks/cfuse_workunit_suites_ffsb.yaml b/suites/fs/basic/tasks/cfuse_workunit_suites_ffsb.yaml deleted file mode 100644 index 86008160034..00000000000 --- 
a/suites/fs/basic/tasks/cfuse_workunit_suites_ffsb.yaml +++ /dev/null @@ -1,11 +0,0 @@ -overrides: - ceph: - conf: - osd: - filestore flush min: 0 -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/ffsb.sh diff --git a/suites/fs/basic/tasks/cfuse_workunit_suites_fsstress.yaml b/suites/fs/basic/tasks/cfuse_workunit_suites_fsstress.yaml deleted file mode 100644 index 5908d951b2d..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/fs/basic/tasks/cfuse_workunit_suites_fsx.yaml b/suites/fs/basic/tasks/cfuse_workunit_suites_fsx.yaml deleted file mode 100644 index 3c11ed74fc7..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_suites_fsx.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsx.sh diff --git a/suites/fs/basic/tasks/cfuse_workunit_suites_fsync.yaml b/suites/fs/basic/tasks/cfuse_workunit_suites_fsync.yaml deleted file mode 100644 index c6043e209bd..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_suites_fsync.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsync-tester.sh diff --git a/suites/fs/basic/tasks/cfuse_workunit_suites_iogen.yaml b/suites/fs/basic/tasks/cfuse_workunit_suites_iogen.yaml deleted file mode 100644 index 6989990e22a..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_suites_iogen.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/iogen.sh - diff --git a/suites/fs/basic/tasks/cfuse_workunit_suites_iozone.yaml b/suites/fs/basic/tasks/cfuse_workunit_suites_iozone.yaml deleted file mode 100644 index 1e23f670e28..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_suites_iozone.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: [client.0] -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/fs/basic/tasks/cfuse_workunit_suites_pjd.yaml b/suites/fs/basic/tasks/cfuse_workunit_suites_pjd.yaml deleted file mode 100644 index 65bcd0d0333..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_suites_pjd.yaml +++ /dev/null @@ -1,15 +0,0 @@ -overrides: - ceph: - conf: - client: - debug ms: 1 - debug client: 20 - mds: - debug ms: 1 - debug mds: 20 -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/pjd.sh diff --git a/suites/fs/basic/tasks/cfuse_workunit_suites_truncate_delay.yaml b/suites/fs/basic/tasks/cfuse_workunit_suites_truncate_delay.yaml deleted file mode 100644 index 911026e13bb..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_suites_truncate_delay.yaml +++ /dev/null @@ -1,15 +0,0 @@ -overrides: - ceph: - conf: - client: - ms_inject_delay_probability: 1 - ms_inject_delay_type: osd - ms_inject_delay_max: 5 - client_oc_max_dirty_age: 1 -tasks: -- ceph-fuse: -- exec: - client.0: - - cd $TESTDIR/mnt.* && dd if=/dev/zero of=./foo count=100 - - sleep 2 - - cd $TESTDIR/mnt.* && truncate --size 0 ./foo diff --git a/suites/fs/basic/tasks/cfuse_workunit_trivial_sync.yaml b/suites/fs/basic/tasks/cfuse_workunit_trivial_sync.yaml deleted file mode 100644 index 9509650c76c..00000000000 --- a/suites/fs/basic/tasks/cfuse_workunit_trivial_sync.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: [fs/misc/trivial_sync.sh] diff --git a/suites/fs/basic/tasks/libcephfs_interface_tests.yaml b/suites/fs/basic/tasks/libcephfs_interface_tests.yaml deleted file mode 100644 index 
0b1d41fea5c..00000000000 --- a/suites/fs/basic/tasks/libcephfs_interface_tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - client.0: - - libcephfs/test.sh diff --git a/suites/fs/basic/tasks/libcephfs_java.yaml b/suites/fs/basic/tasks/libcephfs_java.yaml deleted file mode 100644 index 4330d50965e..00000000000 --- a/suites/fs/basic/tasks/libcephfs_java.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - client.0: - - libcephfs-java/test.sh diff --git a/suites/fs/basic/tasks/mds_creation_retry.yaml b/suites/fs/basic/tasks/mds_creation_retry.yaml deleted file mode 100644 index 76ceeafa8e7..00000000000 --- a/suites/fs/basic/tasks/mds_creation_retry.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: --mds_creation_failure: --ceph-fuse: -- workunit: - clients: - all: [fs/misc/trivial_sync.sh] - diff --git a/suites/fs/multiclient/% b/suites/fs/multiclient/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/fs/multiclient/clusters/three_clients.yaml b/suites/fs/multiclient/clusters/three_clients.yaml deleted file mode 100644 index fd2535fd4a0..00000000000 --- a/suites/fs/multiclient/clusters/three_clients.yaml +++ /dev/null @@ -1,5 +0,0 @@ -roles: -- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2] -- [client.2] -- [client.1] -- [client.0] diff --git a/suites/fs/multiclient/clusters/two_clients.yaml b/suites/fs/multiclient/clusters/two_clients.yaml deleted file mode 100644 index 2258befd8bf..00000000000 --- a/suites/fs/multiclient/clusters/two_clients.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2] -- [client.1] -- [client.0] diff --git a/suites/fs/multiclient/debug/mds_client.yaml b/suites/fs/multiclient/debug/mds_client.yaml deleted file mode 120000 index 335c1cafed7..00000000000 --- a/suites/fs/multiclient/debug/mds_client.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/suites/fs/multiclient/fs/btrfs.yaml b/suites/fs/multiclient/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/fs/multiclient/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/fs/multiclient/mount/ceph-fuse.yaml b/suites/fs/multiclient/mount/ceph-fuse.yaml deleted file mode 100644 index 37ac5b69e61..00000000000 --- a/suites/fs/multiclient/mount/ceph-fuse.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: diff --git a/suites/fs/multiclient/mount/kclient.yaml.disabled b/suites/fs/multiclient/mount/kclient.yaml.disabled deleted file mode 100644 index 04adb48b63f..00000000000 --- a/suites/fs/multiclient/mount/kclient.yaml.disabled +++ /dev/null @@ -1,9 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false -tasks: -- install: -- ceph: -- kclient: diff --git a/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled b/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled deleted file mode 100644 index e486c44c51e..00000000000 --- a/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled +++ /dev/null @@ -1,20 +0,0 @@ -# make sure we get the same MPI version on all hosts -os_type: ubuntu -os_version: "14.04" - -tasks: -- pexec: - clients: - - cd $TESTDIR - - wget http://ceph.com/qa/fsx-mpi.c - - mpicc fsx-mpi.c -o fsx-mpi - - rm fsx-mpi.c - - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt -- ssh_keys: -- mpi: - exec: $TESTDIR/fsx-mpi 1MB -N 50000 -p 10000 -l 1048576 - workdir: $TESTDIR/gmnt -- pexec: - all: - - rm 
$TESTDIR/gmnt - - rm $TESTDIR/fsx-mpi diff --git a/suites/fs/multiclient/tasks/ior-shared-file.yaml b/suites/fs/multiclient/tasks/ior-shared-file.yaml deleted file mode 100644 index dcf24247a92..00000000000 --- a/suites/fs/multiclient/tasks/ior-shared-file.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# make sure we get the same MPI version on all hosts -os_type: ubuntu -os_version: "14.04" - -tasks: -- pexec: - clients: - - cd $TESTDIR - - wget http://ceph.com/qa/ior.tbz2 - - tar xvfj ior.tbz2 - - cd ior - - ./configure - - make - - make install DESTDIR=$TESTDIR/binary/ - - cd $TESTDIR/ - - rm ior.tbz2 - - rm -r ior - - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt -- ssh_keys: -- mpi: - exec: $TESTDIR/binary/usr/local/bin/ior -e -w -r -W -b 10m -a POSIX -o $TESTDIR/gmnt/ior.testfile -- pexec: - all: - - rm -f $TESTDIR/gmnt/ior.testfile - - rm -f $TESTDIR/gmnt - - rm -rf $TESTDIR/binary diff --git a/suites/fs/multiclient/tasks/mdtest.yaml b/suites/fs/multiclient/tasks/mdtest.yaml deleted file mode 100644 index 1dd95d954fb..00000000000 --- a/suites/fs/multiclient/tasks/mdtest.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# make sure we get the same MPI version on all hosts -os_type: ubuntu -os_version: "14.04" - -tasks: -- pexec: - clients: - - cd $TESTDIR - - wget http://ceph.com/qa/mdtest-1.9.3.tgz - - mkdir mdtest-1.9.3 - - cd mdtest-1.9.3 - - tar xvfz $TESTDIR/mdtest-1.9.3.tgz - - rm $TESTDIR/mdtest-1.9.3.tgz - - MPI_CC=mpicc make - - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt -- ssh_keys: -- mpi: - exec: $TESTDIR/mdtest-1.9.3/mdtest -d $TESTDIR/gmnt -I 20 -z 5 -b 2 -R -- pexec: - all: - - rm -f $TESTDIR/gmnt - - rm -rf $TESTDIR/mdtest-1.9.3 - - rm -rf $TESTDIR/._mdtest-1.9.3 \ No newline at end of file diff --git a/suites/fs/snaps/% b/suites/fs/snaps/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/fs/snaps/clusters/fixed-3.yaml b/suites/fs/snaps/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/fs/snaps/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/fs/snaps/fs/btrfs.yaml b/suites/fs/snaps/fs/btrfs.yaml deleted file mode 100644 index 4c7af311538..00000000000 --- a/suites/fs/snaps/fs/btrfs.yaml +++ /dev/null @@ -1,6 +0,0 @@ -overrides: - ceph: - fs: btrfs - conf: - osd: - osd op thread timeout: 60 diff --git a/suites/fs/snaps/mount/ceph-fuse.yaml b/suites/fs/snaps/mount/ceph-fuse.yaml deleted file mode 100644 index 37ac5b69e61..00000000000 --- a/suites/fs/snaps/mount/ceph-fuse.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: diff --git a/suites/fs/snaps/tasks/snaptests.yaml b/suites/fs/snaps/tasks/snaptests.yaml deleted file mode 100644 index 7f7b0f21569..00000000000 --- a/suites/fs/snaps/tasks/snaptests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - clients: - all: - - snaps/snaptest-0.sh - - snaps/snaptest-1.sh - - snaps/snaptest-2.sh diff --git a/suites/fs/thrash/% b/suites/fs/thrash/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/fs/thrash/ceph-thrash/default.yaml b/suites/fs/thrash/ceph-thrash/default.yaml deleted file mode 100644 index aefdf826ce7..00000000000 --- a/suites/fs/thrash/ceph-thrash/default.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- mds_thrash: diff --git a/suites/fs/thrash/ceph/base.yaml b/suites/fs/thrash/ceph/base.yaml deleted file mode 100644 index 2030acb9083..00000000000 --- a/suites/fs/thrash/ceph/base.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install: -- ceph: diff 
--git a/suites/fs/thrash/clusters/mds-1active-1standby.yaml b/suites/fs/thrash/clusters/mds-1active-1standby.yaml deleted file mode 100644 index 7e951b95889..00000000000 --- a/suites/fs/thrash/clusters/mds-1active-1standby.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.c, osd.0, osd.1, osd.2] -- [mon.b, mds.a, osd.3, osd.4, osd.5] -- [client.0, mds.b-s-a] diff --git a/suites/fs/thrash/debug/mds_client.yaml b/suites/fs/thrash/debug/mds_client.yaml deleted file mode 120000 index 335c1cafed7..00000000000 --- a/suites/fs/thrash/debug/mds_client.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/suites/fs/thrash/fs/btrfs.yaml b/suites/fs/thrash/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/fs/thrash/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/fs/thrash/msgr-failures/none.yaml b/suites/fs/thrash/msgr-failures/none.yaml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml b/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml deleted file mode 100644 index adcebc0baac..00000000000 --- a/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml +++ /dev/null @@ -1,8 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 2500 - mds inject delay type: osd mds - ms inject delay probability: .005 - ms inject delay max: 1 diff --git a/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml b/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml deleted file mode 120000 index 08f746bf894..00000000000 --- a/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml b/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml deleted file mode 100644 index 5908d951b2d..00000000000 --- a/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml b/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml deleted file mode 100644 index 930bf4a671d..00000000000 --- a/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/pjd.sh diff --git a/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml b/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml deleted file mode 100644 index 9509650c76c..00000000000 --- a/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: [fs/misc/trivial_sync.sh] diff --git a/suites/fs/traceless/% b/suites/fs/traceless/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/fs/traceless/clusters/fixed-3.yaml b/suites/fs/traceless/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/fs/traceless/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/fs/traceless/debug/mds_client.yaml b/suites/fs/traceless/debug/mds_client.yaml deleted file mode 120000 index 335c1cafed7..00000000000 --- a/suites/fs/traceless/debug/mds_client.yaml +++ /dev/null @@ -1 +0,0 @@ 
-../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/suites/fs/traceless/fs/btrfs.yaml b/suites/fs/traceless/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/fs/traceless/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml b/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml deleted file mode 120000 index 08f746bf894..00000000000 --- a/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml b/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml deleted file mode 100644 index ed9d92d5bda..00000000000 --- a/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml b/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml deleted file mode 100644 index e678ed47cc6..00000000000 --- a/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - suites/dbench.sh diff --git a/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml b/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml deleted file mode 100644 index 652a3a62f59..00000000000 --- a/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: -- ceph: - conf: - osd: - filestore flush min: 0 -- ceph-fuse: -- workunit: - clients: - all: - - suites/ffsb.sh diff --git a/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml b/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml deleted file mode 100644 index b58487c0785..00000000000 --- a/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/fs/traceless/traceless/50pc.yaml b/suites/fs/traceless/traceless/50pc.yaml deleted file mode 100644 index e0418bcb2be..00000000000 --- a/suites/fs/traceless/traceless/50pc.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - mds: - mds inject traceless reply probability: .5 diff --git a/suites/fs/verify/% b/suites/fs/verify/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/fs/verify/clusters/fixed-3.yaml b/suites/fs/verify/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/fs/verify/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/fs/verify/debug/mds_client.yaml b/suites/fs/verify/debug/mds_client.yaml deleted file mode 120000 index 335c1cafed7..00000000000 --- a/suites/fs/verify/debug/mds_client.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/suites/fs/verify/fs/btrfs.yaml b/suites/fs/verify/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/fs/verify/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git 
a/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml b/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml deleted file mode 120000 index 08f746bf894..00000000000 --- a/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml b/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml deleted file mode 100644 index 73319776f03..00000000000 --- a/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- install: -- ceph: - conf: - client: - debug client: 1/20 - debug ms: 0/10 -- ceph-fuse: -- workunit: - clients: - all: - - suites/dbench.sh diff --git a/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml b/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml deleted file mode 100644 index b58487c0785..00000000000 --- a/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/fs/verify/tasks/libcephfs_interface_tests.yaml b/suites/fs/verify/tasks/libcephfs_interface_tests.yaml deleted file mode 100644 index 22d1f142161..00000000000 --- a/suites/fs/verify/tasks/libcephfs_interface_tests.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - libcephfs/test.sh diff --git a/suites/fs/verify/validater/lockdep.yaml b/suites/fs/verify/validater/lockdep.yaml deleted file mode 100644 index 25f84355c0b..00000000000 --- a/suites/fs/verify/validater/lockdep.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - lockdep: true diff --git a/suites/fs/verify/validater/valgrind.yaml b/suites/fs/verify/validater/valgrind.yaml deleted file mode 100644 index c3d3aed4892..00000000000 --- a/suites/fs/verify/validater/valgrind.yaml +++ /dev/null @@ -1,12 +0,0 @@ -overrides: - install: - ceph: - flavor: notcmalloc - ceph: - valgrind: - mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] - osd: [--tool=memcheck] - mds: [--tool=memcheck] - ceph-fuse: - client.0: - valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes] diff --git a/suites/hadoop/basic/% b/suites/hadoop/basic/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/hadoop/basic/clusters/fixed-3.yaml b/suites/hadoop/basic/clusters/fixed-3.yaml deleted file mode 100644 index 708d751178c..00000000000 --- a/suites/hadoop/basic/clusters/fixed-3.yaml +++ /dev/null @@ -1,5 +0,0 @@ -roles: -- [mon.0, mds.0, osd.0, hadoop.master.0] -- [mon.1, osd.1, hadoop.slave.0] -- [mon.2, hadoop.slave.1, client.0] - diff --git a/suites/hadoop/basic/tasks/hadoop-internal.yaml b/suites/hadoop/basic/tasks/hadoop-internal.yaml deleted file mode 100644 index 5b52a15d56e..00000000000 --- a/suites/hadoop/basic/tasks/hadoop-internal.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- ssh_keys: -- install: -- ceph: -- hadoop: -- workunit: - clients: - client.0: [hadoop-internal-tests] diff --git a/suites/hadoop/basic/tasks/wordcount.yaml b/suites/hadoop/basic/tasks/wordcount.yaml deleted file mode 100644 index 50b29c78d8c..00000000000 --- a/suites/hadoop/basic/tasks/wordcount.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- ssh_keys: -- install: -- ceph: -- hadoop: -- workunit: - clients: - client.0: [hadoop-wordcount] diff --git a/suites/install/install.yaml 
b/suites/install/install.yaml deleted file mode 100644 index 12f1e852290..00000000000 --- a/suites/install/install.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: -- install: - project: samba - extra_packages: ['samba'] -- ceph: diff --git a/suites/kcephfs/cephfs/% b/suites/kcephfs/cephfs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/kcephfs/cephfs/clusters/fixed-3.yaml b/suites/kcephfs/cephfs/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/kcephfs/cephfs/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/kcephfs/cephfs/conf.yaml b/suites/kcephfs/cephfs/conf.yaml deleted file mode 100644 index 30da870b25d..00000000000 --- a/suites/kcephfs/cephfs/conf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false diff --git a/suites/kcephfs/cephfs/fs/btrfs.yaml b/suites/kcephfs/cephfs/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/kcephfs/cephfs/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml deleted file mode 100644 index 018a71f78ec..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - direct_io - diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml deleted file mode 100644 index d969e5561cb..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - kernel_untar_build.sh diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml deleted file mode 100644 index 858ec334420..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- install: -- ceph: - conf: - mds: - debug mds: 20 - debug ms: 1 -- kclient: -- workunit: - clients: - all: - - fs/misc diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml deleted file mode 100644 index 6ec5e36cddb..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - fs/test_o_trunc.sh - diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml deleted file mode 100644 index 77d045e8708..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/dbench.sh diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml deleted file mode 100644 index 2b88af692b4..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: -- ceph: - conf: - osd: - filestore flush min: 0 -- kclient: -- workunit: - 
clients: - all: - - suites/ffsb.sh diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml deleted file mode 100644 index 10b84b8af4e..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml deleted file mode 100644 index a0d2e765bdb..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/fsx.sh diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml deleted file mode 100644 index 1b3f4d55501..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/fsync-tester.sh diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml deleted file mode 100644 index bfe25f2f837..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml deleted file mode 100644 index 305de51e92b..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/pjd.sh diff --git a/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml b/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml deleted file mode 100644 index 3503e12820f..00000000000 --- a/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: [fs/misc/trivial_sync.sh] diff --git a/suites/kcephfs/mixed-clients/% b/suites/kcephfs/mixed-clients/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/kcephfs/mixed-clients/clusters/fixed-3.yaml b/suites/kcephfs/mixed-clients/clusters/fixed-3.yaml deleted file mode 100644 index e1d3c7b7932..00000000000 --- a/suites/kcephfs/mixed-clients/clusters/fixed-3.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mds.a, osd.0, osd.1] -- [mon.b, mon.c, osd.2, osd.3, client.0] -- [client.1] diff --git a/suites/kcephfs/mixed-clients/conf.yaml b/suites/kcephfs/mixed-clients/conf.yaml deleted file mode 100644 index 30da870b25d..00000000000 --- a/suites/kcephfs/mixed-clients/conf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false diff --git a/suites/kcephfs/mixed-clients/fs/btrfs.yaml b/suites/kcephfs/mixed-clients/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/kcephfs/mixed-clients/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml 
b/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml deleted file mode 100644 index 0121a01c538..00000000000 --- a/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml +++ /dev/null @@ -1,20 +0,0 @@ -tasks: -- install: -- ceph: -- parallel: - - user-workload - - kclient-workload -user-workload: - sequential: - - ceph-fuse: [client.0] - - workunit: - clients: - client.0: - - suites/iozone.sh -kclient-workload: - sequential: - - kclient: [client.1] - - workunit: - clients: - client.1: - - suites/dbench.sh diff --git a/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml b/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml deleted file mode 100644 index 7b0ce5b5d58..00000000000 --- a/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml +++ /dev/null @@ -1,20 +0,0 @@ -tasks: -- install: -- ceph: -- parallel: - - user-workload - - kclient-workload -user-workload: - sequential: - - ceph-fuse: [client.0] - - workunit: - clients: - client.0: - - suites/blogbench.sh -kclient-workload: - sequential: - - kclient: [client.1] - - workunit: - clients: - client.1: - - kernel_untar_build.sh diff --git a/suites/kcephfs/thrash/% b/suites/kcephfs/thrash/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/kcephfs/thrash/clusters/fixed-3.yaml b/suites/kcephfs/thrash/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/kcephfs/thrash/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/kcephfs/thrash/conf.yaml b/suites/kcephfs/thrash/conf.yaml deleted file mode 100644 index 30da870b25d..00000000000 --- a/suites/kcephfs/thrash/conf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false diff --git a/suites/kcephfs/thrash/fs/btrfs.yaml b/suites/kcephfs/thrash/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/kcephfs/thrash/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/kcephfs/thrash/thrashers/default.yaml b/suites/kcephfs/thrash/thrashers/default.yaml deleted file mode 100644 index 14d772583cf..00000000000 --- a/suites/kcephfs/thrash/thrashers/default.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -- thrashosds: diff --git a/suites/kcephfs/thrash/thrashers/mon-thrasher.yaml b/suites/kcephfs/thrash/thrashers/mon-thrasher.yaml deleted file mode 100644 index 90612f21865..00000000000 --- a/suites/kcephfs/thrash/thrashers/mon-thrasher.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: -- ceph: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 diff --git a/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml b/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml deleted file mode 100644 index 0c4a1528d08..00000000000 --- a/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml +++ /dev/null @@ -1,11 +0,0 @@ -overrides: - ceph: - conf: - osd: - filestore flush min: 0 -tasks: -- kclient: -- workunit: - clients: - all: - - suites/ffsb.sh diff --git a/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml b/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml deleted file mode 100644 index 832e0241b27..00000000000 --- 
a/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- kclient: -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/knfs/basic/% b/suites/knfs/basic/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/knfs/basic/ceph/base.yaml b/suites/knfs/basic/ceph/base.yaml deleted file mode 100644 index 7e80c462c37..00000000000 --- a/suites/knfs/basic/ceph/base.yaml +++ /dev/null @@ -1,13 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false - -tasks: -- install: -- ceph: -- kclient: [client.0] -- knfsd: - client.0: - options: [rw,no_root_squash,async] diff --git a/suites/knfs/basic/clusters/extra-client.yaml b/suites/knfs/basic/clusters/extra-client.yaml deleted file mode 120000 index 1582e308945..00000000000 --- a/suites/knfs/basic/clusters/extra-client.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/extra-client.yaml \ No newline at end of file diff --git a/suites/knfs/basic/fs/btrfs.yaml b/suites/knfs/basic/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/knfs/basic/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/knfs/basic/mount/v3.yaml b/suites/knfs/basic/mount/v3.yaml deleted file mode 100644 index 1b61119242b..00000000000 --- a/suites/knfs/basic/mount/v3.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- nfs: - client.1: - server: client.0 - options: [rw,hard,intr,nfsvers=3] diff --git a/suites/knfs/basic/mount/v4.yaml b/suites/knfs/basic/mount/v4.yaml deleted file mode 100644 index 88405666bfb..00000000000 --- a/suites/knfs/basic/mount/v4.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- nfs: - client.1: - server: client.0 - options: [rw,hard,intr,nfsvers=4] diff --git a/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml b/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml deleted file mode 100644 index b9c0a5e05a3..00000000000 --- a/suites/knfs/basic/tasks/nfs-workunit-kernel-untar-build.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - timeout: 6h - clients: - client.1: - - kernel_untar_build.sh diff --git a/suites/knfs/basic/tasks/nfs_workunit_misc.yaml b/suites/knfs/basic/tasks/nfs_workunit_misc.yaml deleted file mode 100644 index 135c4a74009..00000000000 --- a/suites/knfs/basic/tasks/nfs_workunit_misc.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- workunit: - clients: - client.1: - - fs/misc/chmod.sh - - fs/misc/i_complete_vs_rename.sh - - fs/misc/trivial_sync.sh - #- fs/misc/multiple_rsync.sh - #- fs/misc/xattrs.sh -# Once we can run multiple_rsync.sh and xattrs.sh we can change to this -# - misc diff --git a/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml b/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml deleted file mode 100644 index e554a3d9a06..00000000000 --- a/suites/knfs/basic/tasks/nfs_workunit_suites_blogbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.1: - - suites/blogbench.sh diff --git a/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml b/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml deleted file mode 100644 index 1da1b768d02..00000000000 --- a/suites/knfs/basic/tasks/nfs_workunit_suites_dbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.1: - - suites/dbench-short.sh diff --git a/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml b/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml deleted file mode 100644 index 
3090f91ea43..00000000000 --- a/suites/knfs/basic/tasks/nfs_workunit_suites_ffsb.yaml +++ /dev/null @@ -1,10 +0,0 @@ -overrides: - ceph: - conf: - osd: - filestore flush min: 0 -tasks: -- workunit: - clients: - client.1: - - suites/ffsb.sh diff --git a/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml b/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml deleted file mode 100644 index bbe7b7a4045..00000000000 --- a/suites/knfs/basic/tasks/nfs_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.1: - - suites/fsstress.sh diff --git a/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml b/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml deleted file mode 100644 index 7c3eec2ff3e..00000000000 --- a/suites/knfs/basic/tasks/nfs_workunit_suites_iozone.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.1: - - suites/iozone.sh diff --git a/suites/krbd/rbd-nomount/% b/suites/krbd/rbd-nomount/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/krbd/rbd-nomount/clusters/fixed-3.yaml b/suites/krbd/rbd-nomount/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/krbd/rbd-nomount/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/krbd/rbd-nomount/conf.yaml b/suites/krbd/rbd-nomount/conf.yaml deleted file mode 100644 index 30da870b25d..00000000000 --- a/suites/krbd/rbd-nomount/conf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false diff --git a/suites/krbd/rbd-nomount/fs/btrfs.yaml b/suites/krbd/rbd-nomount/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/krbd/rbd-nomount/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/krbd/rbd-nomount/install/ceph.yaml b/suites/krbd/rbd-nomount/install/ceph.yaml deleted file mode 100644 index 2030acb9083..00000000000 --- a/suites/krbd/rbd-nomount/install/ceph.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install: -- ceph: diff --git a/suites/krbd/rbd-nomount/msgr-failures/few.yaml b/suites/krbd/rbd-nomount/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/krbd/rbd-nomount/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/krbd/rbd-nomount/msgr-failures/many.yaml b/suites/krbd/rbd-nomount/msgr-failures/many.yaml deleted file mode 100644 index 86f8dde8a0e..00000000000 --- a/suites/krbd/rbd-nomount/msgr-failures/many.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 500 diff --git a/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml b/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml deleted file mode 100644 index 675b98e73a5..00000000000 --- a/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- workunit: - clients: - all: - - rbd/concurrent.sh -# Options for rbd/concurrent.sh (default values shown) -# env: -# RBD_CONCURRENT_ITER: 100 -# RBD_CONCURRENT_COUNT: 5 -# RBD_CONCURRENT_DELAY: 5 diff --git a/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml b/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml deleted file mode 100644 index e5017e118d1..00000000000 --- a/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml +++ /dev/null @@ -1,15 +0,0 @@ -tasks: -- 
workunit: - clients: - all: - - rbd/image_read.sh -# Options for rbd/image_read.sh (default values shown) -# env: -# IMAGE_READ_LOCAL_FILES: 'false' -# IMAGE_READ_FORMAT: '2' -# IMAGE_READ_VERBOSE: 'true' -# IMAGE_READ_PAGE_SIZE: '4096' -# IMAGE_READ_OBJECT_ORDER: '22' -# IMAGE_READ_TEST_CLONES: 'true' -# IMAGE_READ_DOUBLE_ORDER: 'true' -# IMAGE_READ_HALF_ORDER: 'false' diff --git a/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml b/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml deleted file mode 100644 index aa155827c69..00000000000 --- a/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - rbd/kernel.sh diff --git a/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml b/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml deleted file mode 100644 index c1529398b9e..00000000000 --- a/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - rbd/map-snapshot-io.sh diff --git a/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml b/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml deleted file mode 100644 index c2160997c81..00000000000 --- a/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - rbd/map-unmap.sh diff --git a/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml b/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml deleted file mode 100644 index c493cfaf420..00000000000 --- a/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - clients: - all: - - rbd/simple_big.sh - diff --git a/suites/krbd/rbd/% b/suites/krbd/rbd/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/krbd/rbd/clusters/fixed-3.yaml b/suites/krbd/rbd/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/krbd/rbd/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/krbd/rbd/conf.yaml b/suites/krbd/rbd/conf.yaml deleted file mode 100644 index 30da870b25d..00000000000 --- a/suites/krbd/rbd/conf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false diff --git a/suites/krbd/rbd/fs/btrfs.yaml b/suites/krbd/rbd/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/krbd/rbd/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/krbd/rbd/msgr-failures/few.yaml b/suites/krbd/rbd/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/krbd/rbd/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/krbd/rbd/msgr-failures/many.yaml b/suites/krbd/rbd/msgr-failures/many.yaml deleted file mode 100644 index 86f8dde8a0e..00000000000 --- a/suites/krbd/rbd/msgr-failures/many.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 500 diff --git a/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml b/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml deleted file mode 100644 index ef2a35dcc1d..00000000000 --- a/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- rbd: - all: -- workunit: - clients: - all: - - kernel_untar_build.sh diff --git 
a/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml b/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml deleted file mode 100644 index d779eea23ca..00000000000 --- a/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- rbd: - all: -- workunit: - clients: - all: - - suites/dbench.sh diff --git a/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml b/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml deleted file mode 100644 index 5204bb87ffe..00000000000 --- a/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: -- ceph: -- rbd: - all: - image_size: 20480 -- workunit: - clients: - all: - - suites/ffsb.sh diff --git a/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml b/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml deleted file mode 100644 index f9d62fefcac..00000000000 --- a/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- rbd: - all: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_btrfs.yaml b/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_btrfs.yaml deleted file mode 100644 index f3930a8986a..00000000000 --- a/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_btrfs.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: -- ceph: -- rbd: - all: - fs_type: btrfs -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml b/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml deleted file mode 100644 index f765b74a6c7..00000000000 --- a/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: -- ceph: -- rbd: - all: - fs_type: ext4 -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml b/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml deleted file mode 100644 index 98c0849c57e..00000000000 --- a/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- rbd: - all: -- workunit: - clients: - all: - - suites/fsx.sh diff --git a/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml b/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml deleted file mode 100644 index eb8f18d60de..00000000000 --- a/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: -- ceph: -- rbd: - all: - image_size: 20480 -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml b/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml deleted file mode 100644 index 7c2796b2a88..00000000000 --- a/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- rbd: - all: -- workunit: - clients: - all: [fs/misc/trivial_sync.sh] diff --git a/suites/krbd/singleton/% b/suites/krbd/singleton/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/krbd/singleton/conf.yaml b/suites/krbd/singleton/conf.yaml deleted file mode 100644 index 30da870b25d..00000000000 --- a/suites/krbd/singleton/conf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false diff --git a/suites/krbd/singleton/fs/btrfs.yaml b/suites/krbd/singleton/fs/btrfs.yaml deleted file mode 120000 index 
10d0c3f1266..00000000000 --- a/suites/krbd/singleton/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/krbd/singleton/msgr-failures/few.yaml b/suites/krbd/singleton/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/krbd/singleton/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/krbd/singleton/msgr-failures/many.yaml b/suites/krbd/singleton/msgr-failures/many.yaml deleted file mode 100644 index 86f8dde8a0e..00000000000 --- a/suites/krbd/singleton/msgr-failures/many.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 500 diff --git a/suites/krbd/singleton/tasks/rbd_xfstests.yaml b/suites/krbd/singleton/tasks/rbd_xfstests.yaml deleted file mode 100644 index 2adb17c475c..00000000000 --- a/suites/krbd/singleton/tasks/rbd_xfstests.yaml +++ /dev/null @@ -1,22 +0,0 @@ -roles: -- [mon.a, mon.c, osd.0, osd.1, osd.2] -- [mon.b, mds.a, osd.3, osd.4, osd.5] -- [client.0] -- [client.1] -- [client.2] -tasks: -- install: -- ceph: -- rbd.xfstests: - client.0: - tests: 1-9 11-15 17 19-21 26-29 31-34 41 46-54 56 61 63-67 69-70 74-76 78-79 84-89 91 - test_image: 'test_image-0' - scratch_image: 'scratch_image-0' - client.1: - tests: 92 100 103 105 108 110 116-121 124 126 129-132 - test_image: 'test_image-1' - scratch_image: 'scratch_image-1' - client.2: - tests: 133-135 137-141 164-167 184 187-190 192 194 196 199 201 203 214-216 220-227 234 236-238 241 243-249 253 257-259 261-262 269 273 275 277-278 - test_image: 'test_image-2' - scratch_image: 'scratch_image-2' diff --git a/suites/krbd/thrash/% b/suites/krbd/thrash/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/krbd/thrash/clusters/fixed-3.yaml b/suites/krbd/thrash/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/krbd/thrash/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/krbd/thrash/conf.yaml b/suites/krbd/thrash/conf.yaml deleted file mode 100644 index 30da870b25d..00000000000 --- a/suites/krbd/thrash/conf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false diff --git a/suites/krbd/thrash/fs/btrfs.yaml b/suites/krbd/thrash/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/krbd/thrash/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/krbd/thrash/thrashers/default.yaml b/suites/krbd/thrash/thrashers/default.yaml deleted file mode 100644 index 14d772583cf..00000000000 --- a/suites/krbd/thrash/thrashers/default.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -- thrashosds: diff --git a/suites/krbd/thrash/thrashers/mon-thrasher.yaml b/suites/krbd/thrash/thrashers/mon-thrasher.yaml deleted file mode 100644 index 90612f21865..00000000000 --- a/suites/krbd/thrash/thrashers/mon-thrasher.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: -- ceph: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 diff --git a/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml b/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml deleted file mode 100644 index 4ae7d690905..00000000000 --- 
a/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- rbd: - all: - image_size: 20480 -- workunit: - clients: - all: - - suites/ffsb.sh diff --git a/suites/krbd/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled b/suites/krbd/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled deleted file mode 100644 index d61ede1bd66..00000000000 --- a/suites/krbd/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- rbd: - all: - image_size: 20480 -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/marginal/basic/% b/suites/marginal/basic/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/marginal/basic/clusters/fixed-3.yaml b/suites/marginal/basic/clusters/fixed-3.yaml deleted file mode 100644 index 0038432afa7..00000000000 --- a/suites/marginal/basic/clusters/fixed-3.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.c, osd.0, osd.1, osd.2] -- [mon.b, mds.a, osd.3, osd.4, osd.5] -- [client.0] diff --git a/suites/marginal/basic/fs/btrfs.yaml b/suites/marginal/basic/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/marginal/basic/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml b/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml deleted file mode 100644 index 4f25d806313..00000000000 --- a/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml b/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml deleted file mode 100644 index a0d2e765bdb..00000000000 --- a/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/fsx.sh diff --git a/suites/marginal/fs-misc/% b/suites/marginal/fs-misc/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/marginal/fs-misc/clusters/two_clients.yaml b/suites/marginal/fs-misc/clusters/two_clients.yaml deleted file mode 100644 index 2258befd8bf..00000000000 --- a/suites/marginal/fs-misc/clusters/two_clients.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2] -- [client.1] -- [client.0] diff --git a/suites/marginal/fs-misc/fs/btrfs.yaml b/suites/marginal/fs-misc/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/marginal/fs-misc/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/marginal/fs-misc/tasks/locktest.yaml b/suites/marginal/fs-misc/tasks/locktest.yaml deleted file mode 100644 index 444bb1f19b3..00000000000 --- a/suites/marginal/fs-misc/tasks/locktest.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- locktest: [client.0, client.1] diff --git a/suites/marginal/mds_restart/% b/suites/marginal/mds_restart/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/marginal/mds_restart/clusters/one_mds.yaml b/suites/marginal/mds_restart/clusters/one_mds.yaml deleted file mode 100644 index 9e11c02a36c..00000000000 --- a/suites/marginal/mds_restart/clusters/one_mds.yaml +++ /dev/null @@ -1,4 +0,0 @@ 
-roles: -- [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2] -- [mds.a] -- [client.0] diff --git a/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml b/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml deleted file mode 100644 index d086d4cf8d3..00000000000 --- a/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: -- ceph: - conf: - mds: - mds log segment size: 16384 - mds log max segments: 1 -- restart: - exec: - client.0: - - test-backtraces.py diff --git a/suites/marginal/multimds/% b/suites/marginal/multimds/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/marginal/multimds/clusters/3-node-3-mds.yaml b/suites/marginal/multimds/clusters/3-node-3-mds.yaml deleted file mode 100644 index 088d9f0d31d..00000000000 --- a/suites/marginal/multimds/clusters/3-node-3-mds.yaml +++ /dev/null @@ -1,5 +0,0 @@ -roles: -- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2] -- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5] -- [client.0] -- [client.1] diff --git a/suites/marginal/multimds/clusters/3-node-9-mds.yaml b/suites/marginal/multimds/clusters/3-node-9-mds.yaml deleted file mode 100644 index be824f0f554..00000000000 --- a/suites/marginal/multimds/clusters/3-node-9-mds.yaml +++ /dev/null @@ -1,5 +0,0 @@ -roles: -- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2] -- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5] -- [client.0] -- [client.1] diff --git a/suites/marginal/multimds/fs/btrfs.yaml b/suites/marginal/multimds/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/marginal/multimds/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/marginal/multimds/mounts/ceph-fuse.yaml b/suites/marginal/multimds/mounts/ceph-fuse.yaml deleted file mode 100644 index 37ac5b69e61..00000000000 --- a/suites/marginal/multimds/mounts/ceph-fuse.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: diff --git a/suites/marginal/multimds/mounts/kclient.yaml b/suites/marginal/multimds/mounts/kclient.yaml deleted file mode 100644 index c18db8f5ea6..00000000000 --- a/suites/marginal/multimds/mounts/kclient.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: diff --git a/suites/marginal/multimds/tasks/workunit_misc.yaml b/suites/marginal/multimds/tasks/workunit_misc.yaml deleted file mode 100644 index aa62b9e8c3a..00000000000 --- a/suites/marginal/multimds/tasks/workunit_misc.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - fs/misc diff --git a/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml b/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml deleted file mode 100644 index 4c1fcc11ed9..00000000000 --- a/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml b/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml deleted file mode 100644 index 41b2bc8edaa..00000000000 --- a/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/dbench.sh diff --git a/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml b/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml deleted file mode 100644 index ddb18fb791a..00000000000 --- 
a/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml b/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml deleted file mode 100644 index 7efa1adb82d..00000000000 --- a/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/fsync-tester.sh diff --git a/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml b/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml deleted file mode 100644 index e8882134c72..00000000000 --- a/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/pjd.sh diff --git a/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml b/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml deleted file mode 100644 index 3aa5f8825ac..00000000000 --- a/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml +++ /dev/null @@ -1,15 +0,0 @@ -tasks: -- install: -- ceph: - conf: - client: - ms_inject_delay_probability: 1 - ms_inject_delay_type: osd - ms_inject_delay_max: 5 - client_oc_max_dirty_age: 1 -- ceph-fuse: -- exec: - client.0: - - dd if=/dev/zero of=./foo count=100 - - sleep 2 - - truncate --size 0 ./foo diff --git a/suites/marginal/multimds/thrash/exports.yaml b/suites/marginal/multimds/thrash/exports.yaml deleted file mode 100644 index 240b46dfd8a..00000000000 --- a/suites/marginal/multimds/thrash/exports.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - mds: - mds thrash exports: 1 diff --git a/suites/marginal/multimds/thrash/normal.yaml b/suites/marginal/multimds/thrash/normal.yaml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/mixed-clients/basic/clusters/fixed-3.yaml b/suites/mixed-clients/basic/clusters/fixed-3.yaml deleted file mode 100644 index e1d3c7b7932..00000000000 --- a/suites/mixed-clients/basic/clusters/fixed-3.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mds.a, osd.0, osd.1] -- [mon.b, mon.c, osd.2, osd.3, client.0] -- [client.1] diff --git a/suites/mixed-clients/basic/fs/btrfs.yaml b/suites/mixed-clients/basic/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/mixed-clients/basic/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml b/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml deleted file mode 100644 index bb347be7fd7..00000000000 --- a/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml +++ /dev/null @@ -1,26 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false -tasks: -- install: - branch: dumpling -- ceph: -- parallel: - - user-workload - - kclient-workload -user-workload: - sequential: - - ceph-fuse: [client.0] - - workunit: - clients: - client.0: - - suites/iozone.sh -kclient-workload: - sequential: - - kclient: [client.1] - - workunit: - clients: - client.1: - - suites/dbench.sh diff --git a/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml b/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml deleted file mode 100644 index 2c32a61e864..00000000000 --- 
a/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml +++ /dev/null @@ -1,26 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false -tasks: -- install: - branch: dumpling -- ceph: -- parallel: - - user-workload - - kclient-workload -user-workload: - sequential: - - ceph-fuse: [client.0] - - workunit: - clients: - client.0: - - suites/blogbench.sh -kclient-workload: - sequential: - - kclient: [client.1] - - workunit: - clients: - client.1: - - kernel_untar_build.sh diff --git a/suites/mount/fuse.yaml b/suites/mount/fuse.yaml deleted file mode 100644 index d00ffdb4804..00000000000 --- a/suites/mount/fuse.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: [client.0] -- samba: - samba.0: - ceph: "{testdir}/mnt.0" - diff --git a/suites/mount/kclient.yaml b/suites/mount/kclient.yaml deleted file mode 100644 index 56590adcb4f..00000000000 --- a/suites/mount/kclient.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- kclient: [client.0] -- samba: - samba.0: - ceph: "{testdir}/mnt.0" - diff --git a/suites/mount/native.yaml b/suites/mount/native.yaml deleted file mode 100644 index 09b8c1c4e3d..00000000000 --- a/suites/mount/native.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- samba: diff --git a/suites/mount/noceph.yaml b/suites/mount/noceph.yaml deleted file mode 100644 index 3cad4740d8b..00000000000 --- a/suites/mount/noceph.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- localdir: [client.0] -- samba: - samba.0: - ceph: "{testdir}/mnt.0" diff --git a/suites/multimds/basic/% b/suites/multimds/basic/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/multimds/basic/clusters/3-mds.yaml b/suites/multimds/basic/clusters/3-mds.yaml deleted file mode 100644 index c655b90c81c..00000000000 --- a/suites/multimds/basic/clusters/3-mds.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2] -- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5] -- [client.0] diff --git a/suites/multimds/basic/clusters/9-mds.yaml b/suites/multimds/basic/clusters/9-mds.yaml deleted file mode 100644 index ed554c9fe3c..00000000000 --- a/suites/multimds/basic/clusters/9-mds.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2] -- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5] -- [client.0] diff --git a/suites/multimds/basic/debug/mds_client.yaml b/suites/multimds/basic/debug/mds_client.yaml deleted file mode 120000 index 335c1cafed7..00000000000 --- a/suites/multimds/basic/debug/mds_client.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/suites/multimds/basic/fs/btrfs.yaml b/suites/multimds/basic/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/multimds/basic/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/multimds/basic/inline/no.yaml b/suites/multimds/basic/inline/no.yaml deleted file mode 100644 index 2030acb9083..00000000000 --- a/suites/multimds/basic/inline/no.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install: -- ceph: diff --git a/suites/multimds/basic/inline/yes.yaml b/suites/multimds/basic/inline/yes.yaml deleted file mode 100644 index 72a285c590f..00000000000 --- a/suites/multimds/basic/inline/yes.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: -- ceph: -- exec: - client.0: - - ceph mds set inline_data true --yes-i-really-mean-it diff --git 
a/suites/multimds/basic/mount/cfuse.yaml b/suites/multimds/basic/mount/cfuse.yaml deleted file mode 100644 index e3c34a1f604..00000000000 --- a/suites/multimds/basic/mount/cfuse.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph-fuse: diff --git a/suites/multimds/basic/mount/kclient.yaml b/suites/multimds/basic/mount/kclient.yaml deleted file mode 100644 index f00f16aea22..00000000000 --- a/suites/multimds/basic/mount/kclient.yaml +++ /dev/null @@ -1,7 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false -tasks: -- kclient: diff --git a/suites/multimds/basic/overrides/whitelist_wrongly_marked_down.yaml b/suites/multimds/basic/overrides/whitelist_wrongly_marked_down.yaml deleted file mode 120000 index 08f746bf894..00000000000 --- a/suites/multimds/basic/overrides/whitelist_wrongly_marked_down.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/suites/multimds/basic/tasks/kernel_untar_build.yaml b/suites/multimds/basic/tasks/kernel_untar_build.yaml deleted file mode 100644 index 8dbc24a9feb..00000000000 --- a/suites/multimds/basic/tasks/kernel_untar_build.yaml +++ /dev/null @@ -1,10 +0,0 @@ -overrides: - ceph: - conf: - client: - fuse_default_permissions: 0 -tasks: -- workunit: - clients: - all: - - kernel_untar_build.sh diff --git a/suites/multimds/basic/tasks/misc.yaml b/suites/multimds/basic/tasks/misc.yaml deleted file mode 100644 index 6c8327bb0d7..00000000000 --- a/suites/multimds/basic/tasks/misc.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - timeout: 5h - clients: - all: - - fs/misc diff --git a/suites/multimds/basic/tasks/misc_test_o_trunc.yaml b/suites/multimds/basic/tasks/misc_test_o_trunc.yaml deleted file mode 100644 index c9de5c38637..00000000000 --- a/suites/multimds/basic/tasks/misc_test_o_trunc.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - fs/test_o_trunc.sh diff --git a/suites/multimds/basic/tasks/suites_blogbench.yaml b/suites/multimds/basic/tasks/suites_blogbench.yaml deleted file mode 100644 index 4c1fcc11ed9..00000000000 --- a/suites/multimds/basic/tasks/suites_blogbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/multimds/basic/tasks/suites_dbench.yaml b/suites/multimds/basic/tasks/suites_dbench.yaml deleted file mode 100644 index 41b2bc8edaa..00000000000 --- a/suites/multimds/basic/tasks/suites_dbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/dbench.sh diff --git a/suites/multimds/basic/tasks/suites_ffsb.yaml b/suites/multimds/basic/tasks/suites_ffsb.yaml deleted file mode 100644 index 4a2a627fe5d..00000000000 --- a/suites/multimds/basic/tasks/suites_ffsb.yaml +++ /dev/null @@ -1,10 +0,0 @@ -overrides: - ceph: - conf: - osd: - filestore flush min: 0 -tasks: -- workunit: - clients: - all: - - suites/ffsb.sh diff --git a/suites/multimds/basic/tasks/suites_fsstress.yaml b/suites/multimds/basic/tasks/suites_fsstress.yaml deleted file mode 100644 index ddb18fb791a..00000000000 --- a/suites/multimds/basic/tasks/suites_fsstress.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/multimds/basic/tasks/suites_fsx.yaml b/suites/multimds/basic/tasks/suites_fsx.yaml deleted file mode 100644 index 8b2b1ab5c14..00000000000 --- a/suites/multimds/basic/tasks/suites_fsx.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - 
suites/fsx.sh diff --git a/suites/multimds/basic/tasks/suites_fsync.yaml b/suites/multimds/basic/tasks/suites_fsync.yaml deleted file mode 100644 index 7efa1adb82d..00000000000 --- a/suites/multimds/basic/tasks/suites_fsync.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/fsync-tester.sh diff --git a/suites/multimds/basic/tasks/suites_iogen.yaml b/suites/multimds/basic/tasks/suites_iogen.yaml deleted file mode 100644 index d45d4ea3c3f..00000000000 --- a/suites/multimds/basic/tasks/suites_iogen.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/iogen.sh diff --git a/suites/multimds/basic/tasks/suites_iozone.yaml b/suites/multimds/basic/tasks/suites_iozone.yaml deleted file mode 100644 index 9270f3c51e2..00000000000 --- a/suites/multimds/basic/tasks/suites_iozone.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/multimds/basic/tasks/suites_pjd.yaml b/suites/multimds/basic/tasks/suites_pjd.yaml deleted file mode 100644 index de21f7c3464..00000000000 --- a/suites/multimds/basic/tasks/suites_pjd.yaml +++ /dev/null @@ -1,14 +0,0 @@ -overrides: - ceph: - conf: - client: - debug ms: 1 - debug client: 20 - mds: - debug ms: 1 - debug mds: 20 -tasks: -- workunit: - clients: - all: - - suites/pjd.sh diff --git a/suites/multimds/basic/tasks/suites_truncate_delay.yaml b/suites/multimds/basic/tasks/suites_truncate_delay.yaml deleted file mode 100644 index ac5c9b13901..00000000000 --- a/suites/multimds/basic/tasks/suites_truncate_delay.yaml +++ /dev/null @@ -1,14 +0,0 @@ -overrides: - ceph: - conf: - client: - ms_inject_delay_probability: 1 - ms_inject_delay_type: osd - ms_inject_delay_max: 5 - client_oc_max_dirty_age: 1 -tasks: -- exec: - client.0: - - dd if=/dev/zero of=./foo count=100 - - sleep 2 - - truncate --size 0 ./foo diff --git a/suites/multimds/basic/tasks/trivial_sync.yaml b/suites/multimds/basic/tasks/trivial_sync.yaml deleted file mode 100644 index 36e7411b638..00000000000 --- a/suites/multimds/basic/tasks/trivial_sync.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- workunit: - clients: - all: [fs/misc/trivial_sync.sh] diff --git a/suites/multimds/libcephfs/% b/suites/multimds/libcephfs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/multimds/libcephfs/clusters/3-mds.yaml b/suites/multimds/libcephfs/clusters/3-mds.yaml deleted file mode 100644 index c655b90c81c..00000000000 --- a/suites/multimds/libcephfs/clusters/3-mds.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2] -- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5] -- [client.0] diff --git a/suites/multimds/libcephfs/clusters/9-mds.yaml b/suites/multimds/libcephfs/clusters/9-mds.yaml deleted file mode 100644 index ed554c9fe3c..00000000000 --- a/suites/multimds/libcephfs/clusters/9-mds.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2] -- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5] -- [client.0] diff --git a/suites/multimds/libcephfs/debug/mds_client.yaml b/suites/multimds/libcephfs/debug/mds_client.yaml deleted file mode 120000 index 335c1cafed7..00000000000 --- a/suites/multimds/libcephfs/debug/mds_client.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/suites/multimds/libcephfs/fs/btrfs.yaml b/suites/multimds/libcephfs/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- 
a/suites/multimds/libcephfs/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/multimds/libcephfs/inline/no.yaml b/suites/multimds/libcephfs/inline/no.yaml deleted file mode 100644 index 2030acb9083..00000000000 --- a/suites/multimds/libcephfs/inline/no.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install: -- ceph: diff --git a/suites/multimds/libcephfs/inline/yes.yaml b/suites/multimds/libcephfs/inline/yes.yaml deleted file mode 100644 index 72a285c590f..00000000000 --- a/suites/multimds/libcephfs/inline/yes.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: -- ceph: -- exec: - client.0: - - ceph mds set inline_data true --yes-i-really-mean-it diff --git a/suites/multimds/libcephfs/overrides/whitelist_wrongly_marked_down.yaml b/suites/multimds/libcephfs/overrides/whitelist_wrongly_marked_down.yaml deleted file mode 120000 index 08f746bf894..00000000000 --- a/suites/multimds/libcephfs/overrides/whitelist_wrongly_marked_down.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/suites/multimds/libcephfs/tasks/libcephfs_interface_tests.yaml b/suites/multimds/libcephfs/tasks/libcephfs_interface_tests.yaml deleted file mode 100644 index 0b1d41fea5c..00000000000 --- a/suites/multimds/libcephfs/tasks/libcephfs_interface_tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - client.0: - - libcephfs/test.sh diff --git a/suites/multimds/libcephfs/tasks/libcephfs_java.yaml b/suites/multimds/libcephfs/tasks/libcephfs_java.yaml deleted file mode 100644 index 4330d50965e..00000000000 --- a/suites/multimds/libcephfs/tasks/libcephfs_java.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - client.0: - - libcephfs-java/test.sh diff --git a/suites/multimds/libcephfs/tasks/mds_creation_retry.yaml b/suites/multimds/libcephfs/tasks/mds_creation_retry.yaml deleted file mode 100644 index cd87f28ad08..00000000000 --- a/suites/multimds/libcephfs/tasks/mds_creation_retry.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: --mds_creation_failure: --ceph-fuse: -- workunit: - clients: - all: [fs/misc/trivial_sync.sh] diff --git a/suites/multimds/verify/% b/suites/multimds/verify/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/multimds/verify/clusters/3-mds.yaml b/suites/multimds/verify/clusters/3-mds.yaml deleted file mode 100644 index c655b90c81c..00000000000 --- a/suites/multimds/verify/clusters/3-mds.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2] -- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5] -- [client.0] diff --git a/suites/multimds/verify/clusters/9-mds.yaml b/suites/multimds/verify/clusters/9-mds.yaml deleted file mode 100644 index ed554c9fe3c..00000000000 --- a/suites/multimds/verify/clusters/9-mds.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2] -- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5] -- [client.0] diff --git a/suites/multimds/verify/debug/mds_client.yaml b/suites/multimds/verify/debug/mds_client.yaml deleted file mode 120000 index 335c1cafed7..00000000000 --- a/suites/multimds/verify/debug/mds_client.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../debug/mds_client.yaml \ No newline at end of file diff --git a/suites/multimds/verify/fs/btrfs.yaml b/suites/multimds/verify/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- 
a/suites/multimds/verify/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/multimds/verify/overrides/whitelist_wrongly_marked_down.yaml b/suites/multimds/verify/overrides/whitelist_wrongly_marked_down.yaml deleted file mode 120000 index 08f746bf894..00000000000 --- a/suites/multimds/verify/overrides/whitelist_wrongly_marked_down.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/suites/multimds/verify/tasks/cfuse_workunit_suites_dbench.yaml b/suites/multimds/verify/tasks/cfuse_workunit_suites_dbench.yaml deleted file mode 100644 index 73319776f03..00000000000 --- a/suites/multimds/verify/tasks/cfuse_workunit_suites_dbench.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- install: -- ceph: - conf: - client: - debug client: 1/20 - debug ms: 0/10 -- ceph-fuse: -- workunit: - clients: - all: - - suites/dbench.sh diff --git a/suites/multimds/verify/tasks/cfuse_workunit_suites_fsstress.yaml b/suites/multimds/verify/tasks/cfuse_workunit_suites_fsstress.yaml deleted file mode 100644 index b58487c0785..00000000000 --- a/suites/multimds/verify/tasks/cfuse_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/multimds/verify/tasks/libcephfs_interface_tests.yaml b/suites/multimds/verify/tasks/libcephfs_interface_tests.yaml deleted file mode 100644 index 22d1f142161..00000000000 --- a/suites/multimds/verify/tasks/libcephfs_interface_tests.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - libcephfs/test.sh diff --git a/suites/multimds/verify/validater/lockdep.yaml b/suites/multimds/verify/validater/lockdep.yaml deleted file mode 100644 index 25f84355c0b..00000000000 --- a/suites/multimds/verify/validater/lockdep.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - lockdep: true diff --git a/suites/multimds/verify/validater/valgrind.yaml b/suites/multimds/verify/validater/valgrind.yaml deleted file mode 100644 index c3d3aed4892..00000000000 --- a/suites/multimds/verify/validater/valgrind.yaml +++ /dev/null @@ -1,12 +0,0 @@ -overrides: - install: - ceph: - flavor: notcmalloc - ceph: - valgrind: - mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] - osd: [--tool=memcheck] - mds: [--tool=memcheck] - ceph-fuse: - client.0: - valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes] diff --git a/suites/powercycle/osd/% b/suites/powercycle/osd/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/powercycle/osd/clusters/3osd-1per-target.yaml b/suites/powercycle/osd/clusters/3osd-1per-target.yaml deleted file mode 100644 index d5503a40c86..00000000000 --- a/suites/powercycle/osd/clusters/3osd-1per-target.yaml +++ /dev/null @@ -1,5 +0,0 @@ -roles: -- [mon.0, mon.1, mon.2, mds.0, client.0] -- [osd.0] -- [osd.1] -- [osd.2] diff --git a/suites/powercycle/osd/fs/btrfs.yaml b/suites/powercycle/osd/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/powercycle/osd/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/powercycle/osd/fs/ext4.yaml b/suites/powercycle/osd/fs/ext4.yaml deleted file mode 120000 index 65d71886933..00000000000 --- a/suites/powercycle/osd/fs/ext4.yaml +++ /dev/null @@ -1 +0,0 @@ 
-../../../../fs/ext4.yaml \ No newline at end of file diff --git a/suites/powercycle/osd/fs/xfs.yaml b/suites/powercycle/osd/fs/xfs.yaml deleted file mode 120000 index 4c28d731f6b..00000000000 --- a/suites/powercycle/osd/fs/xfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/xfs.yaml \ No newline at end of file diff --git a/suites/powercycle/osd/powercycle/default.yaml b/suites/powercycle/osd/powercycle/default.yaml deleted file mode 100644 index b632e83e621..00000000000 --- a/suites/powercycle/osd/powercycle/default.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: -- thrashosds: - chance_down: 1.0 - powercycle: true - timeout: 600 diff --git a/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml b/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml deleted file mode 100644 index b1ddad8d3b0..00000000000 --- a/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml +++ /dev/null @@ -1,13 +0,0 @@ -overrides: - ceph: - conf: - client.0: - admin socket: /var/run/ceph/ceph-$name.asok -tasks: -- radosbench: - clients: [client.0] - time: 60 -- admin_socket: - client.0: - objecter_requests: - test: "http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}" diff --git a/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml b/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml deleted file mode 100644 index 3e99204debb..00000000000 --- a/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - kernel_untar_build.sh diff --git a/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml b/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml deleted file mode 100644 index be3f1331990..00000000000 --- a/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - fs/misc diff --git a/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml b/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml deleted file mode 100644 index 9f3fa7b1887..00000000000 --- a/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml +++ /dev/null @@ -1,14 +0,0 @@ -overrides: - ceph: - conf: - osd: - filestore flush min: 0 - mds: - debug ms: 1 - debug mds: 20 -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/ffsb.sh diff --git a/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml b/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml deleted file mode 100644 index 5908d951b2d..00000000000 --- a/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml b/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml deleted file mode 100644 index 3c11ed74fc7..00000000000 --- a/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsx.sh diff --git a/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml b/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml deleted file mode 100644 index c6043e209bd..00000000000 --- a/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsync-tester.sh diff --git 
a/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml b/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml deleted file mode 100644 index 930bf4a671d..00000000000 --- a/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/pjd.sh diff --git a/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml b/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml deleted file mode 100644 index f3efafa2e9d..00000000000 --- a/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml +++ /dev/null @@ -1,15 +0,0 @@ -overrides: - ceph: - conf: - client: - ms_inject_delay_probability: 1 - ms_inject_delay_type: osd - ms_inject_delay_max: 5 - client_oc_max_dirty_age: 1 -tasks: -- ceph-fuse: -- exec: - client.0: - - dd if=/dev/zero of=./foo count=100 - - sleep 2 - - truncate --size 0 ./foo diff --git a/suites/powercycle/osd/tasks/rados_api_tests.yaml b/suites/powercycle/osd/tasks/rados_api_tests.yaml deleted file mode 100644 index b4708ebd7c0..00000000000 --- a/suites/powercycle/osd/tasks/rados_api_tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - client.0: - - rados/test.sh diff --git a/suites/powercycle/osd/tasks/radosbench.yaml b/suites/powercycle/osd/tasks/radosbench.yaml deleted file mode 100644 index 3940870fce0..00000000000 --- a/suites/powercycle/osd/tasks/radosbench.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- radosbench: - clients: [client.0] - time: 1800 diff --git a/suites/powercycle/osd/tasks/readwrite.yaml b/suites/powercycle/osd/tasks/readwrite.yaml deleted file mode 100644 index c53e52b0872..00000000000 --- a/suites/powercycle/osd/tasks/readwrite.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 500 - op_weights: - read: 45 - write: 45 - delete: 10 diff --git a/suites/powercycle/osd/tasks/snaps-few-objects.yaml b/suites/powercycle/osd/tasks/snaps-few-objects.yaml deleted file mode 100644 index aa82d973ae1..00000000000 --- a/suites/powercycle/osd/tasks/snaps-few-objects.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - copy_from: 50 diff --git a/suites/powercycle/osd/tasks/snaps-many-objects.yaml b/suites/powercycle/osd/tasks/snaps-many-objects.yaml deleted file mode 100644 index 1ffe4e14888..00000000000 --- a/suites/powercycle/osd/tasks/snaps-many-objects.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 500 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - copy_from: 50 diff --git a/suites/rados/basic/% b/suites/rados/basic/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rados/basic/clusters/fixed-2.yaml b/suites/rados/basic/clusters/fixed-2.yaml deleted file mode 120000 index cd0791a1486..00000000000 --- a/suites/rados/basic/clusters/fixed-2.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/suites/rados/basic/fs/btrfs.yaml b/suites/rados/basic/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/rados/basic/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/rados/basic/msgr-failures/few.yaml 
b/suites/rados/basic/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/rados/basic/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/rados/basic/msgr-failures/many.yaml b/suites/rados/basic/msgr-failures/many.yaml deleted file mode 100644 index 038c3a79908..00000000000 --- a/suites/rados/basic/msgr-failures/many.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 1500 diff --git a/suites/rados/basic/tasks/rados_api_tests.yaml b/suites/rados/basic/tasks/rados_api_tests.yaml deleted file mode 100644 index acfc597dec3..00000000000 --- a/suites/rados/basic/tasks/rados_api_tests.yaml +++ /dev/null @@ -1,14 +0,0 @@ -overrides: - ceph: - log-whitelist: - - reached quota - - wrongly marked me down -tasks: -- install: -- ceph: -- workunit: - clients: - client.0: - - rados/test.sh - - rados/test_pool_quota.sh - diff --git a/suites/rados/basic/tasks/rados_cls_all.yaml b/suites/rados/basic/tasks/rados_cls_all.yaml deleted file mode 100644 index 34f7cbbb4a0..00000000000 --- a/suites/rados/basic/tasks/rados_cls_all.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: -- workunit: - clients: - client.0: - - cls diff --git a/suites/rados/basic/tasks/rados_python.yaml b/suites/rados/basic/tasks/rados_python.yaml deleted file mode 100644 index 4faf10e39e0..00000000000 --- a/suites/rados/basic/tasks/rados_python.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: -- workunit: - clients: - client.0: - - rados/test_python.sh diff --git a/suites/rados/basic/tasks/rados_stress_watch.yaml b/suites/rados/basic/tasks/rados_stress_watch.yaml deleted file mode 100644 index ae2e5fd0083..00000000000 --- a/suites/rados/basic/tasks/rados_stress_watch.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: -- workunit: - clients: - client.0: - - rados/stress_watch.sh diff --git a/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml b/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml deleted file mode 100644 index 9432367e356..00000000000 --- a/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml +++ /dev/null @@ -1,11 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down -tasks: -- install: -- ceph: -- workunit: - clients: - all: - - rados/load-gen-big.sh diff --git a/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml b/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml deleted file mode 100644 index 7d882cac9c9..00000000000 --- a/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml +++ /dev/null @@ -1,11 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down -tasks: -- install: -- ceph: -- workunit: - clients: - all: - - rados/load-gen-mix.sh diff --git a/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml b/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml deleted file mode 100644 index 69c06b7b049..00000000000 --- a/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml +++ /dev/null @@ -1,11 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down -tasks: -- install: -- ceph: -- workunit: - clients: - all: - - rados/load-gen-mostlyread.sh diff --git a/suites/rados/monthrash/% b/suites/rados/monthrash/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rados/monthrash/ceph/ceph.yaml b/suites/rados/monthrash/ceph/ceph.yaml deleted file mode 100644 index 
a2c0efc7779..00000000000 --- a/suites/rados/monthrash/ceph/ceph.yaml +++ /dev/null @@ -1,9 +0,0 @@ -overrides: - ceph: - conf: - mon: - mon min osdmap epochs: 25 - paxos service trim min: 5 -tasks: -- install: -- ceph: diff --git a/suites/rados/monthrash/clusters/3-mons.yaml b/suites/rados/monthrash/clusters/3-mons.yaml deleted file mode 100644 index 6298ff23c55..00000000000 --- a/suites/rados/monthrash/clusters/3-mons.yaml +++ /dev/null @@ -1,3 +0,0 @@ -roles: -- [mon.a, mon.c, osd.0, osd.1, osd.2] -- [mon.b, mds.a, osd.3, osd.4, osd.5, client.0] diff --git a/suites/rados/monthrash/clusters/9-mons.yaml b/suites/rados/monthrash/clusters/9-mons.yaml deleted file mode 100644 index a22e6c5a0fc..00000000000 --- a/suites/rados/monthrash/clusters/9-mons.yaml +++ /dev/null @@ -1,3 +0,0 @@ -roles: -- [mon.a, mon.b, mon.c, mon.d, mon.e, osd.0, osd.1, osd.2] -- [mon.f, mon.g, mon.h, mon.i, mds.a, osd.3, osd.4, osd.5, client.0] diff --git a/suites/rados/monthrash/fs/xfs.yaml b/suites/rados/monthrash/fs/xfs.yaml deleted file mode 120000 index 4c28d731f6b..00000000000 --- a/suites/rados/monthrash/fs/xfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/xfs.yaml \ No newline at end of file diff --git a/suites/rados/monthrash/msgr-failures/few.yaml b/suites/rados/monthrash/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/rados/monthrash/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/rados/monthrash/msgr-failures/mon-delay.yaml b/suites/rados/monthrash/msgr-failures/mon-delay.yaml deleted file mode 100644 index 03b7e37f842..00000000000 --- a/suites/rados/monthrash/msgr-failures/mon-delay.yaml +++ /dev/null @@ -1,9 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 2500 - ms inject delay type: mon - ms inject delay probability: .005 - ms inject delay max: 1 - ms inject internal delays: .002 diff --git a/suites/rados/monthrash/thrashers/force-sync-many.yaml b/suites/rados/monthrash/thrashers/force-sync-many.yaml deleted file mode 100644 index 2867f2db5ec..00000000000 --- a/suites/rados/monthrash/thrashers/force-sync-many.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 90 - thrash_delay: 1 - thrash_store: true - thrash_many: true diff --git a/suites/rados/monthrash/thrashers/many.yaml b/suites/rados/monthrash/thrashers/many.yaml deleted file mode 100644 index fe52bb2bbeb..00000000000 --- a/suites/rados/monthrash/thrashers/many.yaml +++ /dev/null @@ -1,13 +0,0 @@ -overrides: - ceph: - conf: - osd: - mon client ping interval: 4 - mon client ping timeout: 12 -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 - thrash_many: true - freeze_mon_duration: 20 - freeze_mon_probability: 10 diff --git a/suites/rados/monthrash/thrashers/one.yaml b/suites/rados/monthrash/thrashers/one.yaml deleted file mode 100644 index 2ce44c8601f..00000000000 --- a/suites/rados/monthrash/thrashers/one.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 diff --git a/suites/rados/monthrash/thrashers/sync-many.yaml b/suites/rados/monthrash/thrashers/sync-many.yaml deleted file mode 100644 index 9868f18159f..00000000000 --- a/suites/rados/monthrash/thrashers/sync-many.yaml +++ /dev/null @@ -1,11 +0,0 @@ -overrides: - ceph: - conf: - mon: - paxos min: 10 - paxos trim min: 10 -tasks: -- mon_thrash: - revive_delay: 90 - thrash_delay: 1 - thrash_many: true diff --git 
a/suites/rados/monthrash/thrashers/sync.yaml b/suites/rados/monthrash/thrashers/sync.yaml deleted file mode 100644 index 1e7054c271d..00000000000 --- a/suites/rados/monthrash/thrashers/sync.yaml +++ /dev/null @@ -1,10 +0,0 @@ -overrides: - ceph: - conf: - mon: - paxos min: 10 - paxos trim min: 10 -tasks: -- mon_thrash: - revive_delay: 90 - thrash_delay: 1 diff --git a/suites/rados/monthrash/workloads/pool-create-delete.yaml b/suites/rados/monthrash/workloads/pool-create-delete.yaml deleted file mode 100644 index c0f0f2e35b4..00000000000 --- a/suites/rados/monthrash/workloads/pool-create-delete.yaml +++ /dev/null @@ -1,56 +0,0 @@ -overrides: - ceph: - log-whitelist: - - slow request -tasks: -- exec: - client.0: - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel - - ceph_test_rados_delete_pools_parallel diff --git a/suites/rados/monthrash/workloads/rados_5925.yaml b/suites/rados/monthrash/workloads/rados_5925.yaml deleted file mode 100644 index b49937f76df..00000000000 --- a/suites/rados/monthrash/workloads/rados_5925.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- exec: - client.0: - - ceph_test_rados_delete_pools_parallel --debug_objecter 20 --debug_ms 1 --debug_rados 20 --debug_monc 20 diff --git a/suites/rados/monthrash/workloads/rados_api_tests.yaml b/suites/rados/monthrash/workloads/rados_api_tests.yaml deleted file mode 100644 index cd11ae6ca0c..00000000000 --- a/suites/rados/monthrash/workloads/rados_api_tests.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rados/test.sh diff --git 
a/suites/rados/monthrash/workloads/rados_mon_workunits.yaml b/suites/rados/monthrash/workloads/rados_mon_workunits.yaml deleted file mode 100644 index 31465cffe71..00000000000 --- a/suites/rados/monthrash/workloads/rados_mon_workunits.yaml +++ /dev/null @@ -1,13 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down -tasks: -- workunit: - clients: - client.0: - - mon/pool_ops.sh - - mon/crush_ops.sh - - mon/osd.sh - - mon/caps.sh - diff --git a/suites/rados/monthrash/workloads/snaps-few-objects.yaml b/suites/rados/monthrash/workloads/snaps-few-objects.yaml deleted file mode 100644 index aa82d973ae1..00000000000 --- a/suites/rados/monthrash/workloads/snaps-few-objects.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - copy_from: 50 diff --git a/suites/rados/multimon/% b/suites/rados/multimon/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rados/multimon/clusters/21.yaml b/suites/rados/multimon/clusters/21.yaml deleted file mode 100644 index 2d134788a6f..00000000000 --- a/suites/rados/multimon/clusters/21.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.d, mon.g, mon.j, mon.m, mon.p, mon.s, osd.0] -- [mon.b, mon.e, mon.h, mon.k, mon.n, mon.q, mon.t, mds.a] -- [mon.c, mon.f, mon.i, mon.l, mon.o, mon.r, mon.u, osd.1] diff --git a/suites/rados/multimon/clusters/3.yaml b/suites/rados/multimon/clusters/3.yaml deleted file mode 100644 index 703cc664f87..00000000000 --- a/suites/rados/multimon/clusters/3.yaml +++ /dev/null @@ -1,2 +0,0 @@ -roles: -- [mon.a, mon.b, mon.c, osd.0, osd.1, mds.a] diff --git a/suites/rados/multimon/clusters/6.yaml b/suites/rados/multimon/clusters/6.yaml deleted file mode 100644 index 62780660361..00000000000 --- a/suites/rados/multimon/clusters/6.yaml +++ /dev/null @@ -1,3 +0,0 @@ -roles: -- [mon.a, mon.c, mon.e, osd.0] -- [mon.b, mon.d, mon.f, osd.1, mds.a] diff --git a/suites/rados/multimon/clusters/9.yaml b/suites/rados/multimon/clusters/9.yaml deleted file mode 100644 index b87a158dbb0..00000000000 --- a/suites/rados/multimon/clusters/9.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.d, mon.g, osd.0] -- [mon.b, mon.e, mon.h, mds.a] -- [mon.c, mon.f, mon.i, osd.1] diff --git a/suites/rados/multimon/msgr-failures/few.yaml b/suites/rados/multimon/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/rados/multimon/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/rados/multimon/msgr-failures/many.yaml b/suites/rados/multimon/msgr-failures/many.yaml deleted file mode 100644 index 86f8dde8a0e..00000000000 --- a/suites/rados/multimon/msgr-failures/many.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 500 diff --git a/suites/rados/multimon/tasks/mon_clock_no_skews.yaml b/suites/rados/multimon/tasks/mon_clock_no_skews.yaml deleted file mode 100644 index e86bdde1d7d..00000000000 --- a/suites/rados/multimon/tasks/mon_clock_no_skews.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - slow request - - .*clock.*skew.* - - clocks not synchronized -- mon_clock_skew_check: - expect-skew: false diff --git a/suites/rados/multimon/tasks/mon_clock_with_skews.yaml b/suites/rados/multimon/tasks/mon_clock_with_skews.yaml deleted file mode 100644 index 
2953e0d6dc2..00000000000 --- a/suites/rados/multimon/tasks/mon_clock_with_skews.yaml +++ /dev/null @@ -1,15 +0,0 @@ -overrides: - ceph: - conf: - mon.b: - clock offset: 10 -tasks: -- install: -- ceph: - wait-for-healthy: false - log-whitelist: - - slow request - - .*clock.*skew.* - - clocks not synchronized -- mon_clock_skew_check: - expect-skew: true diff --git a/suites/rados/multimon/tasks/mon_recovery.yaml b/suites/rados/multimon/tasks/mon_recovery.yaml deleted file mode 100644 index 94721ea53a4..00000000000 --- a/suites/rados/multimon/tasks/mon_recovery.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: -- ceph: -- mon_recovery: diff --git a/suites/rados/objectstore/ceph_objectstore_tool.yaml b/suites/rados/objectstore/ceph_objectstore_tool.yaml deleted file mode 100644 index 698e6e2679e..00000000000 --- a/suites/rados/objectstore/ceph_objectstore_tool.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- [mon.0, osd.0, osd.1, osd.2] -- [osd.3, osd.4, osd.5] -- [client.0] - -tasks: -- install: -- ceph: -- ceph_objectstore_tool: - objects: 20 diff --git a/suites/rados/singleton-nomsgr/% b/suites/rados/singleton-nomsgr/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rados/singleton-nomsgr/all/11429.yaml b/suites/rados/singleton-nomsgr/all/11429.yaml deleted file mode 100644 index 06fdc3b557b..00000000000 --- a/suites/rados/singleton-nomsgr/all/11429.yaml +++ /dev/null @@ -1,105 +0,0 @@ -overrides: - ceph: - conf: - mon: - debug mon: 20 - debug ms: 1 - debug paxos: 20 - mon warn on legacy crush tunables: false - mon min osdmap epochs: 3 - osd: - osd map cache size: 2 - osd map max advance: 1 - debug filestore: 20 - debug journal: 20 - debug ms: 1 - debug osd: 20 - log-whitelist: - - osd_map_cache_size - - slow request - - scrub mismatch - - ScrubResult -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - mon.b - - mon.c - - osd.2 - - client.0 -tasks: -- install: - branch: v0.80.8 -- print: '**** done installing firefly' -- ceph: - fs: xfs -- print: '**** done ceph' -- full_sequential: - - ceph_manager.create_pool: - args: ['toremove'] - kwargs: - pg_num: 4096 - - sleep: - duration: 30 - - ceph_manager.wait_for_clean: null - - radosbench: - clients: [client.0] - time: 120 - size: 1 - pool: toremove - create_pool: false - - ceph_manager.remove_pool: - args: ['toremove'] - - sleep: - duration: 10 - - ceph.restart: - daemons: - - osd.0 - - osd.1 - - osd.2 - - sleep: - duration: 30 - - ceph_manager.wait_for_clean: null - - radosbench: - clients: [client.0] - time: 60 - size: 1 - - ceph_manager.create_pool: - args: ['newpool'] - - loop: - count: 100 - body: - - ceph_manager.set_pool_property: - args: ['newpool', 'min_size', 2] - - ceph_manager.set_pool_property: - args: ['newpool', 'min_size', 1] - - sleep: - duration: 30 - - ceph_manager.wait_for_clean: null - - loop: - count: 100 - body: - - ceph_manager.set_pool_property: - args: ['newpool', 'min_size', 2] - - ceph_manager.set_pool_property: - args: ['newpool', 'min_size', 1] - - sleep: - duration: 30 - - ceph_manager.wait_for_clean: null - - sleep: - duration: 30 - - install.upgrade: - mon.a: null - - ceph.restart: - daemons: - - osd.0 - - osd.1 - - osd.2 - - sleep: - duration: 30 - - radosbench: - clients: [client.0] - time: 30 - size: 1 - - ceph_manager.wait_for_clean: null diff --git a/suites/rados/singleton-nomsgr/all/alloc-hint.yaml b/suites/rados/singleton-nomsgr/all/alloc-hint.yaml deleted file mode 100644 index dca38c67bb5..00000000000 --- a/suites/rados/singleton-nomsgr/all/alloc-hint.yaml +++ /dev/null 
@@ -1,17 +0,0 @@ -roles: -- [mon.a, mds.a, osd.0, osd.1, osd.2, client.0] - -overrides: - ceph: - fs: xfs - conf: - osd: - filestore xfs extsize: true - -tasks: -- install: -- ceph: -- workunit: - clients: - all: - - rados/test_alloc_hint.sh diff --git a/suites/rados/singleton-nomsgr/all/filejournal.yaml b/suites/rados/singleton-nomsgr/all/filejournal.yaml deleted file mode 100644 index 28a0c041d9e..00000000000 --- a/suites/rados/singleton-nomsgr/all/filejournal.yaml +++ /dev/null @@ -1,8 +0,0 @@ -roles: -- [mon.0, osd.0, osd.1, mds.a, client.0] -tasks: -- install: -- ceph: -- exec: - client.0: - - ceph_test_filejournal diff --git a/suites/rados/singleton-nomsgr/all/filestore-idempotent-aio-journal.yaml b/suites/rados/singleton-nomsgr/all/filestore-idempotent-aio-journal.yaml deleted file mode 100644 index 15437cf65c7..00000000000 --- a/suites/rados/singleton-nomsgr/all/filestore-idempotent-aio-journal.yaml +++ /dev/null @@ -1,9 +0,0 @@ -roles: -- [mon.0, osd.0, osd.1, mds.a, client.0] -tasks: -- install: -- ceph: - conf: - global: - journal aio: true -- filestore_idempotent: diff --git a/suites/rados/singleton-nomsgr/all/filestore-idempotent.yaml b/suites/rados/singleton-nomsgr/all/filestore-idempotent.yaml deleted file mode 100644 index c6af200d57f..00000000000 --- a/suites/rados/singleton-nomsgr/all/filestore-idempotent.yaml +++ /dev/null @@ -1,6 +0,0 @@ -roles: -- [mon.0, osd.0, osd.1, mds.a, client.0] -tasks: -- install: -- ceph: -- filestore_idempotent: diff --git a/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml deleted file mode 100644 index 2089c9f56ae..00000000000 --- a/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml +++ /dev/null @@ -1,31 +0,0 @@ -roles: -- - mon.a - - osd.0 - - osd.1 - - osd.2 - - client.0 -- - mds.a - - osd.3 - - osd.4 - - osd.5 -tasks: -- install: -- ceph: - conf: - osd: - osd debug reject backfill probability: .3 - osd min pg log entries: 25 - osd max pg log entries: 100 -- exec: - client.0: - - ceph osd pool create foo 64 - - rados -p foo bench 60 write -b 1024 --no-cleanup - - ceph osd pool set foo size 3 - - ceph osd out 0 1 -- sleep: - duration: 60 -- exec: - client.0: - - ceph osd in 0 1 -- sleep: - duration: 60 diff --git a/suites/rados/singleton-nomsgr/all/objectcacher-stress.yaml b/suites/rados/singleton-nomsgr/all/objectcacher-stress.yaml deleted file mode 100644 index bc5a2838ef9..00000000000 --- a/suites/rados/singleton-nomsgr/all/objectcacher-stress.yaml +++ /dev/null @@ -1,9 +0,0 @@ -roles: -- [mon.0, osd.0, osd.1, mds.a, client.0] -tasks: -- install: -- ceph: -- workunit: - clients: - all: - - osdc/stress_objectcacher.sh diff --git a/suites/rados/singleton-nomsgr/all/objectstore.yaml b/suites/rados/singleton-nomsgr/all/objectstore.yaml deleted file mode 100644 index 2cab026638e..00000000000 --- a/suites/rados/singleton-nomsgr/all/objectstore.yaml +++ /dev/null @@ -1,9 +0,0 @@ -roles: -- [mon.0, osd.0, osd.1, mds.a, client.0] -tasks: -- install: -- ceph: -- exec: - client.0: - - mkdir $TESTDIR/ostest && cd $TESTDIR/ostest && ceph_test_objectstore - - rm -rf $TESTDIR/ostest diff --git a/suites/rados/singleton/% b/suites/rados/singleton/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rados/singleton/all/admin-socket.yaml b/suites/rados/singleton/all/admin-socket.yaml deleted file mode 100644 index 9e580f29db6..00000000000 --- a/suites/rados/singleton/all/admin-socket.yaml +++ /dev/null @@ -1,18 +0,0 @@ -roles: -- - mon.a - - osd.0 
- - mds.a - - osd.1 - - client.a -tasks: -- install: -- ceph: -- admin_socket: - osd.0: - version: - git_version: - help: - config show: - config set filestore_dump_file /tmp/foo: - perf dump: - perf schema: diff --git a/suites/rados/singleton/all/cephtool.yaml b/suites/rados/singleton/all/cephtool.yaml deleted file mode 100644 index 2ed5434960c..00000000000 --- a/suites/rados/singleton/all/cephtool.yaml +++ /dev/null @@ -1,22 +0,0 @@ -roles: -- - mon.a - - mon.b - - mon.c - - mds.a - - osd.0 - - osd.1 - - osd.2 - - client.0 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - had wrong client addr - - had wrong cluster addr - - must scrub before tier agent can activate -- workunit: - clients: - all: - - cephtool - - mon/pool_ops.sh diff --git a/suites/rados/singleton/all/dump-stuck.yaml b/suites/rados/singleton/all/dump-stuck.yaml deleted file mode 100644 index 9bdcb0c3c73..00000000000 --- a/suites/rados/singleton/all/dump-stuck.yaml +++ /dev/null @@ -1,11 +0,0 @@ -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down -- dump_stuck: diff --git a/suites/rados/singleton/all/ec-lost-unfound.yaml b/suites/rados/singleton/all/ec-lost-unfound.yaml deleted file mode 100644 index 1dd47518a8a..00000000000 --- a/suites/rados/singleton/all/ec-lost-unfound.yaml +++ /dev/null @@ -1,15 +0,0 @@ -roles: -- - mon.a - - mon.b - - mon.c - - mds.a - - osd.0 - - osd.1 - - osd.2 - - osd.3 -tasks: -- install: -- ceph: - log-whitelist: - - objects unfound and apparently lost -- ec_lost_unfound: diff --git a/suites/rados/singleton/all/lost-unfound-delete.yaml b/suites/rados/singleton/all/lost-unfound-delete.yaml deleted file mode 100644 index 3df9e2ed601..00000000000 --- a/suites/rados/singleton/all/lost-unfound-delete.yaml +++ /dev/null @@ -1,14 +0,0 @@ -roles: -- - mon.a - - mon.b - - mon.c - - mds.a - - osd.0 - - osd.1 - - osd.2 -tasks: -- install: -- ceph: - log-whitelist: - - objects unfound and apparently lost -- rep_lost_unfound_delete: diff --git a/suites/rados/singleton/all/lost-unfound.yaml b/suites/rados/singleton/all/lost-unfound.yaml deleted file mode 100644 index 6014723ed08..00000000000 --- a/suites/rados/singleton/all/lost-unfound.yaml +++ /dev/null @@ -1,14 +0,0 @@ -roles: -- - mon.a - - mon.b - - mon.c - - mds.a - - osd.0 - - osd.1 - - osd.2 -tasks: -- install: -- ceph: - log-whitelist: - - objects unfound and apparently lost -- lost_unfound: diff --git a/suites/rados/singleton/all/mon-config-keys.yaml b/suites/rados/singleton/all/mon-config-keys.yaml deleted file mode 100644 index 524c6b6f570..00000000000 --- a/suites/rados/singleton/all/mon-config-keys.yaml +++ /dev/null @@ -1,16 +0,0 @@ -roles: -- - mon.0 - - mon.1 - - mon.2 - - mds.a - - osd.0 - - osd.1 - - osd.2 - - client.0 -tasks: -- install: -- ceph: -- workunit: - clients: - all: - - mon/test_mon_config_key.py diff --git a/suites/rados/singleton/all/mon-thrasher.yaml b/suites/rados/singleton/all/mon-thrasher.yaml deleted file mode 100644 index 4e4e8571b35..00000000000 --- a/suites/rados/singleton/all/mon-thrasher.yaml +++ /dev/null @@ -1,22 +0,0 @@ -roles: -- - mon.a - - mon.b - - mon.c - - osd.0 - - osd.1 - - mds.0 - - client.0 -tasks: -- install: -- ceph: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- workunit: - clients: - all: - - mon/workloadgen.sh - env: - LOADGEN_NUM_OSDS: "5" - VERBOSE: "1" - DURATION: "600" diff --git a/suites/rados/singleton/all/osd-backfill.yaml b/suites/rados/singleton/all/osd-backfill.yaml deleted file mode 
100644 index 7c18a3b9bb0..00000000000 --- a/suites/rados/singleton/all/osd-backfill.yaml +++ /dev/null @@ -1,17 +0,0 @@ -roles: -- - mon.a - - mon.b - - mon.c - - mds.a - - osd.0 - - osd.1 - - osd.2 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - conf: - osd: - osd min pg log entries: 5 -- osd_backfill: diff --git a/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/suites/rados/singleton/all/osd-recovery-incomplete.yaml deleted file mode 100644 index e6f99983e02..00000000000 --- a/suites/rados/singleton/all/osd-recovery-incomplete.yaml +++ /dev/null @@ -1,18 +0,0 @@ -roles: -- - mon.a - - mon.b - - mon.c - - mds.a - - osd.0 - - osd.1 - - osd.2 - - osd.3 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - conf: - osd: - osd min pg log entries: 5 -- osd_recovery.test_incomplete_pgs: diff --git a/suites/rados/singleton/all/osd-recovery.yaml b/suites/rados/singleton/all/osd-recovery.yaml deleted file mode 100644 index 8307d424533..00000000000 --- a/suites/rados/singleton/all/osd-recovery.yaml +++ /dev/null @@ -1,17 +0,0 @@ -roles: -- - mon.a - - mon.b - - mon.c - - mds.a - - osd.0 - - osd.1 - - osd.2 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - conf: - osd: - osd min pg log entries: 5 -- osd_recovery: diff --git a/suites/rados/singleton/all/peer.yaml b/suites/rados/singleton/all/peer.yaml deleted file mode 100644 index a441059bbb8..00000000000 --- a/suites/rados/singleton/all/peer.yaml +++ /dev/null @@ -1,17 +0,0 @@ -roles: -- - mon.0 - - mon.1 - - mon.2 - - mds.a - - osd.0 - - osd.1 - - osd.2 -tasks: -- install: -- ceph: - config: - global: - osd pool default min size : 1 - log-whitelist: - - objects unfound and apparently lost -- peer: diff --git a/suites/rados/singleton/all/radostool.yaml b/suites/rados/singleton/all/radostool.yaml deleted file mode 100644 index 05ab4a3f7c2..00000000000 --- a/suites/rados/singleton/all/radostool.yaml +++ /dev/null @@ -1,16 +0,0 @@ -roles: -- - mon.a - - osd.0 - - osd.1 - - client.0 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - had wrong client addr - - had wrong cluster addr -- workunit: - clients: - all: - - rados/test_rados_tool.sh diff --git a/suites/rados/singleton/all/rest-api.yaml b/suites/rados/singleton/all/rest-api.yaml deleted file mode 100644 index 425db55660d..00000000000 --- a/suites/rados/singleton/all/rest-api.yaml +++ /dev/null @@ -1,20 +0,0 @@ -roles: -- - mon.0 - - mon.1 - - mon.2 - - mds.a - - osd.0 - - osd.1 - - osd.2 - - client.0 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - had wrong client addr -- rest-api: [client.0] -- workunit: - clients: - all: - - rest/test.py diff --git a/suites/rados/singleton/all/thrash-rados.yaml b/suites/rados/singleton/all/thrash-rados.yaml deleted file mode 100644 index 4bdcf226546..00000000000 --- a/suites/rados/singleton/all/thrash-rados.yaml +++ /dev/null @@ -1,23 +0,0 @@ -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - osd.3 - - osd.4 - - osd.5 - - client.0 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down -- thrashosds: - op_delay: 30 - clean_interval: 120 - chance_down: .5 -- workunit: - clients: - all: - - rados/load-gen-mix-small.sh diff --git a/suites/rados/singleton/all/thrash_cache_writeback_forward_none.yaml b/suites/rados/singleton/all/thrash_cache_writeback_forward_none.yaml deleted file mode 100644 index 0c971e034bc..00000000000 --- a/suites/rados/singleton/all/thrash_cache_writeback_forward_none.yaml +++ 
/dev/null @@ -1,61 +0,0 @@ -roles: -- - mon.a - - mds.0 - - osd.0 - - osd.1 - - osd.2 -- - osd.3 - - osd.4 - - osd.5 - - client.0 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - slow request -- exec: - client.0: - - ceph osd pool create base 4 - - ceph osd pool create cache 4 - - ceph osd tier add base cache - - ceph osd tier cache-mode cache writeback - - ceph osd tier set-overlay base cache - - ceph osd pool set cache hit_set_type bloom - - ceph osd pool set cache hit_set_count 8 - - ceph osd pool set cache hit_set_period 60 - - ceph osd pool set cache target_max_objects 500 -- background_exec: - mon.a: - - while true - - do sleep 30 - - echo forward - - ceph osd tier cache-mode cache forward - - sleep 10 - - ceph osd pool set cache cache_target_full_ratio .001 - - echo cache-try-flush-evict-all - - rados -p cache cache-try-flush-evict-all - - sleep 5 - - echo cache-flush-evict-all - - rados -p cache cache-flush-evict-all - - sleep 5 - - echo remove overlay - - ceph osd tier remove-overlay base - - sleep 20 - - echo add writeback overlay - - ceph osd tier cache-mode cache writeback - - ceph osd pool set cache cache_target_full_ratio .8 - - ceph osd tier set-overlay base cache - - done -- rados: - clients: [client.0] - pools: [base] - max_seconds: 600 - ops: 400000 - objects: 10000 - size: 1024 - op_weights: - read: 100 - write: 100 - delete: 50 - copy_from: 50 diff --git a/suites/rados/singleton/fs/btrfs.yaml b/suites/rados/singleton/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/rados/singleton/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/rados/singleton/msgr-failures/few.yaml b/suites/rados/singleton/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/rados/singleton/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/rados/singleton/msgr-failures/many.yaml b/suites/rados/singleton/msgr-failures/many.yaml deleted file mode 100644 index 86f8dde8a0e..00000000000 --- a/suites/rados/singleton/msgr-failures/many.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 500 diff --git a/suites/rados/thrash/% b/suites/rados/thrash/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rados/thrash/clusters/+ b/suites/rados/thrash/clusters/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rados/thrash/clusters/fixed-2.yaml b/suites/rados/thrash/clusters/fixed-2.yaml deleted file mode 120000 index cd0791a1486..00000000000 --- a/suites/rados/thrash/clusters/fixed-2.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/suites/rados/thrash/clusters/openstack.yaml b/suites/rados/thrash/clusters/openstack.yaml deleted file mode 100644 index 00d927ac232..00000000000 --- a/suites/rados/thrash/clusters/openstack.yaml +++ /dev/null @@ -1,8 +0,0 @@ -openstack: - machine: - disk: 40 # GB - ram: 8000 # MB - cpus: 1 - volumes: # attached to each instance - count: 3 - size: 30 # GB diff --git a/suites/rados/thrash/fs/btrfs.yaml b/suites/rados/thrash/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/rados/thrash/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/rados/thrash/fs/ext4.yaml 
b/suites/rados/thrash/fs/ext4.yaml deleted file mode 120000 index 65d71886933..00000000000 --- a/suites/rados/thrash/fs/ext4.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/ext4.yaml \ No newline at end of file diff --git a/suites/rados/thrash/fs/xfs.yaml b/suites/rados/thrash/fs/xfs.yaml deleted file mode 120000 index 4c28d731f6b..00000000000 --- a/suites/rados/thrash/fs/xfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/xfs.yaml \ No newline at end of file diff --git a/suites/rados/thrash/msgr-failures/fastclose.yaml b/suites/rados/thrash/msgr-failures/fastclose.yaml deleted file mode 100644 index 77fd730aff7..00000000000 --- a/suites/rados/thrash/msgr-failures/fastclose.yaml +++ /dev/null @@ -1,6 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 2500 - ms tcp read timeout: 5 diff --git a/suites/rados/thrash/msgr-failures/few.yaml b/suites/rados/thrash/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/rados/thrash/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/rados/thrash/msgr-failures/osd-delay.yaml b/suites/rados/thrash/msgr-failures/osd-delay.yaml deleted file mode 100644 index a33ba89e14f..00000000000 --- a/suites/rados/thrash/msgr-failures/osd-delay.yaml +++ /dev/null @@ -1,9 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 2500 - ms inject delay type: osd - ms inject delay probability: .005 - ms inject delay max: 1 - ms inject internal delays: .002 diff --git a/suites/rados/thrash/thrashers/default.yaml b/suites/rados/thrash/thrashers/default.yaml deleted file mode 100644 index a5958b6d6ed..00000000000 --- a/suites/rados/thrash/thrashers/default.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - conf: - osd: - osd debug reject backfill probability: .3 -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 diff --git a/suites/rados/thrash/thrashers/mapgap.yaml b/suites/rados/thrash/thrashers/mapgap.yaml deleted file mode 100644 index fd7fd17957d..00000000000 --- a/suites/rados/thrash/thrashers/mapgap.yaml +++ /dev/null @@ -1,19 +0,0 @@ -overrides: - ceph: - conf: - mon: - mon min osdmap epochs: 2 - osd: - osd map cache size: 1 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - osd_map_cache_size -- thrashosds: - timeout: 1800 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 - chance_test_map_discontinuity: 0.5 diff --git a/suites/rados/thrash/thrashers/morepggrow.yaml b/suites/rados/thrash/thrashers/morepggrow.yaml deleted file mode 100644 index 93379a82c33..00000000000 --- a/suites/rados/thrash/thrashers/morepggrow.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 3 - chance_pgpnum_fix: 1 diff --git a/suites/rados/thrash/thrashers/pggrow.yaml b/suites/rados/thrash/thrashers/pggrow.yaml deleted file mode 100644 index 6131b00012d..00000000000 --- a/suites/rados/thrash/thrashers/pggrow.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 2 - chance_pgpnum_fix: 1 diff --git 
a/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml b/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml deleted file mode 100644 index b1ddad8d3b0..00000000000 --- a/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml +++ /dev/null @@ -1,13 +0,0 @@ -overrides: - ceph: - conf: - client.0: - admin socket: /var/run/ceph/ceph-$name.asok -tasks: -- radosbench: - clients: [client.0] - time: 60 -- admin_socket: - client.0: - objecter_requests: - test: "http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}" diff --git a/suites/rados/thrash/workloads/cache-agent-big.yaml b/suites/rados/thrash/workloads/cache-agent-big.yaml deleted file mode 100644 index d3c404b5414..00000000000 --- a/suites/rados/thrash/workloads/cache-agent-big.yaml +++ /dev/null @@ -1,29 +0,0 @@ -overrides: - ceph: - log-whitelist: - - must scrub before tier agent can activate -tasks: -- exec: - client.0: - - ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd - m=1 k=2 - - ceph osd pool create base 4 erasure teuthologyprofile - - ceph osd pool create cache 4 - - ceph osd tier add base cache - - ceph osd tier cache-mode cache writeback - - ceph osd tier set-overlay base cache - - ceph osd pool set cache hit_set_type bloom - - ceph osd pool set cache hit_set_count 8 - - ceph osd pool set cache hit_set_period 60 - - ceph osd pool set cache target_max_objects 5000 -- rados: - clients: [client.0] - pools: [base] - ops: 4000 - objects: 10000 - size: 1024 - op_weights: - read: 100 - write: 100 - delete: 50 - copy_from: 50 diff --git a/suites/rados/thrash/workloads/cache-agent-small.yaml b/suites/rados/thrash/workloads/cache-agent-small.yaml deleted file mode 100644 index 50bb3ac3c33..00000000000 --- a/suites/rados/thrash/workloads/cache-agent-small.yaml +++ /dev/null @@ -1,26 +0,0 @@ -overrides: - ceph: - log-whitelist: - - must scrub before tier agent can activate -tasks: -- exec: - client.0: - - ceph osd pool create base 4 - - ceph osd pool create cache 4 - - ceph osd tier add base cache - - ceph osd tier cache-mode cache writeback - - ceph osd tier set-overlay base cache - - ceph osd pool set cache hit_set_type bloom - - ceph osd pool set cache hit_set_count 8 - - ceph osd pool set cache hit_set_period 60 - - ceph osd pool set cache target_max_objects 250 -- rados: - clients: [client.0] - pools: [base] - ops: 4000 - objects: 500 - op_weights: - read: 100 - write: 100 - delete: 50 - copy_from: 50 diff --git a/suites/rados/thrash/workloads/cache-snaps.yaml b/suites/rados/thrash/workloads/cache-snaps.yaml deleted file mode 100644 index 199d6b2b1be..00000000000 --- a/suites/rados/thrash/workloads/cache-snaps.yaml +++ /dev/null @@ -1,31 +0,0 @@ -overrides: - ceph: - log-whitelist: - - must scrub before tier agent can activate -tasks: -- exec: - client.0: - - ceph osd pool create base 4 - - ceph osd pool create cache 4 - - ceph osd tier add base cache - - ceph osd tier cache-mode cache writeback - - ceph osd tier set-overlay base cache - - ceph osd pool set cache hit_set_type bloom - - ceph osd pool set cache hit_set_count 8 - - ceph osd pool set cache hit_set_period 3600 -- rados: - clients: [client.0] - pools: [base] - ops: 4000 - objects: 500 - op_weights: - read: 100 - write: 100 - delete: 50 - copy_from: 50 - flush: 50 - try_flush: 50 - evict: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/rados/thrash/workloads/cache.yaml b/suites/rados/thrash/workloads/cache.yaml deleted file mode 100644 
index 0f15cad6763..00000000000 --- a/suites/rados/thrash/workloads/cache.yaml +++ /dev/null @@ -1,28 +0,0 @@ -overrides: - ceph: - log-whitelist: - - must scrub before tier agent can activate -tasks: -- exec: - client.0: - - ceph osd pool create base 4 - - ceph osd pool create cache 4 - - ceph osd tier add base cache - - ceph osd tier cache-mode cache writeback - - ceph osd tier set-overlay base cache - - ceph osd pool set cache hit_set_type bloom - - ceph osd pool set cache hit_set_count 8 - - ceph osd pool set cache hit_set_period 3600 -- rados: - clients: [client.0] - pools: [base] - ops: 4000 - objects: 500 - op_weights: - read: 100 - write: 100 - delete: 50 - copy_from: 50 - flush: 50 - try_flush: 50 - evict: 50 diff --git a/suites/rados/thrash/workloads/ec-radosbench.yaml b/suites/rados/thrash/workloads/ec-radosbench.yaml deleted file mode 100644 index 70875fb2325..00000000000 --- a/suites/rados/thrash/workloads/ec-radosbench.yaml +++ /dev/null @@ -1,10 +0,0 @@ -overrides: - ceph: - log-whitelist: - - shard.*missing -tasks: -- radosbench: - clients: [client.0] - time: 1800 - unique_pool: true - ec_pool: true diff --git a/suites/rados/thrash/workloads/ec-readwrite.yaml b/suites/rados/thrash/workloads/ec-readwrite.yaml deleted file mode 100644 index 80b9140e707..00000000000 --- a/suites/rados/thrash/workloads/ec-readwrite.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 500 - ec_pool: true - op_weights: - read: 45 - write: 0 - append: 45 - delete: 10 diff --git a/suites/rados/thrash/workloads/ec-small-objects.yaml b/suites/rados/thrash/workloads/ec-small-objects.yaml deleted file mode 100644 index a8ac39716e5..00000000000 --- a/suites/rados/thrash/workloads/ec-small-objects.yaml +++ /dev/null @@ -1,20 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 400000 - max_seconds: 600 - max_in_flight: 64 - objects: 1024 - size: 16384 - ec_pool: true - op_weights: - read: 100 - write: 0 - append: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - copy_from: 50 - setattr: 25 - rmattr: 25 diff --git a/suites/rados/thrash/workloads/ec-snaps-few-objects.yaml b/suites/rados/thrash/workloads/ec-snaps-few-objects.yaml deleted file mode 100644 index c64d4ffb35b..00000000000 --- a/suites/rados/thrash/workloads/ec-snaps-few-objects.yaml +++ /dev/null @@ -1,15 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 50 - ec_pool: true - op_weights: - read: 100 - write: 0 - append: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - copy_from: 50 diff --git a/suites/rados/thrash/workloads/rados_api_tests.yaml b/suites/rados/thrash/workloads/rados_api_tests.yaml deleted file mode 100644 index cd11ae6ca0c..00000000000 --- a/suites/rados/thrash/workloads/rados_api_tests.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rados/test.sh diff --git a/suites/rados/thrash/workloads/radosbench.yaml b/suites/rados/thrash/workloads/radosbench.yaml deleted file mode 100644 index 3940870fce0..00000000000 --- a/suites/rados/thrash/workloads/radosbench.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- radosbench: - clients: [client.0] - time: 1800 diff --git a/suites/rados/thrash/workloads/readwrite.yaml b/suites/rados/thrash/workloads/readwrite.yaml deleted file mode 100644 index c53e52b0872..00000000000 --- a/suites/rados/thrash/workloads/readwrite.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 500 - op_weights: - read: 45 - write: 45 - 
delete: 10 diff --git a/suites/rados/thrash/workloads/small-objects.yaml b/suites/rados/thrash/workloads/small-objects.yaml deleted file mode 100644 index bb5a934de34..00000000000 --- a/suites/rados/thrash/workloads/small-objects.yaml +++ /dev/null @@ -1,18 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 400000 - max_seconds: 600 - max_in_flight: 64 - objects: 1024 - size: 16384 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - copy_from: 50 - setattr: 25 - rmattr: 25 diff --git a/suites/rados/thrash/workloads/snaps-few-objects.yaml b/suites/rados/thrash/workloads/snaps-few-objects.yaml deleted file mode 100644 index aa82d973ae1..00000000000 --- a/suites/rados/thrash/workloads/snaps-few-objects.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - copy_from: 50 diff --git a/suites/rados/verify/% b/suites/rados/verify/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rados/verify/1thrash/default.yaml b/suites/rados/verify/1thrash/default.yaml deleted file mode 100644 index 9435b146af6..00000000000 --- a/suites/rados/verify/1thrash/default.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 diff --git a/suites/rados/verify/1thrash/none.yaml b/suites/rados/verify/1thrash/none.yaml deleted file mode 100644 index 2030acb9083..00000000000 --- a/suites/rados/verify/1thrash/none.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install: -- ceph: diff --git a/suites/rados/verify/clusters/fixed-2.yaml b/suites/rados/verify/clusters/fixed-2.yaml deleted file mode 120000 index cd0791a1486..00000000000 --- a/suites/rados/verify/clusters/fixed-2.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/suites/rados/verify/fs/btrfs.yaml b/suites/rados/verify/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/rados/verify/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/rados/verify/msgr-failures/few.yaml b/suites/rados/verify/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/rados/verify/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/rados/verify/tasks/mon_recovery.yaml b/suites/rados/verify/tasks/mon_recovery.yaml deleted file mode 100644 index 6986303409e..00000000000 --- a/suites/rados/verify/tasks/mon_recovery.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- mon_recovery: diff --git a/suites/rados/verify/tasks/rados_api_tests.yaml b/suites/rados/verify/tasks/rados_api_tests.yaml deleted file mode 100644 index 0031704784e..00000000000 --- a/suites/rados/verify/tasks/rados_api_tests.yaml +++ /dev/null @@ -1,14 +0,0 @@ -overrides: - ceph: - conf: - client: - debug ms: 1 - debug objecter: 20 - debug rados: 20 - debug monc: 20 -tasks: -- workunit: - timeout: 6h - clients: - client.0: - - rados/test.sh diff --git a/suites/rados/verify/tasks/rados_cls_all.yaml b/suites/rados/verify/tasks/rados_cls_all.yaml deleted file mode 100644 index 853da39ad99..00000000000 --- a/suites/rados/verify/tasks/rados_cls_all.yaml +++ 
/dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - cls diff --git a/suites/rados/verify/validater/lockdep.yaml b/suites/rados/verify/validater/lockdep.yaml deleted file mode 100644 index 25f84355c0b..00000000000 --- a/suites/rados/verify/validater/lockdep.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - lockdep: true diff --git a/suites/rados/verify/validater/valgrind.yaml b/suites/rados/verify/validater/valgrind.yaml deleted file mode 100644 index 7b8f7a28629..00000000000 --- a/suites/rados/verify/validater/valgrind.yaml +++ /dev/null @@ -1,9 +0,0 @@ -overrides: - install: - ceph: - flavor: notcmalloc - ceph: - valgrind: - mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] - osd: [--tool=memcheck] - mds: [--tool=memcheck] diff --git a/suites/rbd/basic/% b/suites/rbd/basic/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rbd/basic/base/install.yaml b/suites/rbd/basic/base/install.yaml deleted file mode 100644 index 2030acb9083..00000000000 --- a/suites/rbd/basic/base/install.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install: -- ceph: diff --git a/suites/rbd/basic/cachepool/none.yaml b/suites/rbd/basic/cachepool/none.yaml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rbd/basic/cachepool/small.yaml b/suites/rbd/basic/cachepool/small.yaml deleted file mode 100644 index f8ed11040fa..00000000000 --- a/suites/rbd/basic/cachepool/small.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- exec: - client.0: - - ceph osd pool create cache 4 - - ceph osd tier add rbd cache - - ceph osd tier cache-mode cache writeback - - ceph osd tier set-overlay rbd cache - - ceph osd pool set cache hit_set_type bloom - - ceph osd pool set cache hit_set_count 8 - - ceph osd pool set cache hit_set_period 60 - - ceph osd pool set cache target_max_objects 250 diff --git a/suites/rbd/basic/clusters/fixed-1.yaml b/suites/rbd/basic/clusters/fixed-1.yaml deleted file mode 120000 index 435ea3c7546..00000000000 --- a/suites/rbd/basic/clusters/fixed-1.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-1.yaml \ No newline at end of file diff --git a/suites/rbd/basic/fs/btrfs.yaml b/suites/rbd/basic/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/rbd/basic/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/rbd/basic/msgr-failures/few.yaml b/suites/rbd/basic/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/rbd/basic/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/rbd/basic/msgr-failures/many.yaml b/suites/rbd/basic/msgr-failures/many.yaml deleted file mode 100644 index 86f8dde8a0e..00000000000 --- a/suites/rbd/basic/msgr-failures/many.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 500 diff --git a/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml b/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml deleted file mode 100644 index a98768540ba..00000000000 --- a/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/test_librbd.sh diff --git a/suites/rbd/basic/tasks/rbd_cli_copy.yaml b/suites/rbd/basic/tasks/rbd_cli_copy.yaml deleted file mode 100644 index ae95e51e066..00000000000 --- a/suites/rbd/basic/tasks/rbd_cli_copy.yaml +++ 
/dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/copy.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/rbd/basic/tasks/rbd_cli_copy_old_format.yaml b/suites/rbd/basic/tasks/rbd_cli_copy_old_format.yaml deleted file mode 100644 index 2f99f8990de..00000000000 --- a/suites/rbd/basic/tasks/rbd_cli_copy_old_format.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/copy.sh diff --git a/suites/rbd/basic/tasks/rbd_cli_import_export.yaml b/suites/rbd/basic/tasks/rbd_cli_import_export.yaml deleted file mode 100644 index 49070827be0..00000000000 --- a/suites/rbd/basic/tasks/rbd_cli_import_export.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/rbd/basic/tasks/rbd_cli_import_export_old_format.yaml b/suites/rbd/basic/tasks/rbd_cli_import_export_old_format.yaml deleted file mode 100644 index b08f2612f7a..00000000000 --- a/suites/rbd/basic/tasks/rbd_cli_import_export_old_format.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/import_export.sh diff --git a/suites/rbd/basic/tasks/rbd_cli_tests.yaml b/suites/rbd/basic/tasks/rbd_cli_tests.yaml deleted file mode 100644 index a37db057b5d..00000000000 --- a/suites/rbd/basic/tasks/rbd_cli_tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/run_cli_tests.sh - diff --git a/suites/rbd/basic/tasks/rbd_cls_tests.yaml b/suites/rbd/basic/tasks/rbd_cls_tests.yaml deleted file mode 100644 index 9ccd57c4a82..00000000000 --- a/suites/rbd/basic/tasks/rbd_cls_tests.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - cls/test_cls_rbd.sh diff --git a/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml b/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml deleted file mode 100644 index d2c80ad6585..00000000000 --- a/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/test_lock_fence.sh diff --git a/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml b/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml deleted file mode 100644 index 263b784e27d..00000000000 --- a/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/test_librbd_python.sh diff --git a/suites/rbd/librbd/% b/suites/rbd/librbd/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rbd/librbd/cache/none.yaml b/suites/rbd/librbd/cache/none.yaml deleted file mode 100644 index 42fd9c95562..00000000000 --- a/suites/rbd/librbd/cache/none.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: -- ceph: - conf: - client: - rbd cache: false diff --git a/suites/rbd/librbd/cache/writeback.yaml b/suites/rbd/librbd/cache/writeback.yaml deleted file mode 100644 index 86fe06afa05..00000000000 --- a/suites/rbd/librbd/cache/writeback.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: -- ceph: - conf: - client: - rbd cache: true diff --git a/suites/rbd/librbd/cache/writethrough.yaml b/suites/rbd/librbd/cache/writethrough.yaml deleted file mode 100644 index 6dc29e16c02..00000000000 --- a/suites/rbd/librbd/cache/writethrough.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: - conf: - client: - rbd cache: true - rbd cache max dirty: 0 diff --git a/suites/rbd/librbd/cachepool/none.yaml b/suites/rbd/librbd/cachepool/none.yaml deleted 
file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rbd/librbd/cachepool/small.yaml b/suites/rbd/librbd/cachepool/small.yaml deleted file mode 100644 index f8ed11040fa..00000000000 --- a/suites/rbd/librbd/cachepool/small.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- exec: - client.0: - - ceph osd pool create cache 4 - - ceph osd tier add rbd cache - - ceph osd tier cache-mode cache writeback - - ceph osd tier set-overlay rbd cache - - ceph osd pool set cache hit_set_type bloom - - ceph osd pool set cache hit_set_count 8 - - ceph osd pool set cache hit_set_period 60 - - ceph osd pool set cache target_max_objects 250 diff --git a/suites/rbd/librbd/clusters/fixed-3.yaml b/suites/rbd/librbd/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/rbd/librbd/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/rbd/librbd/fs b/suites/rbd/librbd/fs deleted file mode 120000 index 3658920363d..00000000000 --- a/suites/rbd/librbd/fs +++ /dev/null @@ -1 +0,0 @@ -../basic/fs \ No newline at end of file diff --git a/suites/rbd/librbd/msgr-failures/few.yaml b/suites/rbd/librbd/msgr-failures/few.yaml deleted file mode 100644 index a8bc68355ea..00000000000 --- a/suites/rbd/librbd/msgr-failures/few.yaml +++ /dev/null @@ -1,7 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 - log-whitelist: - - wrongly marked me down diff --git a/suites/rbd/librbd/workloads/c_api_tests.yaml b/suites/rbd/librbd/workloads/c_api_tests.yaml deleted file mode 100644 index 188ddc56c60..00000000000 --- a/suites/rbd/librbd/workloads/c_api_tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/test_librbd.sh - env: - RBD_FEATURES: "1" diff --git a/suites/rbd/librbd/workloads/fsx.yaml b/suites/rbd/librbd/workloads/fsx.yaml deleted file mode 100644 index ef512d8a9b4..00000000000 --- a/suites/rbd/librbd/workloads/fsx.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- rbd_fsx: - clients: [client.0] - ops: 5000 diff --git a/suites/rbd/librbd/workloads/python_api_tests.yaml b/suites/rbd/librbd/workloads/python_api_tests.yaml deleted file mode 100644 index a7b3ce7d3e6..00000000000 --- a/suites/rbd/librbd/workloads/python_api_tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/test_librbd_python.sh - env: - RBD_FEATURES: "1" diff --git a/suites/rbd/librbd/workloads/qemu_bonnie.yaml b/suites/rbd/librbd/workloads/qemu_bonnie.yaml deleted file mode 100644 index 7c964265a0b..00000000000 --- a/suites/rbd/librbd/workloads/qemu_bonnie.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- qemu: - all: - test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/bonnie.sh -exclude_arch: armv7l diff --git a/suites/rbd/librbd/workloads/qemu_fsstress.yaml b/suites/rbd/librbd/workloads/qemu_fsstress.yaml deleted file mode 100644 index bae9e009935..00000000000 --- a/suites/rbd/librbd/workloads/qemu_fsstress.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- qemu: - all: - test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/fsstress.sh;h=firefly -exclude_arch: armv7l diff --git a/suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled b/suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled deleted file mode 100644 index 3dae6e78a2f..00000000000 --- a/suites/rbd/librbd/workloads/qemu_iozone.yaml.disabled +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- qemu: - all: - test: 
http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/iozone.sh - image_size: 20480 -exclude_arch: armv7l diff --git a/suites/rbd/librbd/workloads/qemu_xfstests.yaml b/suites/rbd/librbd/workloads/qemu_xfstests.yaml deleted file mode 100644 index c4b2327cc8e..00000000000 --- a/suites/rbd/librbd/workloads/qemu_xfstests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- qemu: - all: - type: block - num_rbd: 2 - test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/run_xfstests_qemu.sh -exclude_arch: armv7l diff --git a/suites/rbd/singleton/% b/suites/rbd/singleton/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rbd/singleton/all/formatted-output.yaml b/suites/rbd/singleton/all/formatted-output.yaml deleted file mode 100644 index de930bc02bd..00000000000 --- a/suites/rbd/singleton/all/formatted-output.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- [mon.a, osd.0, osd.1, client.0] -tasks: -- install: -- ceph: -- cram: - clients: - client.0: - - http://git.ceph.com/?p=ceph.git;a=blob_plain;hb=firefly;f=src/test/cli-integration/rbd/formatted-output.t - diff --git a/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml b/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml deleted file mode 100644 index 2771d4e8db1..00000000000 --- a/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml +++ /dev/null @@ -1,12 +0,0 @@ -exclude_arch: armv7l -roles: -- [mon.a, osd.0, osd.1, client.0] -tasks: -- install: -- ceph: - conf: - client: - rbd cache: false -- workunit: - clients: - all: [rbd/qemu-iotests.sh] diff --git a/suites/rbd/singleton/all/qemu-iotests-writeback.yaml b/suites/rbd/singleton/all/qemu-iotests-writeback.yaml deleted file mode 100644 index f6768df5a22..00000000000 --- a/suites/rbd/singleton/all/qemu-iotests-writeback.yaml +++ /dev/null @@ -1,12 +0,0 @@ -exclude_arch: armv7l -roles: -- [mon.a, osd.0, osd.1, client.0] -tasks: -- install: -- ceph: - conf: - client: - rbd cache: true -- workunit: - clients: - all: [rbd/qemu-iotests.sh] diff --git a/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml b/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml deleted file mode 100644 index 287509e4953..00000000000 --- a/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml +++ /dev/null @@ -1,13 +0,0 @@ -exclude_arch: armv7l -roles: -- [mon.a, osd.0, osd.1, client.0] -tasks: -- install: -- ceph: - conf: - client: - rbd cache: true - rbd cache max dirty: 0 -- workunit: - clients: - all: [rbd/qemu-iotests.sh] diff --git a/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml b/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml deleted file mode 100644 index c5230d0554e..00000000000 --- a/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- [mon.a, osd.0, osd.1, client.0] -tasks: -- install: -- ceph: -- workunit: - clients: - all: - - mon/rbd_snaps_ops.sh - diff --git a/suites/rbd/singleton/all/read-flags-no-cache.yaml b/suites/rbd/singleton/all/read-flags-no-cache.yaml deleted file mode 100644 index f7d44456d3b..00000000000 --- a/suites/rbd/singleton/all/read-flags-no-cache.yaml +++ /dev/null @@ -1,11 +0,0 @@ -roles: -- [mon.a, osd.0, osd.1, client.0] -tasks: -- install: -- ceph: - conf: - client: - rbd cache: false -- workunit: - clients: - all: [rbd/read-flags.sh] diff --git a/suites/rbd/singleton/all/read-flags-writeback.yaml b/suites/rbd/singleton/all/read-flags-writeback.yaml deleted file mode 100644 index f25be79e0b6..00000000000 --- a/suites/rbd/singleton/all/read-flags-writeback.yaml +++ /dev/null @@ -1,11 
+0,0 @@ -roles: -- [mon.a, osd.0, osd.1, client.0] -tasks: -- install: -- ceph: - conf: - client: - rbd cache: true -- workunit: - clients: - all: [rbd/read-flags.sh] diff --git a/suites/rbd/singleton/all/read-flags-writethrough.yaml b/suites/rbd/singleton/all/read-flags-writethrough.yaml deleted file mode 100644 index 80d7b4254b6..00000000000 --- a/suites/rbd/singleton/all/read-flags-writethrough.yaml +++ /dev/null @@ -1,12 +0,0 @@ -roles: -- [mon.a, osd.0, osd.1, client.0] -tasks: -- install: -- ceph: - conf: - client: - rbd cache: true - rbd cache max dirty: 0 -- workunit: - clients: - all: [rbd/read-flags.sh] diff --git a/suites/rbd/thrash/% b/suites/rbd/thrash/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rbd/thrash/base/install.yaml b/suites/rbd/thrash/base/install.yaml deleted file mode 100644 index 2030acb9083..00000000000 --- a/suites/rbd/thrash/base/install.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install: -- ceph: diff --git a/suites/rbd/thrash/clusters/fixed-2.yaml b/suites/rbd/thrash/clusters/fixed-2.yaml deleted file mode 120000 index cd0791a1486..00000000000 --- a/suites/rbd/thrash/clusters/fixed-2.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/suites/rbd/thrash/fs/btrfs.yaml b/suites/rbd/thrash/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/rbd/thrash/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/rbd/thrash/fs/xfs.yaml b/suites/rbd/thrash/fs/xfs.yaml deleted file mode 120000 index 4c28d731f6b..00000000000 --- a/suites/rbd/thrash/fs/xfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/xfs.yaml \ No newline at end of file diff --git a/suites/rbd/thrash/msgr-failures/few.yaml b/suites/rbd/thrash/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/rbd/thrash/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/rbd/thrash/thrashers/cache.yaml b/suites/rbd/thrash/thrashers/cache.yaml deleted file mode 100644 index 5bab78ee840..00000000000 --- a/suites/rbd/thrash/thrashers/cache.yaml +++ /dev/null @@ -1,18 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -tasks: -- exec: - client.0: - - ceph osd pool create cache 4 - - ceph osd tier add rbd cache - - ceph osd tier cache-mode cache writeback - - ceph osd tier set-overlay rbd cache - - ceph osd pool set cache hit_set_type bloom - - ceph osd pool set cache hit_set_count 8 - - ceph osd pool set cache hit_set_period 60 - - ceph osd pool set cache target_max_objects 250 -- thrashosds: - timeout: 1200 diff --git a/suites/rbd/thrash/thrashers/default.yaml b/suites/rbd/thrash/thrashers/default.yaml deleted file mode 100644 index 89c9bdfb0e5..00000000000 --- a/suites/rbd/thrash/thrashers/default.yaml +++ /dev/null @@ -1,8 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -tasks: -- thrashosds: - timeout: 1200 diff --git a/suites/rbd/thrash/workloads/rbd_api_tests.yaml b/suites/rbd/thrash/workloads/rbd_api_tests.yaml deleted file mode 100644 index 188ddc56c60..00000000000 --- a/suites/rbd/thrash/workloads/rbd_api_tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/test_librbd.sh - env: - RBD_FEATURES: "1" diff --git 
a/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml b/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml deleted file mode 100644 index bd812695c83..00000000000 --- a/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- rbd_fsx: - clients: [client.0] - ops: 2000 -overrides: - ceph: - conf: - client: - rbd cache: true diff --git a/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml b/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml deleted file mode 100644 index 56895298025..00000000000 --- a/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- rbd_fsx: - clients: [client.0] - ops: 2000 -overrides: - ceph: - conf: - client: - rbd cache: true - rbd cache max dirty: 0 diff --git a/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml b/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml deleted file mode 100644 index 6c5e0e45707..00000000000 --- a/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- rbd_fsx: - clients: [client.0] - ops: 2000 -overrides: - ceph: - conf: - client: - rbd cache: false diff --git a/suites/rest/basic/tasks/rest_test.yaml b/suites/rest/basic/tasks/rest_test.yaml deleted file mode 100644 index 8ed1918ce7a..00000000000 --- a/suites/rest/basic/tasks/rest_test.yaml +++ /dev/null @@ -1,24 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 - - client.0 - -tasks: -- install: -- ceph: -- rest-api: [client.0] -- workunit: - clients: - client.0: - - rest/test.py diff --git a/suites/rgw/multifs/% b/suites/rgw/multifs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rgw/multifs/clusters/fixed-2.yaml b/suites/rgw/multifs/clusters/fixed-2.yaml deleted file mode 120000 index cd0791a1486..00000000000 --- a/suites/rgw/multifs/clusters/fixed-2.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/suites/rgw/multifs/fs/btrfs.yaml b/suites/rgw/multifs/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/rgw/multifs/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/rgw/multifs/fs/ext4.yaml b/suites/rgw/multifs/fs/ext4.yaml deleted file mode 120000 index 65d71886933..00000000000 --- a/suites/rgw/multifs/fs/ext4.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/ext4.yaml \ No newline at end of file diff --git a/suites/rgw/multifs/fs/xfs.yaml b/suites/rgw/multifs/fs/xfs.yaml deleted file mode 120000 index 4c28d731f6b..00000000000 --- a/suites/rgw/multifs/fs/xfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/xfs.yaml \ No newline at end of file diff --git a/suites/rgw/multifs/rgw_pool_type b/suites/rgw/multifs/rgw_pool_type deleted file mode 120000 index 0506f616ce2..00000000000 --- a/suites/rgw/multifs/rgw_pool_type +++ /dev/null @@ -1 +0,0 @@ -../../../rgw_pool_type \ No newline at end of file diff --git a/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml b/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml deleted file mode 100644 index 767debdf3c8..00000000000 --- a/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- rgw: [client.0] -- workunit: - clients: - client.0: - - rgw/s3_bucket_quota.pl diff --git a/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml 
b/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml deleted file mode 100644 index 1781dee096b..00000000000 --- a/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- rgw: [client.0] -- workunit: - clients: - client.0: - - rgw/s3_multipart_upload.pl diff --git a/suites/rgw/multifs/tasks/rgw_readwrite.yaml b/suites/rgw/multifs/tasks/rgw_readwrite.yaml deleted file mode 100644 index c7efaa1c757..00000000000 --- a/suites/rgw/multifs/tasks/rgw_readwrite.yaml +++ /dev/null @@ -1,16 +0,0 @@ -tasks: -- install: -- ceph: -- rgw: [client.0] -- s3readwrite: - client.0: - rgw_server: client.0 - readwrite: - bucket: rwtest - readers: 10 - writers: 3 - duration: 300 - files: - num: 10 - size: 2000 - stddev: 500 diff --git a/suites/rgw/multifs/tasks/rgw_roundtrip.yaml b/suites/rgw/multifs/tasks/rgw_roundtrip.yaml deleted file mode 100644 index 47b3c1894a2..00000000000 --- a/suites/rgw/multifs/tasks/rgw_roundtrip.yaml +++ /dev/null @@ -1,16 +0,0 @@ -tasks: -- install: -- ceph: -- rgw: [client.0] -- s3roundtrip: - client.0: - rgw_server: client.0 - roundtrip: - bucket: rttest - readers: 10 - writers: 3 - duration: 300 - files: - num: 10 - size: 2000 - stddev: 500 diff --git a/suites/rgw/multifs/tasks/rgw_s3tests.yaml b/suites/rgw/multifs/tasks/rgw_s3tests.yaml deleted file mode 100644 index 62608773a2a..00000000000 --- a/suites/rgw/multifs/tasks/rgw_s3tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: -- rgw: [client.0] -- s3tests: - client.0: - rgw_server: client.0 diff --git a/suites/rgw/multifs/tasks/rgw_swift.yaml b/suites/rgw/multifs/tasks/rgw_swift.yaml deleted file mode 100644 index 569741b0e15..00000000000 --- a/suites/rgw/multifs/tasks/rgw_swift.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: -- rgw: [client.0] -- swift: - client.0: - rgw_server: client.0 diff --git a/suites/rgw/multifs/tasks/rgw_user_quota.yaml b/suites/rgw/multifs/tasks/rgw_user_quota.yaml deleted file mode 100644 index c2c38a816cc..00000000000 --- a/suites/rgw/multifs/tasks/rgw_user_quota.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- rgw: [client.0] -- workunit: - clients: - client.0: - - rgw/s3_user_quota.pl diff --git a/suites/rgw/singleton/% b/suites/rgw/singleton/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rgw/singleton/all/radosgw-admin-data-sync.yaml b/suites/rgw/singleton/all/radosgw-admin-data-sync.yaml deleted file mode 100644 index 0e61941718e..00000000000 --- a/suites/rgw/singleton/all/radosgw-admin-data-sync.yaml +++ /dev/null @@ -1,56 +0,0 @@ -roles: -- [mon.a, osd.0, osd.1, client.0, client.1] -tasks: -- install: -- ceph: - conf: - client: - debug ms: 1 - rgw gc obj min wait: 15 - rgw data log window: 30 - osd: - debug ms: 1 - debug objclass : 20 - client.0: - rgw region: region0 - rgw zone: r0z0 - rgw region root pool: .rgw.region.0 - rgw zone root pool: .rgw.zone.0 - rgw gc pool: .rgw.gc.0 - rgw user uid pool: .users.uid.0 - rgw user keys pool: .users.0 - rgw log data: True - rgw log meta: True - client.1: - rgw region: region0 - rgw zone: r0z1 - rgw region root pool: .rgw.region.0 - rgw zone root pool: .rgw.zone.1 - rgw gc pool: .rgw.gc.1 - rgw user uid pool: .users.uid.1 - rgw user keys pool: .users.1 - rgw log data: False - rgw log meta: False -- rgw: - regions: - region0: - api name: api1 - is master: True - master zone: r0z0 - zones: [r0z0, r0z1] - client.0: - system user: - name: client0-system-user - access key: 0te6NH5mcdcq0Tc5i8i2 - secret key: 
Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv - client.1: - system user: - name: client1-system-user - access key: 1te6NH5mcdcq0Tc5i8i3 - secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw -- radosgw-agent: - client.0: - max-entries: 10 - src: client.0 - dest: client.1 -- radosgw-admin: diff --git a/suites/rgw/singleton/all/radosgw-admin-multi-region.yaml b/suites/rgw/singleton/all/radosgw-admin-multi-region.yaml deleted file mode 100644 index 05aed994d93..00000000000 --- a/suites/rgw/singleton/all/radosgw-admin-multi-region.yaml +++ /dev/null @@ -1,61 +0,0 @@ -roles: -- [mon.a, mds.a, osd.0, osd.1, client.0] -- [mon.b, mon.c, osd.2, osd.3, client.1] -tasks: -- install: -- ceph: - conf: - client: - debug ms: 1 - rgw gc obj min wait: 15 - osd: - debug ms: 1 - debug objclass : 20 - client.0: - rgw region: region0 - rgw zone: r0z1 - rgw region root pool: .rgw.region.0 - rgw zone root pool: .rgw.zone.0 - rgw gc pool: .rgw.gc.0 - rgw user uid pool: .users.uid.0 - rgw user keys pool: .users.0 - rgw log data: True - rgw log meta: True - client.1: - rgw region: region1 - rgw zone: r1z1 - rgw region root pool: .rgw.region.1 - rgw zone root pool: .rgw.zone.1 - rgw gc pool: .rgw.gc.1 - rgw user uid pool: .users.uid.1 - rgw user keys pool: .users.1 - rgw log data: False - rgw log meta: False -- rgw: - regions: - region0: - api name: api1 - is master: True - master zone: r0z1 - zones: [r0z1] - region1: - api name: api1 - is master: False - master zone: r1z1 - zones: [r1z1] - client.0: - system user: - name: client0-system-user - access key: 0te6NH5mcdcq0Tc5i8i2 - secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv - client.1: - system user: - name: client1-system-user - access key: 1te6NH5mcdcq0Tc5i8i3 - secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw -- radosgw-agent: - client.0: - src: client.0 - dest: client.1 - metadata-only: true -- radosgw-admin: diff --git a/suites/rgw/singleton/all/radosgw-admin.yaml b/suites/rgw/singleton/all/radosgw-admin.yaml deleted file mode 100644 index 67aa5f92efe..00000000000 --- a/suites/rgw/singleton/all/radosgw-admin.yaml +++ /dev/null @@ -1,15 +0,0 @@ -roles: -- [mon.a, mds.a, osd.0, client.0, osd.1] -tasks: -- install: -- ceph: - conf: - client: - debug ms: 1 - rgw gc obj min wait: 15 - osd: - debug ms: 1 - debug objclass : 20 -- rgw: - client.0: -- radosgw-admin: diff --git a/suites/rgw/singleton/all/radosgw-convert-to-region.yaml b/suites/rgw/singleton/all/radosgw-convert-to-region.yaml deleted file mode 100644 index 4b6d7469ef8..00000000000 --- a/suites/rgw/singleton/all/radosgw-convert-to-region.yaml +++ /dev/null @@ -1,67 +0,0 @@ -overrides: - s3readwrite: - s3: - user_id: s3readwrite-test-user - display_name: test user for the s3readwrite tests - email: tester@inktank - access_key: 2te6NH5mcdcq0Tc5i8i4 - secret_key: Qy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXx - readwrite: - deterministic_file_names: True - duration: 30 - bucket: testbucket - files: - num: 10 - size: 2000 - stddev: 500 -roles: -- [mon.a, mds.a, osd.0, osd.1, client.0] -- [mon.b, mon.c, osd.2, osd.3, client.1] - -tasks: -- install: -- ceph: - conf: - client.1: - rgw region: default - rgw zone: r1z1 - rgw region root pool: .rgw - rgw zone root pool: .rgw - rgw domain root: .rgw - rgw gc pool: .rgw.gc - rgw user uid pool: .users.uid - rgw user keys pool: .users -- rgw: - client.0: - system user: - name: nr-system - access key: 0te6NH5mcdcq0Tc5i8i2 - secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv -- s3readwrite: - client.0: - extra_args: ['--no-cleanup'] - s3: - delete_user: False - 
readwrite: - writers: 1 - readers: 0 -- rgw: - regions: - default: - api name: api1 - is master: true - master zone: r1z1 - zones: [r1z1] - client.1: - system user: - name: r2-system - access key: 1te6NH5mcdcq0Tc5i8i3 - secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw -- s3readwrite: - client.1: - s3: - create_user: False - readwrite: - writers: 0 - readers: 2 - diff --git a/suites/rgw/singleton/rgw_pool_type b/suites/rgw/singleton/rgw_pool_type deleted file mode 120000 index 77fa7e71b78..00000000000 --- a/suites/rgw/singleton/rgw_pool_type +++ /dev/null @@ -1 +0,0 @@ -../../../rgw_pool_type/ \ No newline at end of file diff --git a/suites/rgw/verify/% b/suites/rgw/verify/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/rgw/verify/clusters/fixed-2.yaml b/suites/rgw/verify/clusters/fixed-2.yaml deleted file mode 120000 index cd0791a1486..00000000000 --- a/suites/rgw/verify/clusters/fixed-2.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-2.yaml \ No newline at end of file diff --git a/suites/rgw/verify/fs/btrfs.yaml b/suites/rgw/verify/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/rgw/verify/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/rgw/verify/msgr-failures/few.yaml b/suites/rgw/verify/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/rgw/verify/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/rgw/verify/rgw_pool_type b/suites/rgw/verify/rgw_pool_type deleted file mode 120000 index 77fa7e71b78..00000000000 --- a/suites/rgw/verify/rgw_pool_type +++ /dev/null @@ -1 +0,0 @@ -../../../rgw_pool_type/ \ No newline at end of file diff --git a/suites/rgw/verify/tasks/rgw_s3tests.yaml b/suites/rgw/verify/tasks/rgw_s3tests.yaml deleted file mode 100644 index c23a2cbf4ec..00000000000 --- a/suites/rgw/verify/tasks/rgw_s3tests.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: - flavor: notcmalloc -- ceph: -- rgw: - client.0: - valgrind: [--tool=memcheck] -- s3tests: - client.0: - rgw_server: client.0 diff --git a/suites/rgw/verify/tasks/rgw_s3tests_multiregion.yaml b/suites/rgw/verify/tasks/rgw_s3tests_multiregion.yaml deleted file mode 100644 index 399f4aac2f6..00000000000 --- a/suites/rgw/verify/tasks/rgw_s3tests_multiregion.yaml +++ /dev/null @@ -1,59 +0,0 @@ -tasks: -- install: - flavor: notcmalloc -- ceph: - conf: - client.0: - rgw region: zero - rgw zone: r0z1 - rgw region root pool: .rgw.region.0 - rgw zone root pool: .rgw.zone.0 - rgw gc pool: .rgw.gc.0 - rgw user uid pool: .users.uid.0 - rgw user keys pool: .users.0 - rgw log data: True - rgw log meta: True - client.1: - rgw region: one - rgw zone: r1z1 - rgw region root pool: .rgw.region.1 - rgw zone root pool: .rgw.zone.1 - rgw gc pool: .rgw.gc.1 - rgw user uid pool: .users.uid.1 - rgw user keys pool: .users.1 - rgw log data: False - rgw log meta: False -- rgw: - default_idle_timeout: 300 - regions: - zero: - api name: api1 - is master: True - master zone: r0z1 - zones: [r0z1] - one: - api name: api1 - is master: False - master zone: r1z1 - zones: [r1z1] - client.0: - valgrind: [--tool=memcheck] - system user: - name: client0-system-user - access key: 1te6NH5mcdcq0Tc5i8i2 - secret key: 1y4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv - client.1: - valgrind: [--tool=memcheck] - system user: - name: client1-system-user - access key: 0te6NH5mcdcq0Tc5i8i2 - 
secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv -- radosgw-agent: - client.0: - src: client.0 - dest: client.1 - metadata-only: true -- s3tests: - client.0: - idle_timeout: 300 - rgw_server: client.0 diff --git a/suites/rgw/verify/tasks/rgw_swift.yaml b/suites/rgw/verify/tasks/rgw_swift.yaml deleted file mode 100644 index 792fb848a9e..00000000000 --- a/suites/rgw/verify/tasks/rgw_swift.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: - flavor: notcmalloc -- ceph: -- rgw: - client.0: - valgrind: [--tool=memcheck] -- swift: - client.0: - rgw_server: client.0 diff --git a/suites/rgw/verify/validater/lockdep.yaml b/suites/rgw/verify/validater/lockdep.yaml deleted file mode 100644 index 941fe12b1e4..00000000000 --- a/suites/rgw/verify/validater/lockdep.yaml +++ /dev/null @@ -1,7 +0,0 @@ -overrides: - ceph: - conf: - osd: - lockdep: true - mon: - lockdep: true diff --git a/suites/rgw/verify/validater/valgrind.yaml b/suites/rgw/verify/validater/valgrind.yaml deleted file mode 100644 index 7b8f7a28629..00000000000 --- a/suites/rgw/verify/validater/valgrind.yaml +++ /dev/null @@ -1,9 +0,0 @@ -overrides: - install: - ceph: - flavor: notcmalloc - ceph: - valgrind: - mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] - osd: [--tool=memcheck] - mds: [--tool=memcheck] diff --git a/suites/samba/% b/suites/samba/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/samba/clusters/samba-basic.yaml b/suites/samba/clusters/samba-basic.yaml deleted file mode 100644 index caced4a26d1..00000000000 --- a/suites/samba/clusters/samba-basic.yaml +++ /dev/null @@ -1,3 +0,0 @@ -roles: -- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1] -- [samba.0, client.0, client.1] diff --git a/suites/samba/debug/mds_client.yaml b/suites/samba/debug/mds_client.yaml deleted file mode 120000 index 2550b024ded..00000000000 --- a/suites/samba/debug/mds_client.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../debug/mds_client.yaml \ No newline at end of file diff --git a/suites/samba/fs/btrfs.yaml b/suites/samba/fs/btrfs.yaml deleted file mode 120000 index ea693ab0b42..00000000000 --- a/suites/samba/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/samba/install/install.yaml b/suites/samba/install/install.yaml deleted file mode 100644 index c53f9c55b17..00000000000 --- a/suites/samba/install/install.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# we currently can't install Samba on RHEL; need a gitbuilder and code updates -os_type: ubuntu - -tasks: -- install: -- install: - project: samba - extra_packages: ['samba'] -- ceph: diff --git a/suites/samba/mount/fuse.yaml b/suites/samba/mount/fuse.yaml deleted file mode 100644 index d00ffdb4804..00000000000 --- a/suites/samba/mount/fuse.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: [client.0] -- samba: - samba.0: - ceph: "{testdir}/mnt.0" - diff --git a/suites/samba/mount/kclient.yaml b/suites/samba/mount/kclient.yaml deleted file mode 100644 index 14fee85d266..00000000000 --- a/suites/samba/mount/kclient.yaml +++ /dev/null @@ -1,11 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false -tasks: -- kclient: [client.0] -- samba: - samba.0: - ceph: "{testdir}/mnt.0" - diff --git a/suites/samba/mount/native.yaml b/suites/samba/mount/native.yaml deleted file mode 100644 index 09b8c1c4e3d..00000000000 --- a/suites/samba/mount/native.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- samba: diff --git a/suites/samba/mount/noceph.yaml b/suites/samba/mount/noceph.yaml deleted file 
mode 100644 index 3cad4740d8b..00000000000 --- a/suites/samba/mount/noceph.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- localdir: [client.0] -- samba: - samba.0: - ceph: "{testdir}/mnt.0" diff --git a/suites/samba/workload/cifs-dbench.yaml b/suites/samba/workload/cifs-dbench.yaml deleted file mode 100644 index c13c1c099e5..00000000000 --- a/suites/samba/workload/cifs-dbench.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- cifs-mount: - client.1: - share: ceph -- workunit: - clients: - client.1: - - suites/dbench.sh diff --git a/suites/samba/workload/cifs-fsstress.yaml b/suites/samba/workload/cifs-fsstress.yaml deleted file mode 100644 index ff003af3433..00000000000 --- a/suites/samba/workload/cifs-fsstress.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- cifs-mount: - client.1: - share: ceph -- workunit: - clients: - client.1: - - suites/fsstress.sh diff --git a/suites/samba/workload/cifs-kernel-build.yaml.disabled b/suites/samba/workload/cifs-kernel-build.yaml.disabled deleted file mode 100644 index ab9ff8ac731..00000000000 --- a/suites/samba/workload/cifs-kernel-build.yaml.disabled +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- cifs-mount: - client.1: - share: ceph -- workunit: - clients: - client.1: - - kernel_untar_build.sh - diff --git a/suites/samba/workload/smbtorture.yaml b/suites/samba/workload/smbtorture.yaml deleted file mode 100644 index 823489a2082..00000000000 --- a/suites/samba/workload/smbtorture.yaml +++ /dev/null @@ -1,39 +0,0 @@ -tasks: -- pexec: - client.1: - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph 
base.chkpath - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb -# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon -# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl -# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid diff --git a/suites/smoke/basic/% b/suites/smoke/basic/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/smoke/basic/clusters/fixed-3.yaml b/suites/smoke/basic/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/smoke/basic/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/smoke/basic/fs/btrfs.yaml b/suites/smoke/basic/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/smoke/basic/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml b/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml deleted file mode 100644 index ed9d92d5bda..00000000000 --- a/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml b/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml deleted file mode 100644 index b58487c0785..00000000000 --- a/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml b/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml deleted file mode 100644 index dc6df2f709f..00000000000 --- a/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: [client.0] -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml b/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml deleted file mode 100644 index 347c7fdf04c..00000000000 --- a/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - suites/pjd.sh diff --git a/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml b/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml deleted file mode 100644 index 21820071dbc..00000000000 --- a/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml +++ /dev/null @@ -1,13 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false -tasks: -- 
install: -- ceph: -- kclient: -- workunit: - clients: - all: - - direct_io diff --git a/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml b/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml deleted file mode 100644 index cda94a38f6d..00000000000 --- a/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml +++ /dev/null @@ -1,13 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/dbench.sh diff --git a/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml b/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml deleted file mode 100644 index 64bfc5f3811..00000000000 --- a/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,13 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml b/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml deleted file mode 100644 index 272610b2915..00000000000 --- a/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml +++ /dev/null @@ -1,13 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/pjd.sh diff --git a/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml b/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml deleted file mode 100644 index 22d1f142161..00000000000 --- a/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - libcephfs/test.sh diff --git a/suites/smoke/basic/tasks/rados_python.yaml b/suites/smoke/basic/tasks/rados_python.yaml deleted file mode 100644 index b9ac20e57f0..00000000000 --- a/suites/smoke/basic/tasks/rados_python.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - rados/test_python.sh diff --git a/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml b/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml deleted file mode 100644 index 716deac2156..00000000000 --- a/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - rados/load-gen-mix.sh diff --git a/suites/smoke/basic/tasks/rbd_api_tests.yaml b/suites/smoke/basic/tasks/rbd_api_tests.yaml deleted file mode 100644 index 46e43b98ab4..00000000000 --- a/suites/smoke/basic/tasks/rbd_api_tests.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - rbd/test_librbd.sh - env: - RBD_FEATURES: "1" diff --git a/suites/smoke/basic/tasks/rbd_api_tests_old_format.yaml b/suites/smoke/basic/tasks/rbd_api_tests_old_format.yaml deleted file mode 100644 index 390b9c034f1..00000000000 --- a/suites/smoke/basic/tasks/rbd_api_tests_old_format.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - rbd/test_librbd.sh diff --git a/suites/smoke/basic/tasks/rbd_cli_import_export.yaml b/suites/smoke/basic/tasks/rbd_cli_import_export.yaml deleted file mode 100644 index df23dc58f33..00000000000 --- a/suites/smoke/basic/tasks/rbd_cli_import_export.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- 
install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/smoke/basic/tasks/rbd_cli_import_export_old_format.yaml b/suites/smoke/basic/tasks/rbd_cli_import_export_old_format.yaml deleted file mode 100644 index c870ad42153..00000000000 --- a/suites/smoke/basic/tasks/rbd_cli_import_export_old_format.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - rbd/import_export.sh diff --git a/suites/smoke/basic/tasks/rbd_python_api_tests.yaml b/suites/smoke/basic/tasks/rbd_python_api_tests.yaml deleted file mode 100644 index 9714a6e40ee..00000000000 --- a/suites/smoke/basic/tasks/rbd_python_api_tests.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - rbd/test_librbd_python.sh - env: - RBD_FEATURES: "1" diff --git a/suites/smoke/basic/tasks/rbd_python_api_tests_old_format.yaml b/suites/smoke/basic/tasks/rbd_python_api_tests_old_format.yaml deleted file mode 100644 index 642175f7766..00000000000 --- a/suites/smoke/basic/tasks/rbd_python_api_tests_old_format.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - rbd/test_librbd_python.sh diff --git a/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml b/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml deleted file mode 100644 index 461a59a1df0..00000000000 --- a/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml +++ /dev/null @@ -1,15 +0,0 @@ -overrides: - ceph: - conf: - global: - ms die on skipped message: false -tasks: -- install: -- ceph: -- rbd: - all: - image_size: 20480 -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/smoke/multiclient/% b/suites/smoke/multiclient/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/smoke/multiclient/clusters/two_clients.yaml b/suites/smoke/multiclient/clusters/two_clients.yaml deleted file mode 100644 index d062b8ce040..00000000000 --- a/suites/smoke/multiclient/clusters/two_clients.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1] -- [client.1] -- [client.0] diff --git a/suites/smoke/multiclient/fs/btrfs.yaml b/suites/smoke/multiclient/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/smoke/multiclient/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/smoke/multiclient/tasks/locktest.yaml b/suites/smoke/multiclient/tasks/locktest.yaml deleted file mode 100644 index 444bb1f19b3..00000000000 --- a/suites/smoke/multiclient/tasks/locktest.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- locktest: [client.0, client.1] diff --git a/suites/smoke/multifs/% b/suites/smoke/multifs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/smoke/multifs/clusters/fixed-3.yaml b/suites/smoke/multifs/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/smoke/multifs/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/smoke/multifs/fs/btrfs.yaml b/suites/smoke/multifs/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/smoke/multifs/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git 
a/suites/smoke/multifs/tasks/rgw_s3tests.yaml b/suites/smoke/multifs/tasks/rgw_s3tests.yaml deleted file mode 100644 index 62608773a2a..00000000000 --- a/suites/smoke/multifs/tasks/rgw_s3tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: -- rgw: [client.0] -- s3tests: - client.0: - rgw_server: client.0 diff --git a/suites/smoke/multimon/% b/suites/smoke/multimon/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/smoke/multimon/clusters/6.yaml b/suites/smoke/multimon/clusters/6.yaml deleted file mode 100644 index 662fc92b8de..00000000000 --- a/suites/smoke/multimon/clusters/6.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.d, osd.0] -- [mon.b, mon.e, mds.a] -- [mon.c, mon.f, osd.1] diff --git a/suites/smoke/multimon/tasks/mon_recovery.yaml b/suites/smoke/multimon/tasks/mon_recovery.yaml deleted file mode 100644 index 94721ea53a4..00000000000 --- a/suites/smoke/multimon/tasks/mon_recovery.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: -- ceph: -- mon_recovery: diff --git a/suites/smoke/singleton/% b/suites/smoke/singleton/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/smoke/singleton/all/filestore-idempotent.yaml b/suites/smoke/singleton/all/filestore-idempotent.yaml deleted file mode 100644 index c6af200d57f..00000000000 --- a/suites/smoke/singleton/all/filestore-idempotent.yaml +++ /dev/null @@ -1,6 +0,0 @@ -roles: -- [mon.0, osd.0, osd.1, mds.a, client.0] -tasks: -- install: -- ceph: -- filestore_idempotent: diff --git a/suites/smoke/singleton/all/osd-backfill.yaml b/suites/smoke/singleton/all/osd-backfill.yaml deleted file mode 100644 index 77a79440f55..00000000000 --- a/suites/smoke/singleton/all/osd-backfill.yaml +++ /dev/null @@ -1,17 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - conf: - osd: - osd min pg log entries: 5 -- osd_backfill: diff --git a/suites/smoke/singleton/all/thrash-rados.yaml b/suites/smoke/singleton/all/thrash-rados.yaml deleted file mode 100644 index 157f0f71cc8..00000000000 --- a/suites/smoke/singleton/all/thrash-rados.yaml +++ /dev/null @@ -1,23 +0,0 @@ -roles: -- - mon.a - - mds.0 - - osd.0 -- - osd.1 -- - osd.2 -- - osd.3 -- - osd.4 -- - client.0 -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down -- thrashosds: - op_delay: 30 - clean_interval: 120 - chance_down: .5 -- ceph-fuse: -- workunit: - clients: - all: - - rados/load-gen-mix-small.sh diff --git a/suites/smoke/singleton/fs/btrfs.yaml b/suites/smoke/singleton/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/smoke/singleton/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/smoke/thrash/% b/suites/smoke/thrash/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/smoke/thrash/clusters/6-osd-3-machine.yaml b/suites/smoke/thrash/clusters/6-osd-3-machine.yaml deleted file mode 100644 index f6247ebf2f3..00000000000 --- a/suites/smoke/thrash/clusters/6-osd-3-machine.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, osd.0, osd.1, osd.2] -- [mds.a, osd.3, osd.4, osd.5] -- [client.0] diff --git a/suites/smoke/thrash/fs/xfs.yaml b/suites/smoke/thrash/fs/xfs.yaml deleted file mode 120000 index 4c28d731f6b..00000000000 --- a/suites/smoke/thrash/fs/xfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/xfs.yaml \ No newline at end of file diff --git 
a/suites/smoke/thrash/thrashers/default.yaml b/suites/smoke/thrash/thrashers/default.yaml deleted file mode 100644 index 14d772583cf..00000000000 --- a/suites/smoke/thrash/thrashers/default.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -- thrashosds: diff --git a/suites/smoke/thrash/workloads/admin_socket_objecter_requests.yaml b/suites/smoke/thrash/workloads/admin_socket_objecter_requests.yaml deleted file mode 100644 index 66791551fb0..00000000000 --- a/suites/smoke/thrash/workloads/admin_socket_objecter_requests.yaml +++ /dev/null @@ -1,13 +0,0 @@ -overrides: - ceph: - conf: - client.0: - admin socket: /var/run/ceph/ceph-$name.asok -tasks: -- radosbench: - clients: [client.0] - time: 60 -- admin_socket: - client.0: - objecter_requests: - test: "http://ceph.newdream.net/git/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}" diff --git a/suites/smoke/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled b/suites/smoke/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled deleted file mode 100644 index d61ede1bd66..00000000000 --- a/suites/smoke/thrash/workloads/rbd_workunit_suites_iozone.yaml.disabled +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- rbd: - all: - image_size: 20480 -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/smoke/thrash/workloads/snaps-few-objects.yaml b/suites/smoke/thrash/workloads/snaps-few-objects.yaml deleted file mode 100644 index aa82d973ae1..00000000000 --- a/suites/smoke/thrash/workloads/snaps-few-objects.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - copy_from: 50 diff --git a/suites/smoke/verify/% b/suites/smoke/verify/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/smoke/verify/clusters/fixed-3.yaml b/suites/smoke/verify/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/smoke/verify/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/smoke/verify/fs/btrfs.yaml b/suites/smoke/verify/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/smoke/verify/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/smoke/verify/tasks/cfuse_workunit_suites_fsstress.yaml b/suites/smoke/verify/tasks/cfuse_workunit_suites_fsstress.yaml deleted file mode 100644 index b58487c0785..00000000000 --- a/suites/smoke/verify/tasks/cfuse_workunit_suites_fsstress.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/smoke/verify/tasks/libcephfs_interface_tests.yaml b/suites/smoke/verify/tasks/libcephfs_interface_tests.yaml deleted file mode 100644 index 22d1f142161..00000000000 --- a/suites/smoke/verify/tasks/libcephfs_interface_tests.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - libcephfs/test.sh diff --git a/suites/smoke/verify/tasks/mon_recovery.yaml b/suites/smoke/verify/tasks/mon_recovery.yaml deleted file mode 100644 index 94721ea53a4..00000000000 --- a/suites/smoke/verify/tasks/mon_recovery.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: -- ceph: -- 
mon_recovery: diff --git a/suites/smoke/verify/tasks/rados_api_tests.yaml b/suites/smoke/verify/tasks/rados_api_tests.yaml deleted file mode 100644 index c154219bc1b..00000000000 --- a/suites/smoke/verify/tasks/rados_api_tests.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - rados/test.sh diff --git a/suites/smoke/verify/tasks/rados_cls_all.yaml b/suites/smoke/verify/tasks/rados_cls_all.yaml deleted file mode 100644 index 80be56276db..00000000000 --- a/suites/smoke/verify/tasks/rados_cls_all.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - client.0: - - cls diff --git a/suites/smoke/verify/tasks/rgw_s3tests.yaml b/suites/smoke/verify/tasks/rgw_s3tests.yaml deleted file mode 100644 index 7e5b409f390..00000000000 --- a/suites/smoke/verify/tasks/rgw_s3tests.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: -- ceph: -- rgw: - client.0: - valgrind: [--tool=memcheck] -- s3tests: - default_idle_timeout: 300 - client.0: - rgw_server: client.0 diff --git a/suites/smoke/verify/validater/lockdep.yaml b/suites/smoke/verify/validater/lockdep.yaml deleted file mode 100644 index 25f84355c0b..00000000000 --- a/suites/smoke/verify/validater/lockdep.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - lockdep: true diff --git a/suites/smoke/verify/validater/valgrind.yaml b/suites/smoke/verify/validater/valgrind.yaml deleted file mode 100644 index 518d72b0ffe..00000000000 --- a/suites/smoke/verify/validater/valgrind.yaml +++ /dev/null @@ -1,6 +0,0 @@ -overrides: - ceph: - valgrind: - mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] - osd: [--tool=memcheck] - mds: [--tool=memcheck] diff --git a/suites/stress/bench/% b/suites/stress/bench/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/stress/bench/clusters/fixed-3.yaml b/suites/stress/bench/clusters/fixed-3.yaml deleted file mode 120000 index a3ac9fc4dec..00000000000 --- a/suites/stress/bench/clusters/fixed-3.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../clusters/fixed-3.yaml \ No newline at end of file diff --git a/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml b/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml deleted file mode 100644 index eafec39e3d0..00000000000 --- a/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- ceph-fuse: -- workunit: - clients: - all: - - snaps diff --git a/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml b/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml deleted file mode 100644 index a0d2e765bdb..00000000000 --- a/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: -- kclient: -- workunit: - clients: - all: - - suites/fsx.sh diff --git a/suites/stress/thrash/% b/suites/stress/thrash/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/stress/thrash/clusters/16-osd.yaml b/suites/stress/thrash/clusters/16-osd.yaml deleted file mode 100644 index 373dd4052c3..00000000000 --- a/suites/stress/thrash/clusters/16-osd.yaml +++ /dev/null @@ -1,18 +0,0 @@ -roles: -- [mon.0, mds.a, osd.0] -- [mon.1, osd.1] -- [mon.2, osd.2] -- [osd.3] -- [osd.4] -- [osd.5] -- [osd.6] -- [osd.7] -- [osd.8] -- [osd.9] -- [osd.10] -- [osd.11] -- [osd.12] -- [osd.13] -- [osd.14] -- [osd.15] -- [client.0] diff --git a/suites/stress/thrash/clusters/3-osd-1-machine.yaml 
b/suites/stress/thrash/clusters/3-osd-1-machine.yaml deleted file mode 100644 index d8ff594b95d..00000000000 --- a/suites/stress/thrash/clusters/3-osd-1-machine.yaml +++ /dev/null @@ -1,3 +0,0 @@ -roles: -- [mon.0, mds.a, osd.0, osd.1, osd.2] -- [mon.1, mon.2, client.0] diff --git a/suites/stress/thrash/clusters/8-osd.yaml b/suites/stress/thrash/clusters/8-osd.yaml deleted file mode 100644 index 3b131054e95..00000000000 --- a/suites/stress/thrash/clusters/8-osd.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- [mon.0, mds.a, osd.0] -- [mon.1, osd.1] -- [mon.2, osd.2] -- [osd.3] -- [osd.4] -- [osd.5] -- [osd.6] -- [osd.7] -- [client.0] diff --git a/suites/stress/thrash/fs/btrfs.yaml b/suites/stress/thrash/fs/btrfs.yaml deleted file mode 120000 index 10d0c3f1266..00000000000 --- a/suites/stress/thrash/fs/btrfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/btrfs.yaml \ No newline at end of file diff --git a/suites/stress/thrash/fs/none.yaml b/suites/stress/thrash/fs/none.yaml deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/stress/thrash/fs/xfs.yaml b/suites/stress/thrash/fs/xfs.yaml deleted file mode 120000 index 4c28d731f6b..00000000000 --- a/suites/stress/thrash/fs/xfs.yaml +++ /dev/null @@ -1 +0,0 @@ -../../../../fs/xfs.yaml \ No newline at end of file diff --git a/suites/stress/thrash/thrashers/default.yaml b/suites/stress/thrash/thrashers/default.yaml deleted file mode 100644 index 14d772583cf..00000000000 --- a/suites/stress/thrash/thrashers/default.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -- thrashosds: diff --git a/suites/stress/thrash/thrashers/fast.yaml b/suites/stress/thrash/thrashers/fast.yaml deleted file mode 100644 index eea9c06cd90..00000000000 --- a/suites/stress/thrash/thrashers/fast.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -- thrashosds: - op_delay: 1 - chance_down: 10 diff --git a/suites/stress/thrash/thrashers/more-down.yaml b/suites/stress/thrash/thrashers/more-down.yaml deleted file mode 100644 index e39098b1cb6..00000000000 --- a/suites/stress/thrash/thrashers/more-down.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: -- ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost -- thrashosds: - chance_down: 50 diff --git a/suites/stress/thrash/workloads/bonnie_cfuse.yaml b/suites/stress/thrash/workloads/bonnie_cfuse.yaml deleted file mode 100644 index 912f12d6ce7..00000000000 --- a/suites/stress/thrash/workloads/bonnie_cfuse.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/bonnie.sh diff --git a/suites/stress/thrash/workloads/iozone_cfuse.yaml b/suites/stress/thrash/workloads/iozone_cfuse.yaml deleted file mode 100644 index 18a6051be39..00000000000 --- a/suites/stress/thrash/workloads/iozone_cfuse.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- ceph-fuse: -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/stress/thrash/workloads/radosbench.yaml b/suites/stress/thrash/workloads/radosbench.yaml deleted file mode 100644 index 3940870fce0..00000000000 --- a/suites/stress/thrash/workloads/radosbench.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- radosbench: - clients: [client.0] - time: 1800 diff --git a/suites/stress/thrash/workloads/readwrite.yaml b/suites/stress/thrash/workloads/readwrite.yaml deleted file mode 100644 index 
c53e52b0872..00000000000 --- a/suites/stress/thrash/workloads/readwrite.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 500 - op_weights: - read: 45 - write: 45 - delete: 10 diff --git a/suites/tgt/basic/% b/suites/tgt/basic/% deleted file mode 100644 index 8b137891791..00000000000 --- a/suites/tgt/basic/% +++ /dev/null @@ -1 +0,0 @@ - diff --git a/suites/tgt/basic/clusters/fixed-3.yaml b/suites/tgt/basic/clusters/fixed-3.yaml deleted file mode 100644 index 0038432afa7..00000000000 --- a/suites/tgt/basic/clusters/fixed-3.yaml +++ /dev/null @@ -1,4 +0,0 @@ -roles: -- [mon.a, mon.c, osd.0, osd.1, osd.2] -- [mon.b, mds.a, osd.3, osd.4, osd.5] -- [client.0] diff --git a/suites/tgt/basic/fs/btrfs.yaml b/suites/tgt/basic/fs/btrfs.yaml deleted file mode 100644 index 4c7af311538..00000000000 --- a/suites/tgt/basic/fs/btrfs.yaml +++ /dev/null @@ -1,6 +0,0 @@ -overrides: - ceph: - fs: btrfs - conf: - osd: - osd op thread timeout: 60 diff --git a/suites/tgt/basic/msgr-failures/few.yaml b/suites/tgt/basic/msgr-failures/few.yaml deleted file mode 100644 index 0de320d46b8..00000000000 --- a/suites/tgt/basic/msgr-failures/few.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 5000 diff --git a/suites/tgt/basic/msgr-failures/many.yaml b/suites/tgt/basic/msgr-failures/many.yaml deleted file mode 100644 index 86f8dde8a0e..00000000000 --- a/suites/tgt/basic/msgr-failures/many.yaml +++ /dev/null @@ -1,5 +0,0 @@ -overrides: - ceph: - conf: - global: - ms inject socket failures: 500 diff --git a/suites/tgt/basic/tasks/blogbench.yaml b/suites/tgt/basic/tasks/blogbench.yaml deleted file mode 100644 index f77a78b6bc0..00000000000 --- a/suites/tgt/basic/tasks/blogbench.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/tgt/basic/tasks/bonnie.yaml b/suites/tgt/basic/tasks/bonnie.yaml deleted file mode 100644 index 2cbfcf8872e..00000000000 --- a/suites/tgt/basic/tasks/bonnie.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/bonnie.sh diff --git a/suites/tgt/basic/tasks/dbench-short.yaml b/suites/tgt/basic/tasks/dbench-short.yaml deleted file mode 100644 index fcb721a4d14..00000000000 --- a/suites/tgt/basic/tasks/dbench-short.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/dbench-short.sh diff --git a/suites/tgt/basic/tasks/dbench.yaml b/suites/tgt/basic/tasks/dbench.yaml deleted file mode 100644 index 7f732175faa..00000000000 --- a/suites/tgt/basic/tasks/dbench.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/dbench.sh diff --git a/suites/tgt/basic/tasks/ffsb.yaml b/suites/tgt/basic/tasks/ffsb.yaml deleted file mode 100644 index f50a3a19647..00000000000 --- a/suites/tgt/basic/tasks/ffsb.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/ffsb.sh diff --git a/suites/tgt/basic/tasks/fio.yaml b/suites/tgt/basic/tasks/fio.yaml deleted file mode 100644 index e7346ce528e..00000000000 --- a/suites/tgt/basic/tasks/fio.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/fio.sh diff --git a/suites/tgt/basic/tasks/fsstress.yaml 
b/suites/tgt/basic/tasks/fsstress.yaml deleted file mode 100644 index c77f511c0f6..00000000000 --- a/suites/tgt/basic/tasks/fsstress.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/fsstress.sh diff --git a/suites/tgt/basic/tasks/fsx.yaml b/suites/tgt/basic/tasks/fsx.yaml deleted file mode 100644 index 04732c84009..00000000000 --- a/suites/tgt/basic/tasks/fsx.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/fsx.sh diff --git a/suites/tgt/basic/tasks/fsync-tester.yaml b/suites/tgt/basic/tasks/fsync-tester.yaml deleted file mode 100644 index ea627b7d184..00000000000 --- a/suites/tgt/basic/tasks/fsync-tester.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/fsync-tester.sh diff --git a/suites/tgt/basic/tasks/iogen.yaml b/suites/tgt/basic/tasks/iogen.yaml deleted file mode 100644 index 1065c74daba..00000000000 --- a/suites/tgt/basic/tasks/iogen.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/iogen.sh diff --git a/suites/tgt/basic/tasks/iozone-sync.yaml b/suites/tgt/basic/tasks/iozone-sync.yaml deleted file mode 100644 index ac241a417e8..00000000000 --- a/suites/tgt/basic/tasks/iozone-sync.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/iozone-sync.sh diff --git a/suites/tgt/basic/tasks/iozone.yaml b/suites/tgt/basic/tasks/iozone.yaml deleted file mode 100644 index cf5604c21a7..00000000000 --- a/suites/tgt/basic/tasks/iozone.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/iozone.sh diff --git a/suites/tgt/basic/tasks/pjd.yaml b/suites/tgt/basic/tasks/pjd.yaml deleted file mode 100644 index ba5c631f157..00000000000 --- a/suites/tgt/basic/tasks/pjd.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- install: -- ceph: -- tgt: -- iscsi: -- workunit: - clients: - all: - - suites/pjd.sh diff --git a/suites/upgrade/cuttlefish/fs/% b/suites/upgrade/cuttlefish/fs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/cuttlefish/fs/0-cluster/start.yaml b/suites/upgrade/cuttlefish/fs/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/cuttlefish/fs/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/bobtail.v0.61.5.yaml b/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/bobtail.v0.61.5.yaml deleted file mode 100644 index 286cdd66624..00000000000 --- a/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/bobtail.v0.61.5.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: bobtail -- ceph: -- install.upgrade: - all: - tag: v0.61.5 -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.5.yaml b/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.5.yaml deleted file mode 100644 index 07d04317ec1..00000000000 --- a/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.5.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.61.5 -- ceph: -- parallel: - - 
workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.6.yaml b/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.6.yaml deleted file mode 100644 index 9d74ab38f00..00000000000 --- a/suites/upgrade/cuttlefish/fs/1-cuttlefish-install/v0.61.6.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.61.6 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/fs/2-workload/blogbench.yaml b/suites/upgrade/cuttlefish/fs/2-workload/blogbench.yaml deleted file mode 100644 index 0cd59eaafde..00000000000 --- a/suites/upgrade/cuttlefish/fs/2-workload/blogbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -workload: - workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index c97aef77e13..00000000000 --- a/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index 9d06ef37d05..00000000000 --- a/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index dd76b10b5ce..00000000000 --- a/suites/upgrade/cuttlefish/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] diff --git a/suites/upgrade/cuttlefish/fs/4-final/monthrash.yaml 
b/suites/upgrade/cuttlefish/fs/4-final/monthrash.yaml deleted file mode 100644 index 13af446eb3b..00000000000 --- a/suites/upgrade/cuttlefish/fs/4-final/monthrash.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- ceph-fuse: -- workunit: - clients: - client.0: - - suites/dbench.sh - diff --git a/suites/upgrade/cuttlefish/fs/4-final/osdthrash.yaml b/suites/upgrade/cuttlefish/fs/4-final/osdthrash.yaml deleted file mode 100644 index dbd7191e36c..00000000000 --- a/suites/upgrade/cuttlefish/fs/4-final/osdthrash.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- ceph-fuse: -- workunit: - clients: - all: - - suites/iogen.sh - diff --git a/suites/upgrade/cuttlefish/rados-older/% b/suites/upgrade/cuttlefish/rados-older/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/cuttlefish/rados-older/0-cluster/start.yaml b/suites/upgrade/cuttlefish/rados-older/0-cluster/start.yaml deleted file mode 100644 index 8626abc26c4..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/0-cluster/start.yaml +++ /dev/null @@ -1,20 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - conf: - paxos service trim min: 5 - mon min osdmap epochs: 25 - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/cuttlefish/rados-older/1-install/bobtail.yaml b/suites/upgrade/cuttlefish/rados-older/1-install/bobtail.yaml deleted file mode 100644 index 21dc2bb2b27..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/1-install/bobtail.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: - branch: bobtail -- ceph: diff --git a/suites/upgrade/cuttlefish/rados-older/1-install/v0.61.1.yaml b/suites/upgrade/cuttlefish/rados-older/1-install/v0.61.1.yaml deleted file mode 100644 index c77ab27ba83..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/1-install/v0.61.1.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: - tag: v0.61.1 -- ceph: diff --git a/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.3.yaml b/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.3.yaml deleted file mode 100644 index aca276b8d39..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.3.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install.upgrade: - all: - tag: v0.61.3 -- ceph.restart: diff --git a/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.4.yaml b/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.4.yaml deleted file mode 100644 index c44994e5eaf..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.4.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install.upgrade: - all: - tag: v0.61.4 -- ceph.restart: diff --git a/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.5.yaml b/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.5.yaml deleted file mode 100644 index d46a1a1527e..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/2-upgrade/v0.61.5.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install.upgrade: - all: - tag: v0.61.5 -- ceph.restart: diff --git a/suites/upgrade/cuttlefish/rados-older/3-rolling-upgrade/all.yaml b/suites/upgrade/cuttlefish/rados-older/3-rolling-upgrade/all.yaml deleted file mode 100644 index fe892358df8..00000000000 --- 
a/suites/upgrade/cuttlefish/rados-older/3-rolling-upgrade/all.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index e70e5d0cf1b..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,39 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.b] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index ed25b701597..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,39 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.b] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index 33368cd8df7..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/4-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,41 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.b] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 diff --git a/suites/upgrade/cuttlefish/rados-older/5-workload/testrados.yaml b/suites/upgrade/cuttlefish/rados-older/5-workload/testrados.yaml deleted file mode 100644 index 49339ecd044..00000000000 --- 
a/suites/upgrade/cuttlefish/rados-older/5-workload/testrados.yaml +++ /dev/null @@ -1,12 +0,0 @@ -workload: - rados: - clients: [client.0] - ops: 2000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/upgrade/cuttlefish/rados-older/6-final/monthrash.yaml b/suites/upgrade/cuttlefish/rados-older/6-final/monthrash.yaml deleted file mode 100644 index 810ba1b30e2..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/6-final/monthrash.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- workunit: - clients: - client.0: - - rados/test.sh - diff --git a/suites/upgrade/cuttlefish/rados-older/6-final/osdthrash.yaml b/suites/upgrade/cuttlefish/rados-older/6-final/osdthrash.yaml deleted file mode 100644 index f81504233ad..00000000000 --- a/suites/upgrade/cuttlefish/rados-older/6-final/osdthrash.yaml +++ /dev/null @@ -1,23 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- rados: - clients: [client.0] - ops: 2000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - diff --git a/suites/upgrade/cuttlefish/rados/% b/suites/upgrade/cuttlefish/rados/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/cuttlefish/rados/0-cluster/start.yaml b/suites/upgrade/cuttlefish/rados/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/cuttlefish/rados/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/bobtail.v0.61.5.yaml b/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/bobtail.v0.61.5.yaml deleted file mode 100644 index 286cdd66624..00000000000 --- a/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/bobtail.v0.61.5.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: bobtail -- ceph: -- install.upgrade: - all: - tag: v0.61.5 -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.5.yaml b/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.5.yaml deleted file mode 100644 index 07d04317ec1..00000000000 --- a/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.5.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.61.5 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.6.yaml b/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.6.yaml deleted file mode 100644 index 9d74ab38f00..00000000000 --- a/suites/upgrade/cuttlefish/rados/1-cuttlefish-install/v0.61.6.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.61.6 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/rados/2-workload/testrados.yaml b/suites/upgrade/cuttlefish/rados/2-workload/testrados.yaml deleted file mode 100644 index 8eaab19fd9e..00000000000 --- a/suites/upgrade/cuttlefish/rados/2-workload/testrados.yaml +++ /dev/null @@ -1,13 +0,0 @@ -workload: - rados: - clients: [client.0] - ops: 2000 - objects: 50 - 
op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - diff --git a/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index c97aef77e13..00000000000 --- a/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index 9d06ef37d05..00000000000 --- a/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index c061399adac..00000000000 --- a/suites/upgrade/cuttlefish/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,35 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 diff --git a/suites/upgrade/cuttlefish/rados/4-final/monthrash.yaml b/suites/upgrade/cuttlefish/rados/4-final/monthrash.yaml deleted file mode 100644 index 810ba1b30e2..00000000000 --- a/suites/upgrade/cuttlefish/rados/4-final/monthrash.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- workunit: - clients: - client.0: - - rados/test.sh - diff --git a/suites/upgrade/cuttlefish/rados/4-final/osdthrash.yaml b/suites/upgrade/cuttlefish/rados/4-final/osdthrash.yaml deleted file mode 100644 index f81504233ad..00000000000 --- a/suites/upgrade/cuttlefish/rados/4-final/osdthrash.yaml +++ /dev/null @@ -1,23 +0,0 @@ -overrides: - ceph: - 
log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- rados: - clients: [client.0] - ops: 2000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - diff --git a/suites/upgrade/cuttlefish/rbd/% b/suites/upgrade/cuttlefish/rbd/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/cuttlefish/rbd/0-cluster/start.yaml b/suites/upgrade/cuttlefish/rbd/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/cuttlefish/rbd/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/bobtail.v0.61.5.yaml b/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/bobtail.v0.61.5.yaml deleted file mode 100644 index 286cdd66624..00000000000 --- a/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/bobtail.v0.61.5.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: bobtail -- ceph: -- install.upgrade: - all: - tag: v0.61.5 -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.5.yaml b/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.5.yaml deleted file mode 100644 index 07d04317ec1..00000000000 --- a/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.5.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.61.5 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.6.yaml b/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.6.yaml deleted file mode 100644 index 9d74ab38f00..00000000000 --- a/suites/upgrade/cuttlefish/rbd/1-cuttlefish-install/v0.61.6.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.61.6 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/rbd/2-workload/rbd.yaml b/suites/upgrade/cuttlefish/rbd/2-workload/rbd.yaml deleted file mode 100644 index ce2fabe0359..00000000000 --- a/suites/upgrade/cuttlefish/rbd/2-workload/rbd.yaml +++ /dev/null @@ -1,14 +0,0 @@ -workload: - sequential: - - workunit: - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format - - workunit: - clients: - client.0: - - cls/test_cls_rbd.sh - - diff --git a/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index c97aef77e13..00000000000 --- a/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git 
a/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index 9d06ef37d05..00000000000 --- a/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index dd76b10b5ce..00000000000 --- a/suites/upgrade/cuttlefish/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] diff --git a/suites/upgrade/cuttlefish/rbd/4-final/monthrash.yaml b/suites/upgrade/cuttlefish/rbd/4-final/monthrash.yaml deleted file mode 100644 index 593191c24f5..00000000000 --- a/suites/upgrade/cuttlefish/rbd/4-final/monthrash.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- workunit: - clients: - client.0: - - rbd/copy.sh - env: - RBD_CREATE_ARGS: --new-format - diff --git a/suites/upgrade/cuttlefish/rbd/4-final/osdthrash.yaml b/suites/upgrade/cuttlefish/rbd/4-final/osdthrash.yaml deleted file mode 100644 index 575fd7922ad..00000000000 --- a/suites/upgrade/cuttlefish/rbd/4-final/osdthrash.yaml +++ /dev/null @@ -1,16 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- workunit: - clients: - client.0: - - rbd/test_lock_fence.sh - diff --git a/suites/upgrade/cuttlefish/rgw/% b/suites/upgrade/cuttlefish/rgw/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/cuttlefish/rgw/0-cluster/start.yaml b/suites/upgrade/cuttlefish/rgw/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/cuttlefish/rgw/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/bobtail.v0.61.5.yaml b/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/bobtail.v0.61.5.yaml deleted file mode 100644 index 
286cdd66624..00000000000 --- a/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/bobtail.v0.61.5.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: bobtail -- ceph: -- install.upgrade: - all: - tag: v0.61.5 -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.5.yaml b/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.5.yaml deleted file mode 100644 index 07d04317ec1..00000000000 --- a/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.5.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.61.5 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.6.yaml b/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.6.yaml deleted file mode 100644 index 9d74ab38f00..00000000000 --- a/suites/upgrade/cuttlefish/rgw/1-cuttlefish-install/v0.61.6.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.61.6 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/cuttlefish/rgw/2-workload/testrgw.yaml b/suites/upgrade/cuttlefish/rgw/2-workload/testrgw.yaml deleted file mode 100644 index f1b2f3e88ef..00000000000 --- a/suites/upgrade/cuttlefish/rgw/2-workload/testrgw.yaml +++ /dev/null @@ -1,6 +0,0 @@ -workload: - rgw: [client.0] - s3tests: - client.0: - rgw_server: client.0 - diff --git a/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index 0de4f185fa2..00000000000 --- a/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,36 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 30 - - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index c7fa40f1f50..00000000000 --- a/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,36 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 30 - - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index 
0d0639ed74a..00000000000 --- a/suites/upgrade/cuttlefish/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,36 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: cuttlefish - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/cuttlefish/rgw/4-final/monthrash.yaml b/suites/upgrade/cuttlefish/rgw/4-final/monthrash.yaml deleted file mode 100644 index 9361edc8015..00000000000 --- a/suites/upgrade/cuttlefish/rgw/4-final/monthrash.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- swift: - client.0: - rgw_server: client.0 - diff --git a/suites/upgrade/cuttlefish/rgw/4-final/osdthrash.yaml b/suites/upgrade/cuttlefish/rgw/4-final/osdthrash.yaml deleted file mode 100644 index 6cf6d861d5f..00000000000 --- a/suites/upgrade/cuttlefish/rgw/4-final/osdthrash.yaml +++ /dev/null @@ -1,15 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- swift: - client.0: - rgw_server: client.0 - diff --git a/suites/upgrade/dumpling-emperor-x/parallel/% b/suites/upgrade/dumpling-emperor-x/parallel/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-emperor-x/parallel/0-cluster/start.yaml b/suites/upgrade/dumpling-emperor-x/parallel/0-cluster/start.yaml deleted file mode 100644 index e3d7f85f9ff..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/0-cluster/start.yaml +++ /dev/null @@ -1,19 +0,0 @@ -overrides: - ceph: - conf: - mon: - mon warn on legacy crush tunables: false - log-whitelist: - - scrub mismatch - - ScrubResult -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 - - client.1 diff --git a/suites/upgrade/dumpling-emperor-x/parallel/1-dumpling-install/dumpling.yaml b/suites/upgrade/dumpling-emperor-x/parallel/1-dumpling-install/dumpling.yaml deleted file mode 100644 index 92df8cebc5f..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - fs: xfs -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling-emperor-x/parallel/2-workload/+ b/suites/upgrade/dumpling-emperor-x/parallel/2-workload/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_api.yaml b/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_api.yaml deleted file mode 100644 index 96d656e4932..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_api.yaml +++ /dev/null @@ -1,8 +0,0 @@ -workload: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - rados/test.sh - - cls diff --git a/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_loadgenbig.yaml 
b/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_loadgenbig.yaml deleted file mode 100644 index 16241b3bed6..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/2-workload/rados_loadgenbig.yaml +++ /dev/null @@ -1,7 +0,0 @@ -workload: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - rados/load-gen-big.sh diff --git a/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_api.yaml b/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_api.yaml deleted file mode 100644 index 7584f0e1ff0..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_api.yaml +++ /dev/null @@ -1,7 +0,0 @@ -workload: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - rbd/test_librbd.sh diff --git a/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_python.yaml b/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_python.yaml deleted file mode 100644 index 09c5326592b..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/2-workload/test_rbd_python.yaml +++ /dev/null @@ -1,7 +0,0 @@ -workload: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - rbd/test_librbd_python.sh diff --git a/suites/upgrade/dumpling-emperor-x/parallel/3-emperor-upgrade/emperor.yaml b/suites/upgrade/dumpling-emperor-x/parallel/3-emperor-upgrade/emperor.yaml deleted file mode 100644 index 626bc161cbd..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/3-emperor-upgrade/emperor.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: - - install.upgrade: - mon.a: - branch: emperor - mon.b: - branch: emperor - - ceph.restart: - - parallel: - - workload2 - - upgrade-sequence diff --git a/suites/upgrade/dumpling-emperor-x/parallel/4-workload/+ b/suites/upgrade/dumpling-emperor-x/parallel/4-workload/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_api.yaml b/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_api.yaml deleted file mode 100644 index b6bb42048a4..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_api.yaml +++ /dev/null @@ -1,8 +0,0 @@ -workload2: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - rados/test.sh - - cls diff --git a/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_loadgenbig.yaml b/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_loadgenbig.yaml deleted file mode 100644 index fd5c31dc477..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/4-workload/rados_loadgenbig.yaml +++ /dev/null @@ -1,7 +0,0 @@ -workload2: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - rados/load-gen-big.sh diff --git a/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_api.yaml b/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_api.yaml deleted file mode 100644 index 8c8c97a4bf3..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_api.yaml +++ /dev/null @@ -1,7 +0,0 @@ -workload2: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - rbd/test_librbd.sh diff --git a/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_python.yaml b/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_python.yaml deleted file mode 100644 index 1edb13cf907..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/4-workload/test_rbd_python.yaml +++ /dev/null @@ -1,7 +0,0 @@ -workload2: - sequential: - - workunit: - 
branch: dumpling - clients: - client.0: - - rbd/test_librbd_python.sh diff --git a/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-all.yaml b/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-all.yaml deleted file mode 100644 index da6028e47b2..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-all.yaml +++ /dev/null @@ -1,8 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - mon.a: - branch: emperor - mon.b: - branch: emperor - - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index b9027db7eea..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/5-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,35 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - mon.a: - branch: emperor - mon.b: - branch: emperor - - ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.b] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 60 - - ceph.restart: [osd.1] - - sleep: - duration: 60 - - ceph.restart: [osd.2] - - sleep: - duration: 60 - - ceph.restart: [osd.3] - - sleep: - duration: 60 - - ceph.restart: [mds.a] diff --git a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/+ b/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados-snaps-few-objects.yaml b/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados-snaps-few-objects.yaml deleted file mode 100644 index bf85020d8d9..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados-snaps-few-objects.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- rados: - clients: [client.1] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_loadgenmix.yaml b/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_loadgenmix.yaml deleted file mode 100644 index 0bddda0ab84..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_loadgenmix.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: - - workunit: - branch: dumpling - clients: - client.1: - - rados/load-gen-mix.sh diff --git a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_mon_thrash.yaml b/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_mon_thrash.yaml deleted file mode 100644 index 1a932e059f0..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rados_mon_thrash.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- workunit: - branch: dumpling - clients: - client.1: - - rados/test.sh diff --git a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_cls.yaml b/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_cls.yaml deleted file mode 100644 index 9407ab48916..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_cls.yaml 
+++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.1: - - cls/test_cls_rbd.sh - diff --git a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_import_export.yaml b/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_import_export.yaml deleted file mode 100644 index 185cd1ab32a..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rbd_import_export.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.1: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_s3tests.yaml b/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_s3tests.yaml deleted file mode 100644 index 22c3a3f821a..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_s3tests.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- rgw: [client.1] -- s3tests: - client.1: - rgw_server: client.1 diff --git a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_swift.yaml b/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_swift.yaml deleted file mode 100644 index 0ab9febd2fc..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/6-final-workload/rgw_swift.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -# Uncomment the next line if you have not already included rgw_s3tests.yaml in your test. -# - rgw: [client.1] -- swift: - client.1: - rgw_server: client.1 diff --git a/suites/upgrade/dumpling-emperor-x/parallel/distros b/suites/upgrade/dumpling-emperor-x/parallel/distros deleted file mode 120000 index 79010c36a59..00000000000 --- a/suites/upgrade/dumpling-emperor-x/parallel/distros +++ /dev/null @@ -1 +0,0 @@ -../../../../distros/supported \ No newline at end of file diff --git a/suites/upgrade/dumpling-x/parallel/% b/suites/upgrade/dumpling-x/parallel/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-x/parallel/0-cluster/start.yaml b/suites/upgrade/dumpling-x/parallel/0-cluster/start.yaml deleted file mode 100644 index 4a9420f3906..00000000000 --- a/suites/upgrade/dumpling-x/parallel/0-cluster/start.yaml +++ /dev/null @@ -1,19 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 - - client.1 -overrides: - ceph: - conf: - mon: - mon warn on legacy crush tunables: false - log-whitelist: - - scrub mismatch - - ScrubResult diff --git a/suites/upgrade/dumpling-x/parallel/1-dumpling-install/dumpling.yaml b/suites/upgrade/dumpling-x/parallel/1-dumpling-install/dumpling.yaml deleted file mode 100644 index adbdedee518..00000000000 --- a/suites/upgrade/dumpling-x/parallel/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: dumpling -- print: "**** done install" -- ceph: - fs: xfs -- print: "**** done ceph" -- parallel: - - workload - - upgrade-sequence -- print: "**** done parallel" diff --git a/suites/upgrade/dumpling-x/parallel/2-workload/+ b/suites/upgrade/dumpling-x/parallel/2-workload/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-x/parallel/2-workload/rados_api.yaml b/suites/upgrade/dumpling-x/parallel/2-workload/rados_api.yaml deleted file mode 100644 index cd820a8a711..00000000000 --- a/suites/upgrade/dumpling-x/parallel/2-workload/rados_api.yaml +++ /dev/null @@ -1,9 +0,0 @@ -workload: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - 
rados/test-upgrade-firefly.sh - - cls - diff --git a/suites/upgrade/dumpling-x/parallel/2-workload/rados_loadgenbig.yaml b/suites/upgrade/dumpling-x/parallel/2-workload/rados_loadgenbig.yaml deleted file mode 100644 index cc1ef874cb0..00000000000 --- a/suites/upgrade/dumpling-x/parallel/2-workload/rados_loadgenbig.yaml +++ /dev/null @@ -1,7 +0,0 @@ -workload: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - rados/load-gen-big.sh diff --git a/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_api.yaml b/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_api.yaml deleted file mode 100644 index 36ffa27ec3f..00000000000 --- a/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_api.yaml +++ /dev/null @@ -1,7 +0,0 @@ -workload: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - rbd/test_librbd.sh diff --git a/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_python.yaml b/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_python.yaml deleted file mode 100644 index e704a9794b9..00000000000 --- a/suites/upgrade/dumpling-x/parallel/2-workload/test_rbd_python.yaml +++ /dev/null @@ -1,7 +0,0 @@ -workload: - sequential: - - workunit: - branch: dumpling - clients: - client.0: - - rbd/test_librbd_python.sh diff --git a/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-all.yaml deleted file mode 100644 index f5d10cdfcab..00000000000 --- a/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-all.yaml +++ /dev/null @@ -1,6 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - mon.a: - mon.b: - - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index fcb61b1cef2..00000000000 --- a/suites/upgrade/dumpling-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - mon.a: - mon.b: - - ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.b] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 60 - - ceph.restart: [osd.1] - - sleep: - duration: 60 - - ceph.restart: [osd.2] - - sleep: - duration: 60 - - ceph.restart: [osd.3] - - sleep: - duration: 60 - - ceph.restart: [mds.a] diff --git a/suites/upgrade/dumpling-x/parallel/4-final-upgrade/client.yaml b/suites/upgrade/dumpling-x/parallel/4-final-upgrade/client.yaml deleted file mode 100644 index cf35d41e6c5..00000000000 --- a/suites/upgrade/dumpling-x/parallel/4-final-upgrade/client.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: - - install.upgrade: - client.0: - - print: "**** done install.upgrade" diff --git a/suites/upgrade/dumpling-x/parallel/5-final-workload/+ b/suites/upgrade/dumpling-x/parallel/5-final-workload/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-x/parallel/5-final-workload/rados-snaps-few-objects.yaml b/suites/upgrade/dumpling-x/parallel/5-final-workload/rados-snaps-few-objects.yaml deleted file mode 100644 index 40f66da37f2..00000000000 --- a/suites/upgrade/dumpling-x/parallel/5-final-workload/rados-snaps-few-objects.yaml 
+++ /dev/null @@ -1,12 +0,0 @@ -tasks: - - rados: - clients: [client.1] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_loadgenmix.yaml b/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_loadgenmix.yaml deleted file mode 100644 index faa96ed24d5..00000000000 --- a/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_loadgenmix.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: - - workunit: - clients: - client.1: - - rados/load-gen-mix.sh diff --git a/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_mon_thrash.yaml b/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_mon_thrash.yaml deleted file mode 100644 index 88019bef17a..00000000000 --- a/suites/upgrade/dumpling-x/parallel/5-final-workload/rados_mon_thrash.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: - - mon_thrash: - revive_delay: 20 - thrash_delay: 1 - - workunit: - clients: - client.1: - - rados/test.sh diff --git a/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_cls.yaml b/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_cls.yaml deleted file mode 100644 index 4ef47768237..00000000000 --- a/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_cls.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: - - workunit: - clients: - client.1: - - cls/test_cls_rbd.sh - diff --git a/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_import_export.yaml b/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_import_export.yaml deleted file mode 100644 index 6c40377324d..00000000000 --- a/suites/upgrade/dumpling-x/parallel/5-final-workload/rbd_import_export.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: - - workunit: - clients: - client.1: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_s3tests.yaml b/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_s3tests.yaml deleted file mode 100644 index 53ceb786ba0..00000000000 --- a/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_s3tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: - - rgw: [client.1] - - s3tests: - client.1: - rgw_server: client.1 - branch: dumpling diff --git a/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_swift.yaml b/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_swift.yaml deleted file mode 100644 index 44085b469d2..00000000000 --- a/suites/upgrade/dumpling-x/parallel/5-final-workload/rgw_swift.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -# no need for rwg when we use + -# - rgw: [client.1] - - swift: - client.1: - rgw_server: client.1 diff --git a/suites/upgrade/dumpling-x/parallel/distros b/suites/upgrade/dumpling-x/parallel/distros deleted file mode 120000 index 79010c36a59..00000000000 --- a/suites/upgrade/dumpling-x/parallel/distros +++ /dev/null @@ -1 +0,0 @@ -../../../../distros/supported \ No newline at end of file diff --git a/suites/upgrade/dumpling-x/stress-split/% b/suites/upgrade/dumpling-x/stress-split/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-x/stress-split/0-cluster/start.yaml b/suites/upgrade/dumpling-x/stress-split/0-cluster/start.yaml deleted file mode 100644 index d8f49e35396..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - conf: - mon: - mon warn on legacy crush tunables: false -roles: -- - mon.a - - mon.b - - mds.a - - osd.0 - - 
osd.1 - - osd.2 -- - osd.3 - - osd.4 - - osd.5 - - mon.c -- - client.0 diff --git a/suites/upgrade/dumpling-x/stress-split/1-dumpling-install/dumpling.yaml b/suites/upgrade/dumpling-x/stress-split/1-dumpling-install/dumpling.yaml deleted file mode 100644 index c98631e2bbd..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - fs: xfs diff --git a/suites/upgrade/dumpling-x/stress-split/2-partial-upgrade/firsthalf.yaml b/suites/upgrade/dumpling-x/stress-split/2-partial-upgrade/firsthalf.yaml deleted file mode 100644 index 312df6e21c6..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/2-partial-upgrade/firsthalf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install.upgrade: - osd.0: -- ceph.restart: - daemons: [osd.0, osd.1, osd.2] diff --git a/suites/upgrade/dumpling-x/stress-split/3-thrash/default.yaml b/suites/upgrade/dumpling-x/stress-split/3-thrash/default.yaml deleted file mode 100644 index a85510eb6fa..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/3-thrash/default.yaml +++ /dev/null @@ -1,12 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 - thrash_primary_affinity: false diff --git a/suites/upgrade/dumpling-x/stress-split/4-mon/mona.yaml b/suites/upgrade/dumpling-x/stress-split/4-mon/mona.yaml deleted file mode 100644 index b6ffb3323d1..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/4-mon/mona.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true diff --git a/suites/upgrade/dumpling-x/stress-split/5-workload/+ b/suites/upgrade/dumpling-x/stress-split/5-workload/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-x/stress-split/5-workload/rados_api_tests.yaml b/suites/upgrade/dumpling-x/stress-split/5-workload/rados_api_tests.yaml deleted file mode 100644 index 7b2c72cbb2e..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/5-workload/rados_api_tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rados/test-upgrade-firefly.sh diff --git a/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-cls.yaml b/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-cls.yaml deleted file mode 100644 index db3dff7fc5c..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-cls.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - cls/test_cls_rbd.sh diff --git a/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-import-export.yaml b/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-import-export.yaml deleted file mode 100644 index a5a964ce13b..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/5-workload/rbd-import-export.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/upgrade/dumpling-x/stress-split/5-workload/readwrite.yaml b/suites/upgrade/dumpling-x/stress-split/5-workload/readwrite.yaml deleted file mode 100644 index c53e52b0872..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/5-workload/readwrite.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - 
objects: 500 - op_weights: - read: 45 - write: 45 - delete: 10 diff --git a/suites/upgrade/dumpling-x/stress-split/5-workload/snaps-few-objects.yaml b/suites/upgrade/dumpling-x/stress-split/5-workload/snaps-few-objects.yaml deleted file mode 100644 index c54039766c0..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/5-workload/snaps-few-objects.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/upgrade/dumpling-x/stress-split/6-next-mon/monb.yaml b/suites/upgrade/dumpling-x/stress-split/6-next-mon/monb.yaml deleted file mode 100644 index 513890c41c0..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/6-next-mon/monb.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- ceph.restart: - daemons: [mon.b] - wait-for-healthy: false - wait-for-osds-up: true diff --git a/suites/upgrade/dumpling-x/stress-split/7-workload/+ b/suites/upgrade/dumpling-x/stress-split/7-workload/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-x/stress-split/7-workload/rados_api_tests.yaml b/suites/upgrade/dumpling-x/stress-split/7-workload/rados_api_tests.yaml deleted file mode 100644 index 7b2c72cbb2e..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/7-workload/rados_api_tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rados/test-upgrade-firefly.sh diff --git a/suites/upgrade/dumpling-x/stress-split/7-workload/radosbench.yaml b/suites/upgrade/dumpling-x/stress-split/7-workload/radosbench.yaml deleted file mode 100644 index 3940870fce0..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/7-workload/radosbench.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- radosbench: - clients: [client.0] - time: 1800 diff --git a/suites/upgrade/dumpling-x/stress-split/7-workload/rbd_api.yaml b/suites/upgrade/dumpling-x/stress-split/7-workload/rbd_api.yaml deleted file mode 100644 index bbcde3e1559..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/7-workload/rbd_api.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rbd/test_librbd.sh diff --git a/suites/upgrade/dumpling-x/stress-split/8-next-mon/monc.yaml b/suites/upgrade/dumpling-x/stress-split/8-next-mon/monc.yaml deleted file mode 100644 index 73f22bd5f7c..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/8-next-mon/monc.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install.upgrade: - mon.c: -- ceph.restart: - daemons: [mon.c] - wait-for-healthy: false - wait-for-osds-up: true -- ceph.wait_for_mon_quorum: [a, b, c] diff --git a/suites/upgrade/dumpling-x/stress-split/9-workload/+ b/suites/upgrade/dumpling-x/stress-split/9-workload/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling-x/stress-split/9-workload/rados_api_tests.yaml b/suites/upgrade/dumpling-x/stress-split/9-workload/rados_api_tests.yaml deleted file mode 100644 index 7b2c72cbb2e..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/9-workload/rados_api_tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rados/test-upgrade-firefly.sh diff --git a/suites/upgrade/dumpling-x/stress-split/9-workload/rbd-python.yaml b/suites/upgrade/dumpling-x/stress-split/9-workload/rbd-python.yaml deleted file mode 100644 index 1c5e53906f8..00000000000 --- 
a/suites/upgrade/dumpling-x/stress-split/9-workload/rbd-python.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rbd/test_librbd_python.sh diff --git a/suites/upgrade/dumpling-x/stress-split/9-workload/rgw-s3tests.yaml b/suites/upgrade/dumpling-x/stress-split/9-workload/rgw-s3tests.yaml deleted file mode 100644 index e44546dbcaa..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/9-workload/rgw-s3tests.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- rgw: - default_idle_timeout: 300 - client.0: -- swift: - client.0: - rgw_server: client.0 - diff --git a/suites/upgrade/dumpling-x/stress-split/9-workload/snaps-many-objects.yaml b/suites/upgrade/dumpling-x/stress-split/9-workload/snaps-many-objects.yaml deleted file mode 100644 index 9e311c946e1..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/9-workload/snaps-many-objects.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 500 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/upgrade/dumpling-x/stress-split/distros b/suites/upgrade/dumpling-x/stress-split/distros deleted file mode 120000 index 79010c36a59..00000000000 --- a/suites/upgrade/dumpling-x/stress-split/distros +++ /dev/null @@ -1 +0,0 @@ -../../../../distros/supported \ No newline at end of file diff --git a/suites/upgrade/dumpling/fs/% b/suites/upgrade/dumpling/fs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling/fs/0-cluster/start.yaml b/suites/upgrade/dumpling/fs/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/dumpling/fs/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/cuttlefish.v0.67.1.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/cuttlefish.v0.67.1.yaml deleted file mode 100644 index 032340ba25f..00000000000 --- a/suites/upgrade/dumpling/fs/1-dumpling-install/cuttlefish.v0.67.1.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: cuttlefish -- ceph: -- install.upgrade: - all: - tag: v0.67.1 -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.1.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.1.yaml deleted file mode 100644 index a5bf1fa9073..00000000000 --- a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.1.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.1 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.2.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.2.yaml deleted file mode 100644 index d39967fe408..00000000000 --- a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.2.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.2 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.3.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.3.yaml deleted file mode 100644 index d0c1861193d..00000000000 --- a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.3.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.3 -- ceph: -- parallel: - - workload - - 
upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.4.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.4.yaml deleted file mode 100644 index 4e7d7c5fba1..00000000000 --- a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.4.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.4 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.5.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.5.yaml deleted file mode 100644 index 611b6d6b822..00000000000 --- a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.5.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.5 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.7.yaml b/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.7.yaml deleted file mode 100644 index 7cb8fcc22ac..00000000000 --- a/suites/upgrade/dumpling/fs/1-dumpling-install/v0.67.7.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.7 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/fs/2-workload/blogbench.yaml b/suites/upgrade/dumpling/fs/2-workload/blogbench.yaml deleted file mode 100644 index 0cd59eaafde..00000000000 --- a/suites/upgrade/dumpling/fs/2-workload/blogbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -workload: - workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index 38bba91895d..00000000000 --- a/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index 5b617fdfd5a..00000000000 --- a/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index 91c146a2070..00000000000 --- a/suites/upgrade/dumpling/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml 
+++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] diff --git a/suites/upgrade/dumpling/fs/4-final/monthrash.yaml b/suites/upgrade/dumpling/fs/4-final/monthrash.yaml deleted file mode 100644 index 13af446eb3b..00000000000 --- a/suites/upgrade/dumpling/fs/4-final/monthrash.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- ceph-fuse: -- workunit: - clients: - client.0: - - suites/dbench.sh - diff --git a/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml b/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml deleted file mode 100644 index dbd7191e36c..00000000000 --- a/suites/upgrade/dumpling/fs/4-final/osdthrash.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- ceph-fuse: -- workunit: - clients: - all: - - suites/iogen.sh - diff --git a/suites/upgrade/dumpling/rados/% b/suites/upgrade/dumpling/rados/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling/rados/0-cluster/start.yaml b/suites/upgrade/dumpling/rados/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/dumpling/rados/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/cuttlefish.v0.67.1.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/cuttlefish.v0.67.1.yaml deleted file mode 100644 index 032340ba25f..00000000000 --- a/suites/upgrade/dumpling/rados/1-dumpling-install/cuttlefish.v0.67.1.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: cuttlefish -- ceph: -- install.upgrade: - all: - tag: v0.67.1 -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.1.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.1.yaml deleted file mode 100644 index a5bf1fa9073..00000000000 --- a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.1.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.1 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.2.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.2.yaml deleted file mode 100644 index d39967fe408..00000000000 --- a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.2.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.2 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.3.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.3.yaml deleted file mode 100644 index d0c1861193d..00000000000 
--- a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.3.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.3 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.4.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.4.yaml deleted file mode 100644 index 4e7d7c5fba1..00000000000 --- a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.4.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.4 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.5.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.5.yaml deleted file mode 100644 index 611b6d6b822..00000000000 --- a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.5.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.5 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.7.yaml b/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.7.yaml deleted file mode 100644 index 7cb8fcc22ac..00000000000 --- a/suites/upgrade/dumpling/rados/1-dumpling-install/v0.67.7.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.7 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rados/2-workload/testrados.yaml b/suites/upgrade/dumpling/rados/2-workload/testrados.yaml deleted file mode 100644 index 8eaab19fd9e..00000000000 --- a/suites/upgrade/dumpling/rados/2-workload/testrados.yaml +++ /dev/null @@ -1,13 +0,0 @@ -workload: - rados: - clients: [client.0] - ops: 2000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - diff --git a/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index 38bba91895d..00000000000 --- a/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index 5b617fdfd5a..00000000000 --- a/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - 
sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index 801bab9f1f0..00000000000 --- a/suites/upgrade/dumpling/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,35 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 diff --git a/suites/upgrade/dumpling/rados/4-final/monthrash.yaml b/suites/upgrade/dumpling/rados/4-final/monthrash.yaml deleted file mode 100644 index 810ba1b30e2..00000000000 --- a/suites/upgrade/dumpling/rados/4-final/monthrash.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- workunit: - clients: - client.0: - - rados/test.sh - diff --git a/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml b/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml deleted file mode 100644 index f81504233ad..00000000000 --- a/suites/upgrade/dumpling/rados/4-final/osdthrash.yaml +++ /dev/null @@ -1,23 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- rados: - clients: [client.0] - ops: 2000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - diff --git a/suites/upgrade/dumpling/rbd/% b/suites/upgrade/dumpling/rbd/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling/rbd/0-cluster/start.yaml b/suites/upgrade/dumpling/rbd/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/dumpling/rbd/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/cuttlefish.v0.67.1.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/cuttlefish.v0.67.1.yaml deleted file mode 100644 index 032340ba25f..00000000000 --- a/suites/upgrade/dumpling/rbd/1-dumpling-install/cuttlefish.v0.67.1.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: cuttlefish -- ceph: -- install.upgrade: - all: - tag: v0.67.1 -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.1.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.1.yaml deleted file mode 100644 index a5bf1fa9073..00000000000 --- a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.1.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.1 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.2.yaml 
b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.2.yaml deleted file mode 100644 index d39967fe408..00000000000 --- a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.2.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.2 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.3.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.3.yaml deleted file mode 100644 index d0c1861193d..00000000000 --- a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.3.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.3 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.4.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.4.yaml deleted file mode 100644 index 4e7d7c5fba1..00000000000 --- a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.4.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.4 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.5.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.5.yaml deleted file mode 100644 index 611b6d6b822..00000000000 --- a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.5.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.5 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.7.yaml b/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.7.yaml deleted file mode 100644 index 7cb8fcc22ac..00000000000 --- a/suites/upgrade/dumpling/rbd/1-dumpling-install/v0.67.7.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.7 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rbd/2-workload/rbd.yaml b/suites/upgrade/dumpling/rbd/2-workload/rbd.yaml deleted file mode 100644 index ce2fabe0359..00000000000 --- a/suites/upgrade/dumpling/rbd/2-workload/rbd.yaml +++ /dev/null @@ -1,14 +0,0 @@ -workload: - sequential: - - workunit: - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format - - workunit: - clients: - client.0: - - cls/test_cls_rbd.sh - - diff --git a/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index 38bba91895d..00000000000 --- a/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index 5b617fdfd5a..00000000000 --- a/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: 
dumpling - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index 91c146a2070..00000000000 --- a/suites/upgrade/dumpling/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] diff --git a/suites/upgrade/dumpling/rbd/4-final/monthrash.yaml b/suites/upgrade/dumpling/rbd/4-final/monthrash.yaml deleted file mode 100644 index 593191c24f5..00000000000 --- a/suites/upgrade/dumpling/rbd/4-final/monthrash.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- workunit: - clients: - client.0: - - rbd/copy.sh - env: - RBD_CREATE_ARGS: --new-format - diff --git a/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml b/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml deleted file mode 100644 index 575fd7922ad..00000000000 --- a/suites/upgrade/dumpling/rbd/4-final/osdthrash.yaml +++ /dev/null @@ -1,16 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- workunit: - clients: - client.0: - - rbd/test_lock_fence.sh - diff --git a/suites/upgrade/dumpling/rgw/% b/suites/upgrade/dumpling/rgw/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/dumpling/rgw/0-cluster/start.yaml b/suites/upgrade/dumpling/rgw/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/dumpling/rgw/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/cuttlefish.v0.67.1.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/cuttlefish.v0.67.1.yaml deleted file mode 100644 index 032340ba25f..00000000000 --- a/suites/upgrade/dumpling/rgw/1-dumpling-install/cuttlefish.v0.67.1.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: cuttlefish -- ceph: -- install.upgrade: - all: - tag: v0.67.1 -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.1.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.1.yaml deleted file mode 
100644 index a5bf1fa9073..00000000000 --- a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.1.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.1 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.2.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.2.yaml deleted file mode 100644 index d39967fe408..00000000000 --- a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.2.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.2 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.3.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.3.yaml deleted file mode 100644 index d0c1861193d..00000000000 --- a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.3.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.3 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.4.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.4.yaml deleted file mode 100644 index 4e7d7c5fba1..00000000000 --- a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.4.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.4 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.5.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.5.yaml deleted file mode 100644 index 611b6d6b822..00000000000 --- a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.5.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.5 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.7.yaml b/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.7.yaml deleted file mode 100644 index 7cb8fcc22ac..00000000000 --- a/suites/upgrade/dumpling/rgw/1-dumpling-install/v0.67.7.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.67.7 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/dumpling/rgw/2-workload/testrgw.yaml b/suites/upgrade/dumpling/rgw/2-workload/testrgw.yaml deleted file mode 100644 index f1b2f3e88ef..00000000000 --- a/suites/upgrade/dumpling/rgw/2-workload/testrgw.yaml +++ /dev/null @@ -1,6 +0,0 @@ -workload: - rgw: [client.0] - s3tests: - client.0: - rgw_server: client.0 - diff --git a/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index ff9129046e1..00000000000 --- a/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,36 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 30 - - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml 
b/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index 75face28d88..00000000000 --- a/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,36 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 30 - - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index a08c669bf9e..00000000000 --- a/suites/upgrade/dumpling/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,36 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: dumpling - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/dumpling/rgw/4-final/monthrash.yaml b/suites/upgrade/dumpling/rgw/4-final/monthrash.yaml deleted file mode 100644 index 9361edc8015..00000000000 --- a/suites/upgrade/dumpling/rgw/4-final/monthrash.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- swift: - client.0: - rgw_server: client.0 - diff --git a/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml b/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml deleted file mode 100644 index 6cf6d861d5f..00000000000 --- a/suites/upgrade/dumpling/rgw/4-final/osdthrash.yaml +++ /dev/null @@ -1,15 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- swift: - client.0: - rgw_server: client.0 - diff --git a/suites/upgrade/emperor/fs/% b/suites/upgrade/emperor/fs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/emperor/fs/0-cluster/start.yaml b/suites/upgrade/emperor/fs/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/emperor/fs/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/emperor/fs/1-emperor-install/dumpling.v0.67.5.yaml b/suites/upgrade/emperor/fs/1-emperor-install/dumpling.v0.67.5.yaml deleted file mode 100644 index a2891c427c2..00000000000 --- a/suites/upgrade/emperor/fs/1-emperor-install/dumpling.v0.67.5.yaml +++ 
/dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: -- install.upgrade: - all: - tag: -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/fs/1-emperor-install/v0.73.yaml b/suites/upgrade/emperor/fs/1-emperor-install/v0.73.yaml deleted file mode 100644 index 7750040f138..00000000000 --- a/suites/upgrade/emperor/fs/1-emperor-install/v0.73.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.73 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/fs/1-emperor-install/v0.74.yaml b/suites/upgrade/emperor/fs/1-emperor-install/v0.74.yaml deleted file mode 100644 index 9d0ded4f56f..00000000000 --- a/suites/upgrade/emperor/fs/1-emperor-install/v0.74.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.74 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/fs/1-emperor-install/v0.75.yaml b/suites/upgrade/emperor/fs/1-emperor-install/v0.75.yaml deleted file mode 100644 index 368f0ec479e..00000000000 --- a/suites/upgrade/emperor/fs/1-emperor-install/v0.75.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.75 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/fs/2-workload/blogbench.yaml b/suites/upgrade/emperor/fs/2-workload/blogbench.yaml deleted file mode 100644 index 0cd59eaafde..00000000000 --- a/suites/upgrade/emperor/fs/2-workload/blogbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -workload: - workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index 520dc4b30cf..00000000000 --- a/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index e11f8c3b13f..00000000000 --- a/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index 
78cb33be1ec..00000000000 --- a/suites/upgrade/emperor/fs/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] diff --git a/suites/upgrade/emperor/fs/4-final/monthrash.yaml b/suites/upgrade/emperor/fs/4-final/monthrash.yaml deleted file mode 100644 index 13af446eb3b..00000000000 --- a/suites/upgrade/emperor/fs/4-final/monthrash.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- ceph-fuse: -- workunit: - clients: - client.0: - - suites/dbench.sh - diff --git a/suites/upgrade/emperor/fs/4-final/osdthrash.yaml b/suites/upgrade/emperor/fs/4-final/osdthrash.yaml deleted file mode 100644 index dbd7191e36c..00000000000 --- a/suites/upgrade/emperor/fs/4-final/osdthrash.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- ceph-fuse: -- workunit: - clients: - all: - - suites/iogen.sh - diff --git a/suites/upgrade/emperor/rados/% b/suites/upgrade/emperor/rados/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/emperor/rados/0-cluster/start.yaml b/suites/upgrade/emperor/rados/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/emperor/rados/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/emperor/rados/1-emperor-install/dumpling.v0.67.5.yaml b/suites/upgrade/emperor/rados/1-emperor-install/dumpling.v0.67.5.yaml deleted file mode 100644 index a2891c427c2..00000000000 --- a/suites/upgrade/emperor/rados/1-emperor-install/dumpling.v0.67.5.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: -- install.upgrade: - all: - tag: -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rados/1-emperor-install/v0.73.yaml b/suites/upgrade/emperor/rados/1-emperor-install/v0.73.yaml deleted file mode 100644 index 7750040f138..00000000000 --- a/suites/upgrade/emperor/rados/1-emperor-install/v0.73.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.73 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rados/1-emperor-install/v0.74.yaml b/suites/upgrade/emperor/rados/1-emperor-install/v0.74.yaml deleted file mode 100644 index 9d0ded4f56f..00000000000 --- a/suites/upgrade/emperor/rados/1-emperor-install/v0.74.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.74 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rados/1-emperor-install/v0.75.yaml b/suites/upgrade/emperor/rados/1-emperor-install/v0.75.yaml deleted file mode 100644 
index 368f0ec479e..00000000000 --- a/suites/upgrade/emperor/rados/1-emperor-install/v0.75.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.75 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rados/2-workload/testrados.yaml b/suites/upgrade/emperor/rados/2-workload/testrados.yaml deleted file mode 100644 index 8eaab19fd9e..00000000000 --- a/suites/upgrade/emperor/rados/2-workload/testrados.yaml +++ /dev/null @@ -1,13 +0,0 @@ -workload: - rados: - clients: [client.0] - ops: 2000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - diff --git a/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index 520dc4b30cf..00000000000 --- a/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index e11f8c3b13f..00000000000 --- a/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index f0fa4b886c5..00000000000 --- a/suites/upgrade/emperor/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,35 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 diff --git a/suites/upgrade/emperor/rados/4-final/monthrash.yaml b/suites/upgrade/emperor/rados/4-final/monthrash.yaml deleted file mode 100644 index 
810ba1b30e2..00000000000 --- a/suites/upgrade/emperor/rados/4-final/monthrash.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- workunit: - clients: - client.0: - - rados/test.sh - diff --git a/suites/upgrade/emperor/rados/4-final/osdthrash.yaml b/suites/upgrade/emperor/rados/4-final/osdthrash.yaml deleted file mode 100644 index f81504233ad..00000000000 --- a/suites/upgrade/emperor/rados/4-final/osdthrash.yaml +++ /dev/null @@ -1,23 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- rados: - clients: [client.0] - ops: 2000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 - diff --git a/suites/upgrade/emperor/rbd/% b/suites/upgrade/emperor/rbd/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/emperor/rbd/0-cluster/start.yaml b/suites/upgrade/emperor/rbd/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/emperor/rbd/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/emperor/rbd/1-emperor-install/dumpling.v0.67.5.yaml b/suites/upgrade/emperor/rbd/1-emperor-install/dumpling.v0.67.5.yaml deleted file mode 100644 index a2891c427c2..00000000000 --- a/suites/upgrade/emperor/rbd/1-emperor-install/dumpling.v0.67.5.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: -- install.upgrade: - all: - tag: -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rbd/1-emperor-install/v0.73.yaml b/suites/upgrade/emperor/rbd/1-emperor-install/v0.73.yaml deleted file mode 100644 index 7750040f138..00000000000 --- a/suites/upgrade/emperor/rbd/1-emperor-install/v0.73.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.73 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rbd/1-emperor-install/v0.74.yaml b/suites/upgrade/emperor/rbd/1-emperor-install/v0.74.yaml deleted file mode 100644 index 9d0ded4f56f..00000000000 --- a/suites/upgrade/emperor/rbd/1-emperor-install/v0.74.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.74 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rbd/1-emperor-install/v0.75.yaml b/suites/upgrade/emperor/rbd/1-emperor-install/v0.75.yaml deleted file mode 100644 index 368f0ec479e..00000000000 --- a/suites/upgrade/emperor/rbd/1-emperor-install/v0.75.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.75 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rbd/2-workload/rbd.yaml b/suites/upgrade/emperor/rbd/2-workload/rbd.yaml deleted file mode 100644 index ce2fabe0359..00000000000 --- a/suites/upgrade/emperor/rbd/2-workload/rbd.yaml +++ /dev/null @@ -1,14 +0,0 @@ -workload: - sequential: - - workunit: - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format - - workunit: - clients: - client.0: - - cls/test_cls_rbd.sh - - diff --git a/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml 
b/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index 520dc4b30cf..00000000000 --- a/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index e11f8c3b13f..00000000000 --- a/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] diff --git a/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index 78cb33be1ec..00000000000 --- a/suites/upgrade/emperor/rbd/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] diff --git a/suites/upgrade/emperor/rbd/4-final/monthrash.yaml b/suites/upgrade/emperor/rbd/4-final/monthrash.yaml deleted file mode 100644 index 593191c24f5..00000000000 --- a/suites/upgrade/emperor/rbd/4-final/monthrash.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- workunit: - clients: - client.0: - - rbd/copy.sh - env: - RBD_CREATE_ARGS: --new-format - diff --git a/suites/upgrade/emperor/rbd/4-final/osdthrash.yaml b/suites/upgrade/emperor/rbd/4-final/osdthrash.yaml deleted file mode 100644 index 575fd7922ad..00000000000 --- a/suites/upgrade/emperor/rbd/4-final/osdthrash.yaml +++ /dev/null @@ -1,16 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- workunit: - clients: - client.0: - - rbd/test_lock_fence.sh - diff --git 
a/suites/upgrade/emperor/rgw/% b/suites/upgrade/emperor/rgw/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/emperor/rgw/0-cluster/start.yaml b/suites/upgrade/emperor/rgw/0-cluster/start.yaml deleted file mode 100644 index c1acc4e8ad6..00000000000 --- a/suites/upgrade/emperor/rgw/0-cluster/start.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - fs: xfs -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 diff --git a/suites/upgrade/emperor/rgw/1-emperor-install/dumpling.v0.67.5.yaml b/suites/upgrade/emperor/rgw/1-emperor-install/dumpling.v0.67.5.yaml deleted file mode 100644 index a2891c427c2..00000000000 --- a/suites/upgrade/emperor/rgw/1-emperor-install/dumpling.v0.67.5.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: -- install.upgrade: - all: - tag: -- ceph.restart: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rgw/1-emperor-install/v0.73.yaml b/suites/upgrade/emperor/rgw/1-emperor-install/v0.73.yaml deleted file mode 100644 index 7750040f138..00000000000 --- a/suites/upgrade/emperor/rgw/1-emperor-install/v0.73.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.73 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rgw/1-emperor-install/v0.74.yaml b/suites/upgrade/emperor/rgw/1-emperor-install/v0.74.yaml deleted file mode 100644 index 9d0ded4f56f..00000000000 --- a/suites/upgrade/emperor/rgw/1-emperor-install/v0.74.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.74 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rgw/1-emperor-install/v0.75.yaml b/suites/upgrade/emperor/rgw/1-emperor-install/v0.75.yaml deleted file mode 100644 index 368f0ec479e..00000000000 --- a/suites/upgrade/emperor/rgw/1-emperor-install/v0.75.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.75 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/emperor/rgw/2-workload/testrgw.yaml b/suites/upgrade/emperor/rgw/2-workload/testrgw.yaml deleted file mode 100644 index f1b2f3e88ef..00000000000 --- a/suites/upgrade/emperor/rgw/2-workload/testrgw.yaml +++ /dev/null @@ -1,6 +0,0 @@ -workload: - rgw: [client.0] - s3tests: - client.0: - rgw_server: client.0 - diff --git a/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index 0ae6f928096..00000000000 --- a/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,36 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 30 - - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted 
file mode 100644 index 3f0aad2ec86..00000000000 --- a/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,36 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 30 - - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index 622b02d4271..00000000000 --- a/suites/upgrade/emperor/rgw/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,36 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [rgw.client.0] diff --git a/suites/upgrade/emperor/rgw/4-final/monthrash.yaml b/suites/upgrade/emperor/rgw/4-final/monthrash.yaml deleted file mode 100644 index 9361edc8015..00000000000 --- a/suites/upgrade/emperor/rgw/4-final/monthrash.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- mon_thrash: - revive_delay: 20 - thrash_delay: 1 -- swift: - client.0: - rgw_server: client.0 - diff --git a/suites/upgrade/emperor/rgw/4-final/osdthrash.yaml b/suites/upgrade/emperor/rgw/4-final/osdthrash.yaml deleted file mode 100644 index 6cf6d861d5f..00000000000 --- a/suites/upgrade/emperor/rgw/4-final/osdthrash.yaml +++ /dev/null @@ -1,15 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 -- swift: - client.0: - rgw_server: client.0 - diff --git a/suites/upgrade/firefly/newer/% b/suites/upgrade/firefly/newer/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/firefly/newer/0-cluster/start.yaml b/suites/upgrade/firefly/newer/0-cluster/start.yaml deleted file mode 100644 index 1e7ad743675..00000000000 --- a/suites/upgrade/firefly/newer/0-cluster/start.yaml +++ /dev/null @@ -1,24 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - - scrub mismatch - - ScrubResult - - osd_map_max_advance - fs: xfs - conf: - osd: - osd map max advance: 1000 -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 -- - client.0 - - client.1 diff --git a/suites/upgrade/firefly/newer/1-install/v0.80.10.yaml b/suites/upgrade/firefly/newer/1-install/v0.80.10.yaml deleted file mode 100644 index 40e3883773d..00000000000 --- a/suites/upgrade/firefly/newer/1-install/v0.80.10.yaml +++ 
/dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.80.10 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/newer/1-install/v0.80.4.yaml b/suites/upgrade/firefly/newer/1-install/v0.80.4.yaml deleted file mode 100644 index 371fc35502f..00000000000 --- a/suites/upgrade/firefly/newer/1-install/v0.80.4.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.80.4 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/newer/1-install/v0.80.5.yaml b/suites/upgrade/firefly/newer/1-install/v0.80.5.yaml deleted file mode 100644 index 90f7dfd9860..00000000000 --- a/suites/upgrade/firefly/newer/1-install/v0.80.5.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.80.5 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/newer/1-install/v0.80.6.yaml b/suites/upgrade/firefly/newer/1-install/v0.80.6.yaml deleted file mode 100644 index c6502a03dab..00000000000 --- a/suites/upgrade/firefly/newer/1-install/v0.80.6.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.80.6 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/newer/1-install/v0.80.8.yaml b/suites/upgrade/firefly/newer/1-install/v0.80.8.yaml deleted file mode 100644 index 1582dcf4479..00000000000 --- a/suites/upgrade/firefly/newer/1-install/v0.80.8.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.80.8 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/newer/1-install/v0.80.9.yaml b/suites/upgrade/firefly/newer/1-install/v0.80.9.yaml deleted file mode 100644 index 374b7e0f436..00000000000 --- a/suites/upgrade/firefly/newer/1-install/v0.80.9.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install: - tag: v0.80.9 -- ceph: -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/newer/2-workload/+ b/suites/upgrade/firefly/newer/2-workload/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/firefly/newer/2-workload/blogbench.yaml b/suites/upgrade/firefly/newer/2-workload/blogbench.yaml deleted file mode 100644 index 909f5bc1c75..00000000000 --- a/suites/upgrade/firefly/newer/2-workload/blogbench.yaml +++ /dev/null @@ -1,6 +0,0 @@ -workload: - sequential: - - workunit: - clients: - client.0: - - suites/blogbench.sh diff --git a/suites/upgrade/firefly/newer/2-workload/rbd.yaml b/suites/upgrade/firefly/newer/2-workload/rbd.yaml deleted file mode 100644 index 0d2f3ad72ef..00000000000 --- a/suites/upgrade/firefly/newer/2-workload/rbd.yaml +++ /dev/null @@ -1,12 +0,0 @@ -workload: - sequential: - - workunit: - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format - - workunit: - clients: - client.0: - - cls/test_cls_rbd.sh diff --git a/suites/upgrade/firefly/newer/2-workload/s3tests.yaml b/suites/upgrade/firefly/newer/2-workload/s3tests.yaml deleted file mode 100644 index 1de84f2de2e..00000000000 --- a/suites/upgrade/firefly/newer/2-workload/s3tests.yaml +++ /dev/null @@ -1,7 +0,0 @@ -workload: - sequential: - - rgw: [client.0] - - s3tests: - client.0: - force-branch: firefly-original - rgw_server: client.0 diff --git a/suites/upgrade/firefly/newer/2-workload/testrados.yaml b/suites/upgrade/firefly/newer/2-workload/testrados.yaml deleted file mode 100644 index 98f426b3737..00000000000 --- a/suites/upgrade/firefly/newer/2-workload/testrados.yaml +++ /dev/null @@ -1,13 +0,0 @@ -workload: - sequential: - - rados: - clients: 
[client.0] - ops: 2000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index 32f2314f736..00000000000 --- a/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,35 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - mon.a: - branch: firefly - mon.b: - branch: firefly - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mds.a] diff --git a/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index 7a3dbe51e3f..00000000000 --- a/suites/upgrade/firefly/newer/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,35 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - mon.a: - branch: firefly - mon.b: - branch: firefly - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] diff --git a/suites/upgrade/firefly/newer/4-finish-upgrade.yaml b/suites/upgrade/firefly/newer/4-finish-upgrade.yaml deleted file mode 100644 index 3f55404ccdb..00000000000 --- a/suites/upgrade/firefly/newer/4-finish-upgrade.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install.upgrade: - client.0: diff --git a/suites/upgrade/firefly/newer/5-final/+ b/suites/upgrade/firefly/newer/5-final/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/firefly/newer/5-final/monthrash.yaml b/suites/upgrade/firefly/newer/5-final/monthrash.yaml deleted file mode 100644 index 8e321c47c41..00000000000 --- a/suites/upgrade/firefly/newer/5-final/monthrash.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- sequential: - - mon_thrash: - revive_delay: 20 - thrash_delay: 1 - - ceph-fuse: [client.0] - - workunit: - clients: - client.0: - - suites/dbench.sh - diff --git a/suites/upgrade/firefly/newer/5-final/osdthrash.yaml b/suites/upgrade/firefly/newer/5-final/osdthrash.yaml deleted file mode 100644 index 1efbf428039..00000000000 --- a/suites/upgrade/firefly/newer/5-final/osdthrash.yaml +++ /dev/null @@ -1,18 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- sequential: - - thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 - - ceph-fuse: [client.0] - - workunit: - clients: - client.0: - - suites/iogen.sh - diff --git a/suites/upgrade/firefly/newer/5-final/rbd.yaml 
b/suites/upgrade/firefly/newer/5-final/rbd.yaml deleted file mode 100644 index ee82941298d..00000000000 --- a/suites/upgrade/firefly/newer/5-final/rbd.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- sequential: - - workunit: - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format - - workunit: - clients: - client.0: - - cls/test_cls_rbd.sh diff --git a/suites/upgrade/firefly/newer/5-final/testrgw.yaml b/suites/upgrade/firefly/newer/5-final/testrgw.yaml deleted file mode 100644 index 9dfc14ea677..00000000000 --- a/suites/upgrade/firefly/newer/5-final/testrgw.yaml +++ /dev/null @@ -1,15 +0,0 @@ -tasks: -- sequential: - - rgw: [client.1] - - s3readwrite: - client.0: - rgw_server: client.1 - readwrite: - bucket: rwtest - readers: 10 - writers: 3 - duration: 300 - files: - num: 10 - size: 2000 - stddev: 500 diff --git a/suites/upgrade/firefly/newer/distros b/suites/upgrade/firefly/newer/distros deleted file mode 120000 index 79010c36a59..00000000000 --- a/suites/upgrade/firefly/newer/distros +++ /dev/null @@ -1 +0,0 @@ -../../../../distros/supported \ No newline at end of file diff --git a/suites/upgrade/firefly/older/% b/suites/upgrade/firefly/older/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/firefly/older/0-cluster/start.yaml b/suites/upgrade/firefly/older/0-cluster/start.yaml deleted file mode 100644 index 5dab05f107f..00000000000 --- a/suites/upgrade/firefly/older/0-cluster/start.yaml +++ /dev/null @@ -1,22 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - - osd_map_max_advance - fs: xfs - conf: - osd: - osd map max advance: 1000 -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 -- - client.1 - - client.0 diff --git a/suites/upgrade/firefly/older/1-install/dumpling.v0.80.1.yaml b/suites/upgrade/firefly/older/1-install/dumpling.v0.80.1.yaml deleted file mode 100644 index 518dfa8c500..00000000000 --- a/suites/upgrade/firefly/older/1-install/dumpling.v0.80.1.yaml +++ /dev/null @@ -1,20 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - conf: - mon: - mon warn on legacy crush tunables: false - log-whitelist: - - scrub mismatch - - ScrubResult -- install.upgrade: - all: - tag: v0.80.1 -- ceph.restart: -- exec: - client.0: - - ceph osd crush tunables firefly -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/older/1-install/emperor.v.80.1.yaml b/suites/upgrade/firefly/older/1-install/emperor.v.80.1.yaml deleted file mode 100644 index e41e50c1f65..00000000000 --- a/suites/upgrade/firefly/older/1-install/emperor.v.80.1.yaml +++ /dev/null @@ -1,20 +0,0 @@ -tasks: -- install: - branch: emperor -- ceph: - conf: - mon: - mon warn on legacy crush tunables: false - log-whitelist: - - scrub mismatch - - ScrubResult -- install.upgrade: - all: - tag: v0.80.1 -- ceph.restart: -- exec: - client.0: - - ceph osd crush tunables firefly -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/older/1-install/latest_dumpling_release.yaml b/suites/upgrade/firefly/older/1-install/latest_dumpling_release.yaml deleted file mode 100644 index 090ff7f1020..00000000000 --- a/suites/upgrade/firefly/older/1-install/latest_dumpling_release.yaml +++ /dev/null @@ -1,17 +0,0 @@ -overrides: - ceph: - conf: - mon: - mon warn on legacy crush tunables: false - thrashosds: - thrash_primary_affinity: false -tasks: -- install: - tag: v0.67.11 -- ceph: - log-whitelist: - - scrub mismatch - - ScrubResult -- parallel: - - workload - - 
upgrade-sequence diff --git a/suites/upgrade/firefly/older/1-install/v0.80.1.yaml b/suites/upgrade/firefly/older/1-install/v0.80.1.yaml deleted file mode 100644 index 8c6d1fda3c4..00000000000 --- a/suites/upgrade/firefly/older/1-install/v0.80.1.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: - tag: v0.80.1 -- ceph: - log-whitelist: - - scrub mismatch - - ScrubResult -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/older/1-install/v0.80.2.yaml b/suites/upgrade/firefly/older/1-install/v0.80.2.yaml deleted file mode 100644 index d03e4f777c3..00000000000 --- a/suites/upgrade/firefly/older/1-install/v0.80.2.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: - tag: v0.80.2 -- ceph: - log-whitelist: - - scrub mismatch - - ScrubResult -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/older/1-install/v0.80.3.yaml b/suites/upgrade/firefly/older/1-install/v0.80.3.yaml deleted file mode 100644 index 6396ab617f8..00000000000 --- a/suites/upgrade/firefly/older/1-install/v0.80.3.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: - tag: v0.80.3 -- ceph: - log-whitelist: - - scrub mismatch - - ScrubResult -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/older/1-install/v0.80.yaml b/suites/upgrade/firefly/older/1-install/v0.80.yaml deleted file mode 100644 index eb945e62eb7..00000000000 --- a/suites/upgrade/firefly/older/1-install/v0.80.yaml +++ /dev/null @@ -1,10 +0,0 @@ -tasks: -- install: - tag: v0.80 -- ceph: - log-whitelist: - - scrub mismatch - - ScrubResult -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/firefly/older/2-workload/+ b/suites/upgrade/firefly/older/2-workload/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/firefly/older/2-workload/blogbench.yaml b/suites/upgrade/firefly/older/2-workload/blogbench.yaml deleted file mode 100644 index 909f5bc1c75..00000000000 --- a/suites/upgrade/firefly/older/2-workload/blogbench.yaml +++ /dev/null @@ -1,6 +0,0 @@ -workload: - sequential: - - workunit: - clients: - client.0: - - suites/blogbench.sh diff --git a/suites/upgrade/firefly/older/2-workload/radosloadgen.yaml b/suites/upgrade/firefly/older/2-workload/radosloadgen.yaml deleted file mode 100644 index 572f4755e66..00000000000 --- a/suites/upgrade/firefly/older/2-workload/radosloadgen.yaml +++ /dev/null @@ -1,6 +0,0 @@ -workload: - sequential: - - workunit: - clients: - client.0: - - rados/load-gen-big.sh diff --git a/suites/upgrade/firefly/older/2-workload/rbd.yaml b/suites/upgrade/firefly/older/2-workload/rbd.yaml deleted file mode 100644 index 0d2f3ad72ef..00000000000 --- a/suites/upgrade/firefly/older/2-workload/rbd.yaml +++ /dev/null @@ -1,12 +0,0 @@ -workload: - sequential: - - workunit: - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format - - workunit: - clients: - client.0: - - cls/test_cls_rbd.sh diff --git a/suites/upgrade/firefly/older/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/firefly/older/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index 120de5cca8e..00000000000 --- a/suites/upgrade/firefly/older/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,37 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - mon.a: - branch: firefly - mon.b: - branch: firefly - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - 
- ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 diff --git a/suites/upgrade/firefly/older/4-finish-upgrade.yaml b/suites/upgrade/firefly/older/4-finish-upgrade.yaml deleted file mode 100644 index 3f55404ccdb..00000000000 --- a/suites/upgrade/firefly/older/4-finish-upgrade.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install.upgrade: - client.0: diff --git a/suites/upgrade/firefly/older/5-final/+ b/suites/upgrade/firefly/older/5-final/+ deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/firefly/older/5-final/monthrash.yaml b/suites/upgrade/firefly/older/5-final/monthrash.yaml deleted file mode 100644 index 8e321c47c41..00000000000 --- a/suites/upgrade/firefly/older/5-final/monthrash.yaml +++ /dev/null @@ -1,11 +0,0 @@ -tasks: -- sequential: - - mon_thrash: - revive_delay: 20 - thrash_delay: 1 - - ceph-fuse: [client.0] - - workunit: - clients: - client.0: - - suites/dbench.sh - diff --git a/suites/upgrade/firefly/older/5-final/osdthrash.yaml b/suites/upgrade/firefly/older/5-final/osdthrash.yaml deleted file mode 100644 index 1efbf428039..00000000000 --- a/suites/upgrade/firefly/older/5-final/osdthrash.yaml +++ /dev/null @@ -1,18 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- sequential: - - thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 - - ceph-fuse: [client.0] - - workunit: - clients: - client.0: - - suites/iogen.sh - diff --git a/suites/upgrade/firefly/older/5-final/rbd.yaml b/suites/upgrade/firefly/older/5-final/rbd.yaml deleted file mode 100644 index ee82941298d..00000000000 --- a/suites/upgrade/firefly/older/5-final/rbd.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- sequential: - - workunit: - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format - - workunit: - clients: - client.0: - - cls/test_cls_rbd.sh diff --git a/suites/upgrade/firefly/older/5-final/testrgw.yaml b/suites/upgrade/firefly/older/5-final/testrgw.yaml deleted file mode 100644 index 5d388e86200..00000000000 --- a/suites/upgrade/firefly/older/5-final/testrgw.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- sequential: - - rgw: [client.1] - - s3tests: - client.1: - force-branch: firefly-original - rgw_server: client.1 diff --git a/suites/upgrade/firefly/older/distros b/suites/upgrade/firefly/older/distros deleted file mode 120000 index 79010c36a59..00000000000 --- a/suites/upgrade/firefly/older/distros +++ /dev/null @@ -1 +0,0 @@ -../../../../distros/supported \ No newline at end of file diff --git a/suites/upgrade/firefly/singleton/upgrade_client/% b/suites/upgrade/firefly/singleton/upgrade_client/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/firefly/singleton/upgrade_client/distros b/suites/upgrade/firefly/singleton/upgrade_client/distros deleted file mode 120000 index ea78f6570e5..00000000000 --- a/suites/upgrade/firefly/singleton/upgrade_client/distros +++ /dev/null @@ -1 +0,0 @@ -../../../../../distros/supported/ \ No newline at end of file diff --git a/suites/upgrade/firefly/singleton/upgrade_client/upgrade_client_first.yaml 
b/suites/upgrade/firefly/singleton/upgrade_client/upgrade_client_first.yaml deleted file mode 100644 index 882b69350ef..00000000000 --- a/suites/upgrade/firefly/singleton/upgrade_client/upgrade_client_first.yaml +++ /dev/null @@ -1,69 +0,0 @@ -# this case tests issue #9419 "dumpling->firefly upgrade, sending setallochint?" -overrides: - ceph: - conf: - mon: - mon warn on legacy crush tunables: false - log-whitelist: - - scrub mismatch - - ScrubResult -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 -tasks: -- install: - branch: dumpling -- print: "**** done install dumpling" -- ceph: - fs: xfs -- print: "**** done ceph" -- install.upgrade: - client.0: -- print: "**** done install.upgrade on clinet.0" -- install.upgrade: - mon.a: - mon.b: -- print: "**** done install.upgrade" -- ceph.restart: - #osd.2 is not upgraded - daemons: [mon.a, mon.b, mon.c, osd.0, osd.1] -- print: "**** done restart all" -- workunit: - branch: firefly - clients: - client.0: - - rbd/test_librbd_python.sh -- print: "**** done rbd/test_librbd_python.sh" -- workunit: - branch: dumpling - clients: - client.0: - - rados/load-gen-big.sh -- print: "**** done rados/load-gen-big.sh" -- workunit: - branch: firefly - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format -- print: "**** done rbd/import_export.sh" -- workunit: - branch: firefly - clients: - client.0: - - cls/test_cls_rbd.sh -- print: "**** done cls/test_cls_rbd.sh" -- rgw: [client.0] -- s3tests: - client.0: - force-branch: firefly - rgw_server: client.0 -- print: "**** done s3tests" diff --git a/suites/upgrade/firefly/singleton/versions-steps/% b/suites/upgrade/firefly/singleton/versions-steps/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/firefly/singleton/versions-steps/distros b/suites/upgrade/firefly/singleton/versions-steps/distros deleted file mode 120000 index ea78f6570e5..00000000000 --- a/suites/upgrade/firefly/singleton/versions-steps/distros +++ /dev/null @@ -1 +0,0 @@ -../../../../../distros/supported/ \ No newline at end of file diff --git a/suites/upgrade/firefly/singleton/versions-steps/versions-steps.yaml b/suites/upgrade/firefly/singleton/versions-steps/versions-steps.yaml deleted file mode 100644 index 116811c12ea..00000000000 --- a/suites/upgrade/firefly/singleton/versions-steps/versions-steps.yaml +++ /dev/null @@ -1,342 +0,0 @@ -overrides: - ceph: - log-whitelist: - - scrub - - osd_map_max_advance - fs: xfs - conf: - osd: - osd map max advance: 1000 -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - mon.b - - mon.c - - osd.3 - - osd.4 - - osd.5 - - client.0 -- - client.1 -tasks: -- install: - tag: v0.80.4 -- print: "**** done v0.80.4 install" -- ceph: - fs: xfs -- print: "**** done ceph xfs" -- sequential: - - workload -- print: "**** done workload v0.80.4" -- parallel: - - workload1 - - upgrade-sequence1 -- print: "**** done parallel v0.80.5" -- parallel: - - workload2 - - upgrade-sequence2 -- print: "**** done parallel v0.80.7" -- parallel: - - workload3 - - upgrade-sequence3 -- print: "**** done parallel v0.80.8" -- parallel: - - workload4 - - upgrade-sequence4 -- print: "**** done parallel v0.80.9" -- parallel: - - workload_firefly - - upgrade-sequence_firefly -- print: "**** done parallel firefly branch" -####################### -workload: - sequential: - - workunit: - clients: - client.0: - - suites/blogbench.sh - - print: "**** done suites/blogbench.sh workload" -workload1: - sequential: - - 
workunit: - clients: - client.0: - - rados/load-gen-big.sh - - print: "**** done rados/load-gen-big.sh workload1" - - workunit: - clients: - client.0: - - rados/test.sh - - cls - - print: "**** done rados/test.sh & cls workload1" - - workunit: - clients: - client.0: - - rbd/test_librbd.sh - - print: "**** done rbd/test_librbd.sh workload1" -upgrade-sequence1: - sequential: - - install.upgrade: - mon.a: - tag: v0.80.5 - mon.b: - tag: v0.80.5 - client.1: - tag: v0.80.5 - - print: "**** done v0.80.5 install.upgrade" - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 30 - - print: "**** done ceph.restart all 1 mon/mds/osd" -workload2: - sequential: -# removed to fix #10176 -# - workunit: -# clients: -# client.0: -# - rbd/import_export.sh -# env: -# RBD_CREATE_ARGS: --new-format - - workunit: - clients: - client.0: - - cls/test_cls_rbd.sh - - print: "**** done cls/test_cls_rbd.sh workload2" -upgrade-sequence2: - sequential: - - install.upgrade: - mon.a: - tag: v0.80.7 - mon.b: - tag: v0.80.7 - client.1: - tag: v0.80.7 - - print: "**** done v0.80.7 install.upgrade" - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - print: "**** done ceph.restart all 2 osd/mon/mds" -workload3: - sequential: - - workunit: - clients: - client.0: - - rados/load-gen-big.sh - - print: "**** done rados/load-gen-big.sh workload3" - - workunit: - clients: - client.0: - - rados/test.sh - - cls - - print: "**** done rados/test.sh & cls workload3" - - workunit: - clients: - client.0: - - rbd/test_librbd.sh - - print: "**** done rbd/test_librbd.sh workload3" -upgrade-sequence3: - sequential: - - install.upgrade: - mon.a: - tag: v0.80.8 - mon.b: - tag: v0.80.8 - client.1: - tag: v0.80.8 - - print: "**** done v0.80.8 install.upgrade" - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 30 - - print: "**** done ceph.restart all mon/mds/osd upgrade-sequence3" -workload4: - sequential: - - workunit: - clients: - client.0: - - rados/load-gen-big.sh - - print: "**** done rados/load-gen-big.sh workload4" - - workunit: - clients: - client.0: - - rados/test.sh - - cls - - print: "**** done rados/test.sh & cls workload4" - - workunit: - clients: - client.0: - 
- rbd/test_librbd.sh - - print: "**** done rbd/test_librbd.sh workload4" -upgrade-sequence4: - sequential: - - install.upgrade: - mon.a: - tag: v0.80.9 - mon.b: - tag: v0.80.9 - client.1: - tag: v0.80.9 - - print: "**** done v0.80.9 install.upgrade" - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 30 - - print: "**** done ceph.restart all 1 mon/mds/osd upgrade-sequence4" -workload_firefly: - sequential: - - rgw: [client.0] - - print: "**** done rgw workload_firefly" - - s3tests: - client.0: - force-branch: firefly - rgw_server: client.0 - - print: "**** done s3tests workload_firefly" -upgrade-sequence_firefly: - sequential: - - install.upgrade: - mon.a: - branch: firefly - mon.b: - branch: firefly - client.1: - branch: firefly - - print: "**** done branch: firefly install.upgrade" - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 30 - - ceph.restart: [osd.1] - - sleep: - duration: 30 - - ceph.restart: [osd.2] - - sleep: - duration: 30 - - ceph.restart: [osd.3] - - sleep: - duration: 30 - - ceph.restart: [osd.4] - - sleep: - duration: 30 - - ceph.restart: [osd.5] - - sleep: - duration: 60 - - ceph.restart: [mon.a] - - sleep: - duration: 60 - - ceph.restart: [mon.b] - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - print: "**** done ceph.restart all firefly current branch mds/osd/mon" diff --git a/suites/upgrade/old/fs/fs/% b/suites/upgrade/old/fs/fs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/fs/fs/0-cluster/start.yaml b/suites/upgrade/old/fs/fs/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/fs/fs/0-cluster/start.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/fs/fs/1-cuttlefish-install/cuttlefish.yaml b/suites/upgrade/old/fs/fs/1-cuttlefish-install/cuttlefish.yaml deleted file mode 100644 index e427343d8c3..00000000000 --- a/suites/upgrade/old/fs/fs/1-cuttlefish-install/cuttlefish.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: - branch: cuttlefish -- ceph: - fs: xfs -- ceph-fuse: diff --git a/suites/upgrade/old/fs/fs/2-cuttlefish-workload/blogbench.yaml b/suites/upgrade/old/fs/fs/2-cuttlefish-workload/blogbench.yaml deleted file mode 100644 index 50161b08114..00000000000 --- a/suites/upgrade/old/fs/fs/2-cuttlefish-workload/blogbench.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - client.0: - - suites/blogbench.sh diff --git a/suites/upgrade/old/fs/fs/2-cuttlefish-workload/dbench.yaml b/suites/upgrade/old/fs/fs/2-cuttlefish-workload/dbench.yaml deleted file mode 100644 index 3bb9040e251..00000000000 --- a/suites/upgrade/old/fs/fs/2-cuttlefish-workload/dbench.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - all: - - suites/dbench.sh diff --git a/suites/upgrade/old/fs/fs/2-cuttlefish-workload/iogen.yaml 
b/suites/upgrade/old/fs/fs/2-cuttlefish-workload/iogen.yaml deleted file mode 100644 index c832d2f5bb1..00000000000 --- a/suites/upgrade/old/fs/fs/2-cuttlefish-workload/iogen.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - client.0: - - suites/iogen.sh diff --git a/suites/upgrade/old/fs/fs/3-upgrade/dumpling.yaml b/suites/upgrade/old/fs/fs/3-upgrade/dumpling.yaml deleted file mode 100644 index e3e332c4f22..00000000000 --- a/suites/upgrade/old/fs/fs/3-upgrade/dumpling.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: dumpling diff --git a/suites/upgrade/old/fs/fs/4-restart/mds-mon-osd.yaml b/suites/upgrade/old/fs/fs/4-restart/mds-mon-osd.yaml deleted file mode 100644 index d21800684d3..00000000000 --- a/suites/upgrade/old/fs/fs/4-restart/mds-mon-osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/fs/fs/4-restart/mon-mds-osd.yaml b/suites/upgrade/old/fs/fs/4-restart/mon-mds-osd.yaml deleted file mode 100644 index 78e14e9472a..00000000000 --- a/suites/upgrade/old/fs/fs/4-restart/mon-mds-osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/fs/fs/4-restart/osd-mds-mon.yaml b/suites/upgrade/old/fs/fs/4-restart/osd-mds-mon.yaml deleted file mode 100644 index dbcd013b3f0..00000000000 --- a/suites/upgrade/old/fs/fs/4-restart/osd-mds-mon.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/suites/upgrade/old/fs/fs/5-dumpling-workload/fsstress.yaml b/suites/upgrade/old/fs/fs/5-dumpling-workload/fsstress.yaml deleted file mode 100644 index ae6f7936989..00000000000 --- a/suites/upgrade/old/fs/fs/5-dumpling-workload/fsstress.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - suites/fsstress.sh diff --git a/suites/upgrade/old/fs/fs/5-dumpling-workload/iogen.yaml b/suites/upgrade/old/fs/fs/5-dumpling-workload/iogen.yaml deleted file mode 100644 index 5aa4d3e091d..00000000000 --- a/suites/upgrade/old/fs/fs/5-dumpling-workload/iogen.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/iogen.sh diff --git a/suites/upgrade/old/fs/fs/5-dumpling-workload/kernel-untar-build.yaml b/suites/upgrade/old/fs/fs/5-dumpling-workload/kernel-untar-build.yaml deleted file mode 100644 index 7fc7de979ed..00000000000 --- a/suites/upgrade/old/fs/fs/5-dumpling-workload/kernel-untar-build.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - kernel_untar_build.sh diff --git a/suites/upgrade/old/fs/fs/5-dumpling-workload/tiobench.yaml b/suites/upgrade/old/fs/fs/5-dumpling-workload/tiobench.yaml deleted file mode 100644 index 58ee040af77..00000000000 --- a/suites/upgrade/old/fs/fs/5-dumpling-workload/tiobench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/tiobench.sh diff --git a/suites/upgrade/old/fs/fs/6-upgrade-to-emperor/emperor.yaml b/suites/upgrade/old/fs/fs/6-upgrade-to-emperor/emperor.yaml deleted file mode 100644 index e473f31862d..00000000000 --- a/suites/upgrade/old/fs/fs/6-upgrade-to-emperor/emperor.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: emperor diff --git a/suites/upgrade/old/fs/fs/7-restart/mds-mon-osd.yaml b/suites/upgrade/old/fs/fs/7-restart/mds-mon-osd.yaml deleted file mode 100644 index 
d21800684d3..00000000000 --- a/suites/upgrade/old/fs/fs/7-restart/mds-mon-osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/fs/fs/7-restart/mon-mds-osd.yaml b/suites/upgrade/old/fs/fs/7-restart/mon-mds-osd.yaml deleted file mode 100644 index 78e14e9472a..00000000000 --- a/suites/upgrade/old/fs/fs/7-restart/mon-mds-osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/fs/fs/7-restart/osd-mds-mon.yaml b/suites/upgrade/old/fs/fs/7-restart/osd-mds-mon.yaml deleted file mode 100644 index dbcd013b3f0..00000000000 --- a/suites/upgrade/old/fs/fs/7-restart/osd-mds-mon.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/suites/upgrade/old/fs/fs/8-emperor-workload/blogbench.yaml b/suites/upgrade/old/fs/fs/8-emperor-workload/blogbench.yaml deleted file mode 100644 index 4e54068f314..00000000000 --- a/suites/upgrade/old/fs/fs/8-emperor-workload/blogbench.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: emperor - clients: - client.0: - - suites/blogbench.sh diff --git a/suites/upgrade/old/fs/fs/8-emperor-workload/dbench.yaml b/suites/upgrade/old/fs/fs/8-emperor-workload/dbench.yaml deleted file mode 100644 index 365ba9ac8f4..00000000000 --- a/suites/upgrade/old/fs/fs/8-emperor-workload/dbench.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: emperor - clients: - all: - - suites/dbench.sh diff --git a/suites/upgrade/old/fs/fs/8-emperor-workload/iogen.yaml b/suites/upgrade/old/fs/fs/8-emperor-workload/iogen.yaml deleted file mode 100644 index 994fa8716d5..00000000000 --- a/suites/upgrade/old/fs/fs/8-emperor-workload/iogen.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: emperor - clients: - client.0: - - suites/iogen.sh diff --git a/suites/upgrade/old/mixed-cluster/mixed-cluster/% b/suites/upgrade/old/mixed-cluster/mixed-cluster/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/mixed-cluster/mixed-cluster/0-cluster/start.yaml b/suites/upgrade/old/mixed-cluster/mixed-cluster/0-cluster/start.yaml deleted file mode 100644 index 0a85eacad7f..00000000000 --- a/suites/upgrade/old/mixed-cluster/mixed-cluster/0-cluster/start.yaml +++ /dev/null @@ -1,11 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 - diff --git a/suites/upgrade/old/mixed-cluster/mixed-cluster/1-cuttlefish-install/cuttlefish.yaml b/suites/upgrade/old/mixed-cluster/mixed-cluster/1-cuttlefish-install/cuttlefish.yaml deleted file mode 100644 index 50b65f72bca..00000000000 --- a/suites/upgrade/old/mixed-cluster/mixed-cluster/1-cuttlefish-install/cuttlefish.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install: - branch: cuttlefish -- ceph: - diff --git a/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/api.yaml b/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/api.yaml deleted file mode 100644 index ad36bddb48c..00000000000 --- a/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/api.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - client.0: - - rados/test.sh - - cls - diff --git a/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/load-gen-mix.yaml 
b/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/load-gen-mix.yaml deleted file mode 100644 index 7ec655c8ccf..00000000000 --- a/suites/upgrade/old/mixed-cluster/mixed-cluster/2-cuttlefish-workload/load-gen-mix.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - client.0: - - rados/load-gen-mix.sh diff --git a/suites/upgrade/old/mixed-cluster/mixed-cluster/3-partial-osds-upgrade/dumpling.yaml b/suites/upgrade/old/mixed-cluster/mixed-cluster/3-partial-osds-upgrade/dumpling.yaml deleted file mode 100644 index a9b9bf8418c..00000000000 --- a/suites/upgrade/old/mixed-cluster/mixed-cluster/3-partial-osds-upgrade/dumpling.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- install.upgrade: - osd.0: - branch: dumpling - osd.2: - branch: dumpling - diff --git a/suites/upgrade/old/mixed-cluster/mixed-cluster/4-osds-restart/restart.yaml b/suites/upgrade/old/mixed-cluster/mixed-cluster/4-osds-restart/restart.yaml deleted file mode 100644 index 3a84bbb4074..00000000000 --- a/suites/upgrade/old/mixed-cluster/mixed-cluster/4-osds-restart/restart.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.2] diff --git a/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/api.yaml b/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/api.yaml deleted file mode 100644 index ad36bddb48c..00000000000 --- a/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/api.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - client.0: - - rados/test.sh - - cls - diff --git a/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/load-gen-big.yaml b/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/load-gen-big.yaml deleted file mode 100644 index 0f6e616a286..00000000000 --- a/suites/upgrade/old/mixed-cluster/mixed-cluster/5-mixed-workload/load-gen-big.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - client.0: - - rados/load-gen-big.sh diff --git a/suites/upgrade/old/mixed-mons/mixed-mons/% b/suites/upgrade/old/mixed-mons/mixed-mons/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/mixed-mons/mixed-mons/0-cluster/start.yaml b/suites/upgrade/old/mixed-mons/mixed-mons/0-cluster/start.yaml deleted file mode 100644 index 0a85eacad7f..00000000000 --- a/suites/upgrade/old/mixed-mons/mixed-mons/0-cluster/start.yaml +++ /dev/null @@ -1,11 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 - diff --git a/suites/upgrade/old/mixed-mons/mixed-mons/1-cuttlefish-install/cuttlefish.yaml b/suites/upgrade/old/mixed-mons/mixed-mons/1-cuttlefish-install/cuttlefish.yaml deleted file mode 100644 index 50b65f72bca..00000000000 --- a/suites/upgrade/old/mixed-mons/mixed-mons/1-cuttlefish-install/cuttlefish.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install: - branch: cuttlefish -- ceph: - diff --git a/suites/upgrade/old/mixed-mons/mixed-mons/2-cuttlefish-workload/cephtool.yaml b/suites/upgrade/old/mixed-mons/mixed-mons/2-cuttlefish-workload/cephtool.yaml deleted file mode 100644 index 8648784fd16..00000000000 --- a/suites/upgrade/old/mixed-mons/mixed-mons/2-cuttlefish-workload/cephtool.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - all: - - cephtool/test.sh - - mon/pool_ops.sh diff --git a/suites/upgrade/old/mixed-mons/mixed-mons/3-partial-mon-upgrade/dumpling.yaml 
b/suites/upgrade/old/mixed-mons/mixed-mons/3-partial-mon-upgrade/dumpling.yaml deleted file mode 100644 index 6c9d3206f16..00000000000 --- a/suites/upgrade/old/mixed-mons/mixed-mons/3-partial-mon-upgrade/dumpling.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - mon.a: - branch: dumpling diff --git a/suites/upgrade/old/mixed-mons/mixed-mons/4-mon-restart/restart.yaml b/suites/upgrade/old/mixed-mons/mixed-mons/4-mon-restart/restart.yaml deleted file mode 100644 index b6ffb3323d1..00000000000 --- a/suites/upgrade/old/mixed-mons/mixed-mons/4-mon-restart/restart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true diff --git a/suites/upgrade/old/mixed-mons/mixed-mons/5-mixed-workload/cephtool.yaml b/suites/upgrade/old/mixed-mons/mixed-mons/5-mixed-workload/cephtool.yaml deleted file mode 100644 index 8648784fd16..00000000000 --- a/suites/upgrade/old/mixed-mons/mixed-mons/5-mixed-workload/cephtool.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - all: - - cephtool/test.sh - - mon/pool_ops.sh diff --git a/suites/upgrade/old/mixed-mons/mixed-mons/6-rest/rest.yaml b/suites/upgrade/old/mixed-mons/mixed-mons/6-rest/rest.yaml deleted file mode 100644 index 18ae735e663..00000000000 --- a/suites/upgrade/old/mixed-mons/mixed-mons/6-rest/rest.yaml +++ /dev/null @@ -1,26 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - had wrong client addr - - had wrong cluster addr -tasks: -- install.upgrade: - mon.b: - branch: dumpling - client.0: - branch: dumpling -- ceph.restart: - daemons: - - mon.b - - mon.c - - osd.0 - - osd.1 - - osd.2 - - osd.3 -- workunit: - branch: dumpling - clients: - all: - - cephtool/test.sh - - mon/pool_ops.sh diff --git a/suites/upgrade/old/parallel/fs/% b/suites/upgrade/old/parallel/fs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/parallel/fs/0-cluster/start.yaml b/suites/upgrade/old/parallel/fs/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/parallel/fs/0-cluster/start.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/parallel/fs/1-dumpling-install/dumpling.yaml b/suites/upgrade/old/parallel/fs/1-dumpling-install/dumpling.yaml deleted file mode 100644 index 6d3947abd90..00000000000 --- a/suites/upgrade/old/parallel/fs/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - fs: xfs -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/old/parallel/fs/2-workload/blogbench.yaml b/suites/upgrade/old/parallel/fs/2-workload/blogbench.yaml deleted file mode 100644 index 0cd59eaafde..00000000000 --- a/suites/upgrade/old/parallel/fs/2-workload/blogbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -workload: - workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/upgrade/old/parallel/fs/3-upgrade-sequence/upgrade-all.yaml b/suites/upgrade/old/parallel/fs/3-upgrade-sequence/upgrade-all.yaml deleted file mode 100644 index 4cb05ce8777..00000000000 --- a/suites/upgrade/old/parallel/fs/3-upgrade-sequence/upgrade-all.yaml +++ /dev/null @@ -1,6 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git 
a/suites/upgrade/old/parallel/fs/distro b/suites/upgrade/old/parallel/fs/distro deleted file mode 120000 index 3a0ac71c8af..00000000000 --- a/suites/upgrade/old/parallel/fs/distro +++ /dev/null @@ -1 +0,0 @@ -../rados/distro \ No newline at end of file diff --git a/suites/upgrade/old/parallel/rados/% b/suites/upgrade/old/parallel/rados/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/parallel/rados/0-cluster/start.yaml b/suites/upgrade/old/parallel/rados/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/parallel/rados/0-cluster/start.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/parallel/rados/1-dumpling-install/dumpling.yaml b/suites/upgrade/old/parallel/rados/1-dumpling-install/dumpling.yaml deleted file mode 100644 index f1a09304712..00000000000 --- a/suites/upgrade/old/parallel/rados/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - fs: xfs -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/old/parallel/rados/2-workload/loadgenbig.yaml b/suites/upgrade/old/parallel/rados/2-workload/loadgenbig.yaml deleted file mode 100644 index b118459ce90..00000000000 --- a/suites/upgrade/old/parallel/rados/2-workload/loadgenbig.yaml +++ /dev/null @@ -1,6 +0,0 @@ -workload: - workunit: - branch: dumpling - clients: - all: - - rados/load-gen-big.sh diff --git a/suites/upgrade/old/parallel/rados/2-workload/loadgenmix.yaml b/suites/upgrade/old/parallel/rados/2-workload/loadgenmix.yaml deleted file mode 100644 index 8c7f4c5653a..00000000000 --- a/suites/upgrade/old/parallel/rados/2-workload/loadgenmix.yaml +++ /dev/null @@ -1,6 +0,0 @@ -workload: - workunit: - branch: dumpling - clients: - client.0: - - rados/load-gen-mix.sh diff --git a/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-all.yaml b/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-all.yaml deleted file mode 100644 index 4cb05ce8777..00000000000 --- a/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-all.yaml +++ /dev/null @@ -1,6 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml b/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml deleted file mode 100644 index 717f778e458..00000000000 --- a/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mds-mon-osd.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.b] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 60 - - ceph.restart: [osd.1] - - sleep: - duration: 60 - - ceph.restart: [osd.2] - - sleep: - duration: 60 - - ceph.restart: [osd.3] diff --git a/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml deleted file mode 100644 index 
8ad7503bb63..00000000000 --- a/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-mon-osd-mds.yaml +++ /dev/null @@ -1,33 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.b] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 - - ceph.restart: [osd.0] - - sleep: - duration: 60 - - ceph.restart: [osd.1] - - sleep: - duration: 60 - - ceph.restart: [osd.2] - - sleep: - duration: 60 - - ceph.restart: [osd.3] diff --git a/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml b/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml deleted file mode 100644 index a3607dbcb5c..00000000000 --- a/suites/upgrade/old/parallel/rados/3-upgrade-sequence/upgrade-osd-mon-mds.yaml +++ /dev/null @@ -1,35 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [osd.0] - - sleep: - duration: 60 - - ceph.restart: [osd.1] - - sleep: - duration: 60 - - ceph.restart: [osd.2] - - sleep: - duration: 60 - - ceph.restart: [osd.3] - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: - daemons: [mon.b] - wait-for-healthy: false - wait-for-osds-up: true - - sleep: - duration: 60 - - ceph.restart: [mon.c] - - sleep: - duration: 60 - - ceph.restart: [mds.a] - - sleep: - duration: 60 diff --git a/suites/upgrade/old/parallel/rados/distro/centos_6.4.yaml b/suites/upgrade/old/parallel/rados/distro/centos_6.4.yaml deleted file mode 100644 index 02383cd5f8c..00000000000 --- a/suites/upgrade/old/parallel/rados/distro/centos_6.4.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: centos -os_version: "6.4" diff --git a/suites/upgrade/old/parallel/rados/distro/debian_7.0.yaml b/suites/upgrade/old/parallel/rados/distro/debian_7.0.yaml deleted file mode 100644 index 8100dc41e3d..00000000000 --- a/suites/upgrade/old/parallel/rados/distro/debian_7.0.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: debian -os_version: "7.0" diff --git a/suites/upgrade/old/parallel/rados/distro/fedora_18.yaml b/suites/upgrade/old/parallel/rados/distro/fedora_18.yaml deleted file mode 100644 index 07872aa7edf..00000000000 --- a/suites/upgrade/old/parallel/rados/distro/fedora_18.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: fedora -os_version: "18" diff --git a/suites/upgrade/old/parallel/rados/distro/rhel_6.3.yaml b/suites/upgrade/old/parallel/rados/distro/rhel_6.3.yaml deleted file mode 100644 index 6a8edcd5626..00000000000 --- a/suites/upgrade/old/parallel/rados/distro/rhel_6.3.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: rhel -os_version: "6.3" diff --git a/suites/upgrade/old/parallel/rados/distro/ubuntu_12.04.yaml b/suites/upgrade/old/parallel/rados/distro/ubuntu_12.04.yaml deleted file mode 100644 index dbc3a8d9c58..00000000000 --- a/suites/upgrade/old/parallel/rados/distro/ubuntu_12.04.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: ubuntu -os_version: "12.04" diff --git a/suites/upgrade/old/parallel/rgw/% b/suites/upgrade/old/parallel/rgw/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/parallel/rgw/0-cluster/start.yaml b/suites/upgrade/old/parallel/rgw/0-cluster/start.yaml deleted file mode 100644 index 
5b6d8978cdc..00000000000 --- a/suites/upgrade/old/parallel/rgw/0-cluster/start.yaml +++ /dev/null @@ -1,11 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 - - client.1 diff --git a/suites/upgrade/old/parallel/rgw/1-dumpling-install/dumpling.yaml b/suites/upgrade/old/parallel/rgw/1-dumpling-install/dumpling.yaml deleted file mode 100644 index f1a09304712..00000000000 --- a/suites/upgrade/old/parallel/rgw/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - fs: xfs -- parallel: - - workload - - upgrade-sequence diff --git a/suites/upgrade/old/parallel/rgw/2-workload/s3tests.yaml b/suites/upgrade/old/parallel/rgw/2-workload/s3tests.yaml deleted file mode 100644 index bd91e2c9f65..00000000000 --- a/suites/upgrade/old/parallel/rgw/2-workload/s3tests.yaml +++ /dev/null @@ -1,8 +0,0 @@ -workload: - sequential: - - rgw: [client.0] - - s3tests: - # use older tests when we are running a mix - client.0: - force-branch: dumpling - rgw_server: client.0 diff --git a/suites/upgrade/old/parallel/rgw/3-upgrade-sequence/upgrade-all.yaml b/suites/upgrade/old/parallel/rgw/3-upgrade-sequence/upgrade-all.yaml deleted file mode 100644 index 23740967edf..00000000000 --- a/suites/upgrade/old/parallel/rgw/3-upgrade-sequence/upgrade-all.yaml +++ /dev/null @@ -1,6 +0,0 @@ -upgrade-sequence: - sequential: - - install.upgrade: - all: - branch: emperor - - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0] diff --git a/suites/upgrade/old/parallel/rgw/4-final-workload/final.yaml b/suites/upgrade/old/parallel/rgw/4-final-workload/final.yaml deleted file mode 100644 index fb754ed0ce7..00000000000 --- a/suites/upgrade/old/parallel/rgw/4-final-workload/final.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- rgw: [client.1] -- swift: - client.1: - rgw_server: client.1 diff --git a/suites/upgrade/old/parallel/rgw/distro b/suites/upgrade/old/parallel/rgw/distro deleted file mode 120000 index 3a0ac71c8af..00000000000 --- a/suites/upgrade/old/parallel/rgw/distro +++ /dev/null @@ -1 +0,0 @@ -../rados/distro \ No newline at end of file diff --git a/suites/upgrade/old/parallel/stress-split/% b/suites/upgrade/old/parallel/stress-split/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/parallel/stress-split/0-cluster/start.yaml b/suites/upgrade/old/parallel/stress-split/0-cluster/start.yaml deleted file mode 100644 index 89d4b3681a9..00000000000 --- a/suites/upgrade/old/parallel/stress-split/0-cluster/start.yaml +++ /dev/null @@ -1,12 +0,0 @@ -roles: -- - mon.a - - mon.b - - mds.a - - osd.0 - - osd.1 - - osd.2 -- - osd.3 - - osd.4 - - osd.5 - - mon.c -- - client.0 diff --git a/suites/upgrade/old/parallel/stress-split/1-dumpling-install/dumpling.yaml b/suites/upgrade/old/parallel/stress-split/1-dumpling-install/dumpling.yaml deleted file mode 100644 index c98631e2bbd..00000000000 --- a/suites/upgrade/old/parallel/stress-split/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - fs: xfs diff --git a/suites/upgrade/old/parallel/stress-split/2-partial-upgrade/firsthalf.yaml b/suites/upgrade/old/parallel/stress-split/2-partial-upgrade/firsthalf.yaml deleted file mode 100644 index 68c9d44b7c3..00000000000 --- a/suites/upgrade/old/parallel/stress-split/2-partial-upgrade/firsthalf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install.upgrade: - osd.0: -- ceph.restart: - daemons: [osd.0, osd.1, 
osd.2] diff --git a/suites/upgrade/old/parallel/stress-split/3-thrash/default.yaml b/suites/upgrade/old/parallel/stress-split/3-thrash/default.yaml deleted file mode 100644 index 21d4c752075..00000000000 --- a/suites/upgrade/old/parallel/stress-split/3-thrash/default.yaml +++ /dev/null @@ -1,11 +0,0 @@ -overrides: - ceph: - log-whitelist: - - wrongly marked me down - - objects unfound and apparently lost - - log bound mismatch -tasks: -- thrashosds: - timeout: 1200 - chance_pgnum_grow: 1 - chance_pgpnum_fix: 1 diff --git a/suites/upgrade/old/parallel/stress-split/4-mon/mona.yaml b/suites/upgrade/old/parallel/stress-split/4-mon/mona.yaml deleted file mode 100644 index b6ffb3323d1..00000000000 --- a/suites/upgrade/old/parallel/stress-split/4-mon/mona.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- ceph.restart: - daemons: [mon.a] - wait-for-healthy: false - wait-for-osds-up: true diff --git a/suites/upgrade/old/parallel/stress-split/5-workload/rados_api_tests.yaml b/suites/upgrade/old/parallel/stress-split/5-workload/rados_api_tests.yaml deleted file mode 100644 index 5797c2f292a..00000000000 --- a/suites/upgrade/old/parallel/stress-split/5-workload/rados_api_tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rados/test.sh diff --git a/suites/upgrade/old/parallel/stress-split/5-workload/radosbench.yaml b/suites/upgrade/old/parallel/stress-split/5-workload/radosbench.yaml deleted file mode 100644 index 3940870fce0..00000000000 --- a/suites/upgrade/old/parallel/stress-split/5-workload/radosbench.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- radosbench: - clients: [client.0] - time: 1800 diff --git a/suites/upgrade/old/parallel/stress-split/5-workload/readwrite.yaml b/suites/upgrade/old/parallel/stress-split/5-workload/readwrite.yaml deleted file mode 100644 index c53e52b0872..00000000000 --- a/suites/upgrade/old/parallel/stress-split/5-workload/readwrite.yaml +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 500 - op_weights: - read: 45 - write: 45 - delete: 10 diff --git a/suites/upgrade/old/parallel/stress-split/5-workload/snaps-few-objects.yaml b/suites/upgrade/old/parallel/stress-split/5-workload/snaps-few-objects.yaml deleted file mode 100644 index c54039766c0..00000000000 --- a/suites/upgrade/old/parallel/stress-split/5-workload/snaps-few-objects.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/upgrade/old/parallel/stress-split/5-workload/snaps-many-objects.yaml b/suites/upgrade/old/parallel/stress-split/5-workload/snaps-many-objects.yaml deleted file mode 100644 index 9e311c946e1..00000000000 --- a/suites/upgrade/old/parallel/stress-split/5-workload/snaps-many-objects.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 500 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/upgrade/old/parallel/stress-split/6-next-mon/monb.yaml b/suites/upgrade/old/parallel/stress-split/6-next-mon/monb.yaml deleted file mode 100644 index 513890c41c0..00000000000 --- a/suites/upgrade/old/parallel/stress-split/6-next-mon/monb.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- ceph.restart: - daemons: [mon.b] - wait-for-healthy: false - wait-for-osds-up: true diff --git 
a/suites/upgrade/old/parallel/stress-split/7-workload/rados_api_tests.yaml b/suites/upgrade/old/parallel/stress-split/7-workload/rados_api_tests.yaml deleted file mode 100644 index 5797c2f292a..00000000000 --- a/suites/upgrade/old/parallel/stress-split/7-workload/rados_api_tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rados/test.sh diff --git a/suites/upgrade/old/parallel/stress-split/8-next-mon/monc.yaml b/suites/upgrade/old/parallel/stress-split/8-next-mon/monc.yaml deleted file mode 100644 index 28acc466907..00000000000 --- a/suites/upgrade/old/parallel/stress-split/8-next-mon/monc.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- install.upgrade: - mon.c: null -- ceph.restart: - daemons: [mon.c] - wait-for-healthy: false - wait-for-osds-up: true -- ceph.wait_for_mon_quorum: [a, b, c] diff --git a/suites/upgrade/old/parallel/stress-split/9-workload/rados_api_tests.yaml b/suites/upgrade/old/parallel/stress-split/9-workload/rados_api_tests.yaml deleted file mode 100644 index 5797c2f292a..00000000000 --- a/suites/upgrade/old/parallel/stress-split/9-workload/rados_api_tests.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rados/test.sh diff --git a/suites/upgrade/old/parallel/stress-split/distro b/suites/upgrade/old/parallel/stress-split/distro deleted file mode 120000 index 3a0ac71c8af..00000000000 --- a/suites/upgrade/old/parallel/stress-split/distro +++ /dev/null @@ -1 +0,0 @@ -../rados/distro \ No newline at end of file diff --git a/suites/upgrade/old/rados-double/rados-double/% b/suites/upgrade/old/rados-double/rados-double/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/rados-double/rados-double/0-cluster/start.yaml b/suites/upgrade/old/rados-double/rados-double/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/0-cluster/start.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/rados-double/rados-double/1-bobtail-install/bobtail.yaml b/suites/upgrade/old/rados-double/rados-double/1-bobtail-install/bobtail.yaml deleted file mode 100644 index c676a5582d5..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/1-bobtail-install/bobtail.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: - branch: bobtail -- ceph: diff --git a/suites/upgrade/old/rados-double/rados-double/2-bobtail-workload/api.yaml b/suites/upgrade/old/rados-double/rados-double/2-bobtail-workload/api.yaml deleted file mode 100644 index 637b7a8be97..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/2-bobtail-workload/api.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: bobtail - clients: - client.0: - - rados/test.sh diff --git a/suites/upgrade/old/rados-double/rados-double/3-upgrade/dumpling.yaml b/suites/upgrade/old/rados-double/rados-double/3-upgrade/dumpling.yaml deleted file mode 100644 index e3e332c4f22..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/3-upgrade/dumpling.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: dumpling diff --git a/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_mon_mds_osd.yaml b/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_mon_mds_osd.yaml deleted file mode 100644 index 78e14e9472a..00000000000 --- 
a/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_mon_mds_osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_osd_mds_mon.yaml b/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_osd_mds_mon.yaml deleted file mode 100644 index dbcd013b3f0..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/4-restart/upgrade_osd_mds_mon.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/api.yaml b/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/api.yaml deleted file mode 100644 index b091ecc2090..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/api.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rados/test.sh - - cls diff --git a/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/load-gen-mix.yaml b/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/load-gen-mix.yaml deleted file mode 100644 index e89d1f5534c..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/5-dumpling-workload/load-gen-mix.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rados/load-gen-mix.sh diff --git a/suites/upgrade/old/rados-double/rados-double/6-upgrade-next/next.yaml b/suites/upgrade/old/rados-double/rados-double/6-upgrade-next/next.yaml deleted file mode 100644 index bb34346801c..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/6-upgrade-next/next.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install.upgrade: - all: diff --git a/suites/upgrade/old/rados-double/rados-double/7-restart/mon-mds-osd.yaml b/suites/upgrade/old/rados-double/rados-double/7-restart/mon-mds-osd.yaml deleted file mode 100644 index 78e14e9472a..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/7-restart/mon-mds-osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/rados-double/rados-double/7-restart/osd-mds-mon.yaml b/suites/upgrade/old/rados-double/rados-double/7-restart/osd-mds-mon.yaml deleted file mode 100644 index dbcd013b3f0..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/7-restart/osd-mds-mon.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/suites/upgrade/old/rados-double/rados-double/8-next-workload/api.yaml b/suites/upgrade/old/rados-double/rados-double/8-next-workload/api.yaml deleted file mode 100644 index 9b9f1f2e675..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/8-next-workload/api.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rados/test.sh - - cls diff --git a/suites/upgrade/old/rados-double/rados-double/8-next-workload/snaps-few-objects.yaml b/suites/upgrade/old/rados-double/rados-double/8-next-workload/snaps-few-objects.yaml deleted file mode 100644 index aa82d973ae1..00000000000 --- a/suites/upgrade/old/rados-double/rados-double/8-next-workload/snaps-few-objects.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - 
rollback: 50 - copy_from: 50 diff --git a/suites/upgrade/old/rados/rados/% b/suites/upgrade/old/rados/rados/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/rados/rados/0-cluster/start.yaml b/suites/upgrade/old/rados/rados/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/rados/rados/0-cluster/start.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/rados/rados/1-cuttlefish-install/cuttlefish.yaml b/suites/upgrade/old/rados/rados/1-cuttlefish-install/cuttlefish.yaml deleted file mode 100644 index b259af97269..00000000000 --- a/suites/upgrade/old/rados/rados/1-cuttlefish-install/cuttlefish.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: - branch: cuttlefish -- ceph: diff --git a/suites/upgrade/old/rados/rados/2-cuttlefish-workload/api.yaml b/suites/upgrade/old/rados/rados/2-cuttlefish-workload/api.yaml deleted file mode 100644 index 66526582579..00000000000 --- a/suites/upgrade/old/rados/rados/2-cuttlefish-workload/api.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - client.0: - - rados/test.sh - - cls diff --git a/suites/upgrade/old/rados/rados/2-cuttlefish-workload/load-gen-mix.yaml b/suites/upgrade/old/rados/rados/2-cuttlefish-workload/load-gen-mix.yaml deleted file mode 100644 index 7ec655c8ccf..00000000000 --- a/suites/upgrade/old/rados/rados/2-cuttlefish-workload/load-gen-mix.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: cuttlefish - clients: - client.0: - - rados/load-gen-mix.sh diff --git a/suites/upgrade/old/rados/rados/3-upgrade/dumpling.yaml b/suites/upgrade/old/rados/rados/3-upgrade/dumpling.yaml deleted file mode 100644 index e3e332c4f22..00000000000 --- a/suites/upgrade/old/rados/rados/3-upgrade/dumpling.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: dumpling diff --git a/suites/upgrade/old/rados/rados/4-restart/upgrade_mds_mon_osd.yaml b/suites/upgrade/old/rados/rados/4-restart/upgrade_mds_mon_osd.yaml deleted file mode 100644 index d21800684d3..00000000000 --- a/suites/upgrade/old/rados/rados/4-restart/upgrade_mds_mon_osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/rados/rados/4-restart/upgrade_mon_mds_osd.yaml b/suites/upgrade/old/rados/rados/4-restart/upgrade_mon_mds_osd.yaml deleted file mode 100644 index 78e14e9472a..00000000000 --- a/suites/upgrade/old/rados/rados/4-restart/upgrade_mon_mds_osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/rados/rados/4-restart/upgrade_osd_mon_mds.yaml b/suites/upgrade/old/rados/rados/4-restart/upgrade_osd_mon_mds.yaml deleted file mode 100644 index e8fe288f657..00000000000 --- a/suites/upgrade/old/rados/rados/4-restart/upgrade_osd_mon_mds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a] diff --git a/suites/upgrade/old/rados/rados/5-dumpling-workload/api.yaml b/suites/upgrade/old/rados/rados/5-dumpling-workload/api.yaml deleted file mode 100644 index b091ecc2090..00000000000 --- a/suites/upgrade/old/rados/rados/5-dumpling-workload/api.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rados/test.sh - - cls 
diff --git a/suites/upgrade/old/rados/rados/5-dumpling-workload/snaps-few-objects.yaml b/suites/upgrade/old/rados/rados/5-dumpling-workload/snaps-few-objects.yaml deleted file mode 100644 index c54039766c0..00000000000 --- a/suites/upgrade/old/rados/rados/5-dumpling-workload/snaps-few-objects.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/upgrade/old/rados/rados/6-upgrade-emp/emperor.yaml b/suites/upgrade/old/rados/rados/6-upgrade-emp/emperor.yaml deleted file mode 100644 index e473f31862d..00000000000 --- a/suites/upgrade/old/rados/rados/6-upgrade-emp/emperor.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: emperor diff --git a/suites/upgrade/old/rados/rados/7-restart/upgrade_mds_mon_osd.yaml b/suites/upgrade/old/rados/rados/7-restart/upgrade_mds_mon_osd.yaml deleted file mode 100644 index d21800684d3..00000000000 --- a/suites/upgrade/old/rados/rados/7-restart/upgrade_mds_mon_osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/rados/rados/7-restart/upgrade_mon_mds_osd.yaml b/suites/upgrade/old/rados/rados/7-restart/upgrade_mon_mds_osd.yaml deleted file mode 100644 index 78e14e9472a..00000000000 --- a/suites/upgrade/old/rados/rados/7-restart/upgrade_mon_mds_osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/rados/rados/7-restart/upgrade_osd_mds_mon.yaml b/suites/upgrade/old/rados/rados/7-restart/upgrade_osd_mds_mon.yaml deleted file mode 100644 index dbcd013b3f0..00000000000 --- a/suites/upgrade/old/rados/rados/7-restart/upgrade_osd_mds_mon.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/suites/upgrade/old/rados/rados/8-emperor-workload/api.yaml b/suites/upgrade/old/rados/rados/8-emperor-workload/api.yaml deleted file mode 100644 index 29a4be13fb6..00000000000 --- a/suites/upgrade/old/rados/rados/8-emperor-workload/api.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - branch: emperor - clients: - client.0: - - rados/test.sh - - cls diff --git a/suites/upgrade/old/rados/rados/8-emperor-workload/snaps-few-objects.yaml b/suites/upgrade/old/rados/rados/8-emperor-workload/snaps-few-objects.yaml deleted file mode 100644 index c54039766c0..00000000000 --- a/suites/upgrade/old/rados/rados/8-emperor-workload/snaps-few-objects.yaml +++ /dev/null @@ -1,12 +0,0 @@ -tasks: -- rados: - clients: [client.0] - ops: 4000 - objects: 50 - op_weights: - read: 100 - write: 100 - delete: 50 - snap_create: 50 - snap_remove: 50 - rollback: 50 diff --git a/suites/upgrade/old/rbd-double/rbd-double/% b/suites/upgrade/old/rbd-double/rbd-double/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/rbd-double/rbd-double/0-cluster/start.yaml b/suites/upgrade/old/rbd-double/rbd-double/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/0-cluster/start.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/rbd-double/rbd-double/1-bobtail-install/bobtail.yaml 
b/suites/upgrade/old/rbd-double/rbd-double/1-bobtail-install/bobtail.yaml deleted file mode 100644 index c676a5582d5..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/1-bobtail-install/bobtail.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install: - branch: bobtail -- ceph: diff --git a/suites/upgrade/old/rbd-double/rbd-double/2-bobtail-workload/import_export.yaml b/suites/upgrade/old/rbd-double/rbd-double/2-bobtail-workload/import_export.yaml deleted file mode 100644 index 9123db83bbb..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/2-bobtail-workload/import_export.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- workunit: - branch: bobtail - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/upgrade/old/rbd-double/rbd-double/3-upgrade/dumpling.yaml b/suites/upgrade/old/rbd-double/rbd-double/3-upgrade/dumpling.yaml deleted file mode 100644 index e3e332c4f22..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/3-upgrade/dumpling.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: dumpling diff --git a/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_mon_mds_osd.yaml b/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_mon_mds_osd.yaml deleted file mode 100644 index 78e14e9472a..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_mon_mds_osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_osd_mds_mon.yaml b/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_osd_mds_mon.yaml deleted file mode 100644 index dbcd013b3f0..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/4-restart/upgrade_osd_mds_mon.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/api.yaml b/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/api.yaml deleted file mode 100644 index bbcde3e1559..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/api.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rbd/test_librbd.sh diff --git a/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/cls.yaml b/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/cls.yaml deleted file mode 100644 index c8079e3dcdb..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/5-dumpling-workload/cls.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - cls/test_cls_rbd.sh diff --git a/suites/upgrade/old/rbd-double/rbd-double/6-upgrade-next/next.yaml b/suites/upgrade/old/rbd-double/rbd-double/6-upgrade-next/next.yaml deleted file mode 100644 index bb34346801c..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/6-upgrade-next/next.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install.upgrade: - all: diff --git a/suites/upgrade/old/rbd-double/rbd-double/7-restart/mon-mds-osd.yaml b/suites/upgrade/old/rbd-double/rbd-double/7-restart/mon-mds-osd.yaml deleted file mode 100644 index 78e14e9472a..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/7-restart/mon-mds-osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git 
a/suites/upgrade/old/rbd-double/rbd-double/7-restart/osd-mds-mon.yaml b/suites/upgrade/old/rbd-double/rbd-double/7-restart/osd-mds-mon.yaml deleted file mode 100644 index dbcd013b3f0..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/7-restart/osd-mds-mon.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c] diff --git a/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/import-export.yaml b/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/import-export.yaml deleted file mode 100644 index ae44a873829..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/import-export.yaml +++ /dev/null @@ -1,7 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/python.yaml b/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/python.yaml deleted file mode 100644 index 5c6df6e38dd..00000000000 --- a/suites/upgrade/old/rbd-double/rbd-double/8-next-workload/python.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - client.0: - - rbd/test_librbd_python.sh diff --git a/suites/upgrade/old/rbd/rbd/% b/suites/upgrade/old/rbd/rbd/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/rbd/rbd/0-cluster/start.yaml b/suites/upgrade/old/rbd/rbd/0-cluster/start.yaml deleted file mode 100644 index cd071f9cff4..00000000000 --- a/suites/upgrade/old/rbd/rbd/0-cluster/start.yaml +++ /dev/null @@ -1,14 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 -tasks: -- install: - branch: bobtail -- ceph: diff --git a/suites/upgrade/old/rbd/rbd/1-bobtail-workload/import_export.yaml b/suites/upgrade/old/rbd/rbd/1-bobtail-workload/import_export.yaml deleted file mode 100644 index 9123db83bbb..00000000000 --- a/suites/upgrade/old/rbd/rbd/1-bobtail-workload/import_export.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- workunit: - branch: bobtail - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/upgrade/old/rbd/rbd/2-upgrade-to-dumpling/upgrade.yaml b/suites/upgrade/old/rbd/rbd/2-upgrade-to-dumpling/upgrade.yaml deleted file mode 100644 index e3e332c4f22..00000000000 --- a/suites/upgrade/old/rbd/rbd/2-upgrade-to-dumpling/upgrade.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: dumpling diff --git a/suites/upgrade/old/rbd/rbd/3-restart/mon-osd-mds.yaml b/suites/upgrade/old/rbd/rbd/3-restart/mon-osd-mds.yaml deleted file mode 100644 index 31a79e45938..00000000000 --- a/suites/upgrade/old/rbd/rbd/3-restart/mon-osd-mds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a] diff --git a/suites/upgrade/old/rbd/rbd/3-restart/osd-mon-mds.yaml b/suites/upgrade/old/rbd/rbd/3-restart/osd-mon-mds.yaml deleted file mode 100644 index e8fe288f657..00000000000 --- a/suites/upgrade/old/rbd/rbd/3-restart/osd-mon-mds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a] diff --git a/suites/upgrade/old/rbd/rbd/4-dumpling-workload/api.yaml b/suites/upgrade/old/rbd/rbd/4-dumpling-workload/api.yaml deleted file mode 100644 index bbcde3e1559..00000000000 --- a/suites/upgrade/old/rbd/rbd/4-dumpling-workload/api.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - 
client.0: - - rbd/test_librbd.sh diff --git a/suites/upgrade/old/rbd/rbd/4-dumpling-workload/cls.yaml b/suites/upgrade/old/rbd/rbd/4-dumpling-workload/cls.yaml deleted file mode 100644 index c8079e3dcdb..00000000000 --- a/suites/upgrade/old/rbd/rbd/4-dumpling-workload/cls.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - cls/test_cls_rbd.sh diff --git a/suites/upgrade/old/rbd/rbd/4-dumpling-workload/import-export.yaml b/suites/upgrade/old/rbd/rbd/4-dumpling-workload/import-export.yaml deleted file mode 100644 index 364ef25f31c..00000000000 --- a/suites/upgrade/old/rbd/rbd/4-dumpling-workload/import-export.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/upgrade/old/rbd/rbd/4-dumpling-workload/python.yaml b/suites/upgrade/old/rbd/rbd/4-dumpling-workload/python.yaml deleted file mode 100644 index 737a821f776..00000000000 --- a/suites/upgrade/old/rbd/rbd/4-dumpling-workload/python.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rbd/test_librbd_python.sh diff --git a/suites/upgrade/old/rbd/rbd/5-upgrade-to-emperor/upgrade.yaml b/suites/upgrade/old/rbd/rbd/5-upgrade-to-emperor/upgrade.yaml deleted file mode 100644 index e473f31862d..00000000000 --- a/suites/upgrade/old/rbd/rbd/5-upgrade-to-emperor/upgrade.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: emperor diff --git a/suites/upgrade/old/rbd/rbd/6-restart/mon-osd-mds.yaml b/suites/upgrade/old/rbd/rbd/6-restart/mon-osd-mds.yaml deleted file mode 100644 index 31a79e45938..00000000000 --- a/suites/upgrade/old/rbd/rbd/6-restart/mon-osd-mds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a] diff --git a/suites/upgrade/old/rbd/rbd/6-restart/osd-mon-mds.yaml b/suites/upgrade/old/rbd/rbd/6-restart/osd-mon-mds.yaml deleted file mode 100644 index e8fe288f657..00000000000 --- a/suites/upgrade/old/rbd/rbd/6-restart/osd-mon-mds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a] diff --git a/suites/upgrade/old/rbd/rbd/7-emperor-workload/import_export.yaml b/suites/upgrade/old/rbd/rbd/7-emperor-workload/import_export.yaml deleted file mode 100644 index e29788766b6..00000000000 --- a/suites/upgrade/old/rbd/rbd/7-emperor-workload/import_export.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- workunit: - branch: emperor - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/upgrade/old/rgw-double/rgw-double/% b/suites/upgrade/old/rgw-double/rgw-double/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/rgw-double/rgw-double/0-cluster/start.yaml b/suites/upgrade/old/rgw-double/rgw-double/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/0-cluster/start.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/rgw-double/rgw-double/1-bobtail-install/bobtail.yaml b/suites/upgrade/old/rgw-double/rgw-double/1-bobtail-install/bobtail.yaml deleted file mode 100644 index ca81c710bd1..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/1-bobtail-install/bobtail.yaml +++ /dev/null @@ -1,5 +0,0 @@ 
-tasks: -- install: - branch: bobtail -- ceph: -- rgw: diff --git a/suites/upgrade/old/rgw-double/rgw-double/2-bobtail-workload/s3tests.yaml b/suites/upgrade/old/rgw-double/rgw-double/2-bobtail-workload/s3tests.yaml deleted file mode 100644 index 7397ae6873e..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/2-bobtail-workload/s3tests.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- s3tests: - client.0: - force-branch: bobtail - rgw_server: client.0 diff --git a/suites/upgrade/old/rgw-double/rgw-double/3-upgrade/dumpling.yaml b/suites/upgrade/old/rgw-double/rgw-double/3-upgrade/dumpling.yaml deleted file mode 100644 index e3e332c4f22..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/3-upgrade/dumpling.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: dumpling diff --git a/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_mon_mds_osd.yaml b/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_mon_mds_osd.yaml deleted file mode 100644 index 86665905d67..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_mon_mds_osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0] diff --git a/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_osd_mds_mon.yaml b/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_osd_mds_mon.yaml deleted file mode 100644 index 425cf6082a3..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/4-restart/upgrade_osd_mds_mon.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c, rgw.client.0] diff --git a/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/readwrite.yaml b/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/readwrite.yaml deleted file mode 100644 index d3166f117da..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/readwrite.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- s3readwrite: - client.0: - rgw_server: client.0 - readwrite: - bucket: rwtest - readers: 10 - writers: 3 - duration: 300 - files: - num: 10 - size: 2000 - stddev: 500 diff --git a/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/s3tests.yaml b/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/s3tests.yaml deleted file mode 100644 index 6506960f73a..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/s3tests.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- s3tests: - client.0: - force-branch: dumpling - rgw_server: client.0 diff --git a/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/swift.yaml b/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/swift.yaml deleted file mode 100644 index 45e2fc9cc30..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/5-dumpling-workload/swift.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- swift: - client.0: - rgw_server: client.0 diff --git a/suites/upgrade/old/rgw-double/rgw-double/6-upgrade-next/next.yaml b/suites/upgrade/old/rgw-double/rgw-double/6-upgrade-next/next.yaml deleted file mode 100644 index bb34346801c..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/6-upgrade-next/next.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- install.upgrade: - all: diff --git a/suites/upgrade/old/rgw-double/rgw-double/7-restart/mon-mds-osd.yaml b/suites/upgrade/old/rgw-double/rgw-double/7-restart/mon-mds-osd.yaml deleted file mode 100644 index 86665905d67..00000000000 --- 
a/suites/upgrade/old/rgw-double/rgw-double/7-restart/mon-mds-osd.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0] diff --git a/suites/upgrade/old/rgw-double/rgw-double/7-restart/osd-mds-mon.yaml b/suites/upgrade/old/rgw-double/rgw-double/7-restart/osd-mds-mon.yaml deleted file mode 100644 index 425cf6082a3..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/7-restart/osd-mds-mon.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c, rgw.client.0] diff --git a/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/readwrite.yaml b/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/readwrite.yaml deleted file mode 100644 index d3166f117da..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/readwrite.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- s3readwrite: - client.0: - rgw_server: client.0 - readwrite: - bucket: rwtest - readers: 10 - writers: 3 - duration: 300 - files: - num: 10 - size: 2000 - stddev: 500 diff --git a/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/s3tests.yaml b/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/s3tests.yaml deleted file mode 100644 index 573cffbc30a..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/s3tests.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- s3tests: - client.0: - rgw_server: client.0 diff --git a/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/swift.yaml b/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/swift.yaml deleted file mode 100644 index 45e2fc9cc30..00000000000 --- a/suites/upgrade/old/rgw-double/rgw-double/8-next-workload/swift.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- swift: - client.0: - rgw_server: client.0 diff --git a/suites/upgrade/old/rgw/rgw/% b/suites/upgrade/old/rgw/rgw/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/rgw/rgw/0-cluster/start.yaml b/suites/upgrade/old/rgw/rgw/0-cluster/start.yaml deleted file mode 100644 index 8b1ebbe2c36..00000000000 --- a/suites/upgrade/old/rgw/rgw/0-cluster/start.yaml +++ /dev/null @@ -1,20 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 - - client.1 -- - client.0 -tasks: -- install: - branch: bobtail -- ceph: - conf: - client: - client mount timeout: 600 - rgw init timeout: 600 -- rgw: [client.0] diff --git a/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3readwrite.yaml b/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3readwrite.yaml deleted file mode 100644 index d3166f117da..00000000000 --- a/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3readwrite.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- s3readwrite: - client.0: - rgw_server: client.0 - readwrite: - bucket: rwtest - readers: 10 - writers: 3 - duration: 300 - files: - num: 10 - size: 2000 - stddev: 500 diff --git a/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3tests.yaml b/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3tests.yaml deleted file mode 100644 index 8020d793c37..00000000000 --- a/suites/upgrade/old/rgw/rgw/1-bobtail-workload/s3tests.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- s3tests: - client.0: - rgw_server: client.0 - force-branch: bobtail diff --git a/suites/upgrade/old/rgw/rgw/1-bobtail-workload/swift.yaml b/suites/upgrade/old/rgw/rgw/1-bobtail-workload/swift.yaml deleted file mode 100644 index 45e2fc9cc30..00000000000 --- 
a/suites/upgrade/old/rgw/rgw/1-bobtail-workload/swift.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- swift: - client.0: - rgw_server: client.0 diff --git a/suites/upgrade/old/rgw/rgw/2-upgrade-to-dumpling/upgrade.yaml b/suites/upgrade/old/rgw/rgw/2-upgrade-to-dumpling/upgrade.yaml deleted file mode 100644 index e3e332c4f22..00000000000 --- a/suites/upgrade/old/rgw/rgw/2-upgrade-to-dumpling/upgrade.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: dumpling diff --git a/suites/upgrade/old/rgw/rgw/3-restart/mon-osd-mds.yaml b/suites/upgrade/old/rgw/rgw/3-restart/mon-osd-mds.yaml deleted file mode 100644 index ea8a58ccdae..00000000000 --- a/suites/upgrade/old/rgw/rgw/3-restart/mon-osd-mds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a, rgw.client.0] diff --git a/suites/upgrade/old/rgw/rgw/3-restart/osd-mon-mds.yaml b/suites/upgrade/old/rgw/rgw/3-restart/osd-mon-mds.yaml deleted file mode 100644 index f9606ef70cc..00000000000 --- a/suites/upgrade/old/rgw/rgw/3-restart/osd-mon-mds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0] diff --git a/suites/upgrade/old/rgw/rgw/4-dumpling-workload/readwrite.yaml b/suites/upgrade/old/rgw/rgw/4-dumpling-workload/readwrite.yaml deleted file mode 100644 index d3166f117da..00000000000 --- a/suites/upgrade/old/rgw/rgw/4-dumpling-workload/readwrite.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- s3readwrite: - client.0: - rgw_server: client.0 - readwrite: - bucket: rwtest - readers: 10 - writers: 3 - duration: 300 - files: - num: 10 - size: 2000 - stddev: 500 diff --git a/suites/upgrade/old/rgw/rgw/4-dumpling-workload/s3tests.yaml b/suites/upgrade/old/rgw/rgw/4-dumpling-workload/s3tests.yaml deleted file mode 100644 index 6506960f73a..00000000000 --- a/suites/upgrade/old/rgw/rgw/4-dumpling-workload/s3tests.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- s3tests: - client.0: - force-branch: dumpling - rgw_server: client.0 diff --git a/suites/upgrade/old/rgw/rgw/4-dumpling-workload/swift.yaml b/suites/upgrade/old/rgw/rgw/4-dumpling-workload/swift.yaml deleted file mode 100644 index 45e2fc9cc30..00000000000 --- a/suites/upgrade/old/rgw/rgw/4-dumpling-workload/swift.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- swift: - client.0: - rgw_server: client.0 diff --git a/suites/upgrade/old/rgw/rgw/5-upgrade-to-emperor/upgrade.yaml b/suites/upgrade/old/rgw/rgw/5-upgrade-to-emperor/upgrade.yaml deleted file mode 100644 index e473f31862d..00000000000 --- a/suites/upgrade/old/rgw/rgw/5-upgrade-to-emperor/upgrade.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: emperor diff --git a/suites/upgrade/old/rgw/rgw/6-restart/mon-osd-mds.yaml b/suites/upgrade/old/rgw/rgw/6-restart/mon-osd-mds.yaml deleted file mode 100644 index ea8a58ccdae..00000000000 --- a/suites/upgrade/old/rgw/rgw/6-restart/mon-osd-mds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a, rgw.client.0] diff --git a/suites/upgrade/old/rgw/rgw/6-restart/osd-mon-mds.yaml b/suites/upgrade/old/rgw/rgw/6-restart/osd-mon-mds.yaml deleted file mode 100644 index f9606ef70cc..00000000000 --- a/suites/upgrade/old/rgw/rgw/6-restart/osd-mon-mds.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0] diff --git a/suites/upgrade/old/rgw/rgw/7-emperor-workload/readwrite.yaml 
b/suites/upgrade/old/rgw/rgw/7-emperor-workload/readwrite.yaml deleted file mode 100644 index d3166f117da..00000000000 --- a/suites/upgrade/old/rgw/rgw/7-emperor-workload/readwrite.yaml +++ /dev/null @@ -1,13 +0,0 @@ -tasks: -- s3readwrite: - client.0: - rgw_server: client.0 - readwrite: - bucket: rwtest - readers: 10 - writers: 3 - duration: 300 - files: - num: 10 - size: 2000 - stddev: 500 diff --git a/suites/upgrade/old/rgw/rgw/7-emperor-workload/s3tests.yaml b/suites/upgrade/old/rgw/rgw/7-emperor-workload/s3tests.yaml deleted file mode 100644 index cc9675c2ade..00000000000 --- a/suites/upgrade/old/rgw/rgw/7-emperor-workload/s3tests.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- s3tests: - client.0: - force-branch: emperor - rgw_server: client.0 diff --git a/suites/upgrade/old/rgw/rgw/7-emperor-workload/swift.yaml b/suites/upgrade/old/rgw/rgw/7-emperor-workload/swift.yaml deleted file mode 100644 index 45e2fc9cc30..00000000000 --- a/suites/upgrade/old/rgw/rgw/7-emperor-workload/swift.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- swift: - client.0: - rgw_server: client.0 diff --git a/suites/upgrade/old/small/fs/% b/suites/upgrade/old/small/fs/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/small/fs/0-cluster/start.yaml b/suites/upgrade/old/small/fs/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/small/fs/0-cluster/start.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/small/fs/1-dumpling-install/dumpling.yaml b/suites/upgrade/old/small/fs/1-dumpling-install/dumpling.yaml deleted file mode 100644 index d99595e47de..00000000000 --- a/suites/upgrade/old/small/fs/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - fs: xfs -- ceph-fuse: diff --git a/suites/upgrade/old/small/fs/2-workload/blogbench.yaml b/suites/upgrade/old/small/fs/2-workload/blogbench.yaml deleted file mode 100644 index edf71708b6d..00000000000 --- a/suites/upgrade/old/small/fs/2-workload/blogbench.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- workunit: - clients: - all: - - suites/blogbench.sh diff --git a/suites/upgrade/old/small/fs/3-upgrade-sequence/upgrade-all.yaml b/suites/upgrade/old/small/fs/3-upgrade-sequence/upgrade-all.yaml deleted file mode 100644 index e473f31862d..00000000000 --- a/suites/upgrade/old/small/fs/3-upgrade-sequence/upgrade-all.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: emperor diff --git a/suites/upgrade/old/small/fs/4-restart/restart.yaml b/suites/upgrade/old/small/fs/4-restart/restart.yaml deleted file mode 100644 index 4290b2b9f98..00000000000 --- a/suites/upgrade/old/small/fs/4-restart/restart.yaml +++ /dev/null @@ -1,3 +0,0 @@ -tasks: -- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3] - diff --git a/suites/upgrade/old/small/fs/5-emperor-workload/emperor.yaml b/suites/upgrade/old/small/fs/5-emperor-workload/emperor.yaml deleted file mode 100644 index 3f54d542976..00000000000 --- a/suites/upgrade/old/small/fs/5-emperor-workload/emperor.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: emperor - clients: - client.0: - - suites/dbench.sh diff --git a/suites/upgrade/old/small/fs/distro b/suites/upgrade/old/small/fs/distro deleted file mode 120000 index 3a0ac71c8af..00000000000 --- a/suites/upgrade/old/small/fs/distro +++ /dev/null @@ -1 +0,0 @@ 
-../rados/distro \ No newline at end of file diff --git a/suites/upgrade/old/small/rados/% b/suites/upgrade/old/small/rados/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/small/rados/0-cluster/start.yaml b/suites/upgrade/old/small/rados/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/small/rados/0-cluster/start.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/small/rados/1-dumpling-install/dumpling.yaml b/suites/upgrade/old/small/rados/1-dumpling-install/dumpling.yaml deleted file mode 100644 index c98631e2bbd..00000000000 --- a/suites/upgrade/old/small/rados/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - fs: xfs diff --git a/suites/upgrade/old/small/rados/2-workload/loadgenbig.yaml b/suites/upgrade/old/small/rados/2-workload/loadgenbig.yaml deleted file mode 100644 index 9c5c2c71786..00000000000 --- a/suites/upgrade/old/small/rados/2-workload/loadgenbig.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - all: - - rados/load-gen-big.sh diff --git a/suites/upgrade/old/small/rados/3-upgrade-sequence/upgrade.yaml b/suites/upgrade/old/small/rados/3-upgrade-sequence/upgrade.yaml deleted file mode 100644 index e473f31862d..00000000000 --- a/suites/upgrade/old/small/rados/3-upgrade-sequence/upgrade.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: emperor diff --git a/suites/upgrade/old/small/rados/4-restart/restart.yaml b/suites/upgrade/old/small/rados/4-restart/restart.yaml deleted file mode 100644 index 78e14e9472a..00000000000 --- a/suites/upgrade/old/small/rados/4-restart/restart.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3] diff --git a/suites/upgrade/old/small/rados/5-emperor-workload/emperor.yaml b/suites/upgrade/old/small/rados/5-emperor-workload/emperor.yaml deleted file mode 100644 index 58d439c7da9..00000000000 --- a/suites/upgrade/old/small/rados/5-emperor-workload/emperor.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: emperor - clients: - client.0: - - rados/test.sh diff --git a/suites/upgrade/old/small/rados/distro/centos_6.4.yaml b/suites/upgrade/old/small/rados/distro/centos_6.4.yaml deleted file mode 100644 index 02383cd5f8c..00000000000 --- a/suites/upgrade/old/small/rados/distro/centos_6.4.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: centos -os_version: "6.4" diff --git a/suites/upgrade/old/small/rados/distro/debian_7.0.yaml b/suites/upgrade/old/small/rados/distro/debian_7.0.yaml deleted file mode 100644 index 8100dc41e3d..00000000000 --- a/suites/upgrade/old/small/rados/distro/debian_7.0.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: debian -os_version: "7.0" diff --git a/suites/upgrade/old/small/rados/distro/fedora_18.yaml b/suites/upgrade/old/small/rados/distro/fedora_18.yaml deleted file mode 100644 index 07872aa7edf..00000000000 --- a/suites/upgrade/old/small/rados/distro/fedora_18.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: fedora -os_version: "18" diff --git a/suites/upgrade/old/small/rados/distro/rhel_6.3.yaml b/suites/upgrade/old/small/rados/distro/rhel_6.3.yaml deleted file mode 100644 index 6a8edcd5626..00000000000 --- a/suites/upgrade/old/small/rados/distro/rhel_6.3.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: rhel -os_version: 
"6.3" diff --git a/suites/upgrade/old/small/rados/distro/rhel_6.4.yaml b/suites/upgrade/old/small/rados/distro/rhel_6.4.yaml deleted file mode 100644 index 5225495834a..00000000000 --- a/suites/upgrade/old/small/rados/distro/rhel_6.4.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: rhel -os_version: "6.4" diff --git a/suites/upgrade/old/small/rados/distro/ubuntu_12.04.yaml b/suites/upgrade/old/small/rados/distro/ubuntu_12.04.yaml deleted file mode 100644 index dbc3a8d9c58..00000000000 --- a/suites/upgrade/old/small/rados/distro/ubuntu_12.04.yaml +++ /dev/null @@ -1,2 +0,0 @@ -os_type: ubuntu -os_version: "12.04" diff --git a/suites/upgrade/old/small/rbd/% b/suites/upgrade/old/small/rbd/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/small/rbd/0-cluster/start.yaml b/suites/upgrade/old/small/rbd/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/small/rbd/0-cluster/start.yaml +++ /dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/small/rbd/1-dumpling-install/dumpling.yaml b/suites/upgrade/old/small/rbd/1-dumpling-install/dumpling.yaml deleted file mode 100644 index c98631e2bbd..00000000000 --- a/suites/upgrade/old/small/rbd/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - fs: xfs diff --git a/suites/upgrade/old/small/rbd/2-workload/workload.yaml b/suites/upgrade/old/small/rbd/2-workload/workload.yaml deleted file mode 100644 index 364ef25f31c..00000000000 --- a/suites/upgrade/old/small/rbd/2-workload/workload.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- workunit: - branch: dumpling - clients: - client.0: - - rbd/import_export.sh - env: - RBD_CREATE_ARGS: --new-format diff --git a/suites/upgrade/old/small/rbd/3-upgrade-sequence/upgrade-all.yaml b/suites/upgrade/old/small/rbd/3-upgrade-sequence/upgrade-all.yaml deleted file mode 100644 index e473f31862d..00000000000 --- a/suites/upgrade/old/small/rbd/3-upgrade-sequence/upgrade-all.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: emperor diff --git a/suites/upgrade/old/small/rbd/4-restart/restart.yaml b/suites/upgrade/old/small/rbd/4-restart/restart.yaml deleted file mode 100644 index e8fe288f657..00000000000 --- a/suites/upgrade/old/small/rbd/4-restart/restart.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a] diff --git a/suites/upgrade/old/small/rbd/5-emperor-workload/final.yaml b/suites/upgrade/old/small/rbd/5-emperor-workload/final.yaml deleted file mode 100644 index 19b5bd3b557..00000000000 --- a/suites/upgrade/old/small/rbd/5-emperor-workload/final.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- workunit: - branch: emperor - clients: - client.0: - - cls/test_cls_rbd.sh diff --git a/suites/upgrade/old/small/rbd/distro b/suites/upgrade/old/small/rbd/distro deleted file mode 120000 index 3a0ac71c8af..00000000000 --- a/suites/upgrade/old/small/rbd/distro +++ /dev/null @@ -1 +0,0 @@ -../rados/distro \ No newline at end of file diff --git a/suites/upgrade/old/small/rgw/% b/suites/upgrade/old/small/rgw/% deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/suites/upgrade/old/small/rgw/0-cluster/start.yaml b/suites/upgrade/old/small/rgw/0-cluster/start.yaml deleted file mode 100644 index 01747e42056..00000000000 --- a/suites/upgrade/old/small/rgw/0-cluster/start.yaml +++ 
/dev/null @@ -1,10 +0,0 @@ -roles: -- - mon.a - - mds.a - - osd.0 - - osd.1 -- - mon.b - - mon.c - - osd.2 - - osd.3 -- - client.0 diff --git a/suites/upgrade/old/small/rgw/1-dumpling-install/dumpling.yaml b/suites/upgrade/old/small/rgw/1-dumpling-install/dumpling.yaml deleted file mode 100644 index fe05a61ff31..00000000000 --- a/suites/upgrade/old/small/rgw/1-dumpling-install/dumpling.yaml +++ /dev/null @@ -1,6 +0,0 @@ -tasks: -- install: - branch: dumpling -- ceph: - fs: xfs -- rgw: [client.0] diff --git a/suites/upgrade/old/small/rgw/2-workload/s3tests.yaml b/suites/upgrade/old/small/rgw/2-workload/s3tests.yaml deleted file mode 100644 index 6e7449ebcd8..00000000000 --- a/suites/upgrade/old/small/rgw/2-workload/s3tests.yaml +++ /dev/null @@ -1,5 +0,0 @@ -tasks: -- s3tests: - client.0: - rgw_server: client.0 - force-branch: dumpling diff --git a/suites/upgrade/old/small/rgw/3-upgrade-sequence/upgrade-all.yaml b/suites/upgrade/old/small/rgw/3-upgrade-sequence/upgrade-all.yaml deleted file mode 100644 index e473f31862d..00000000000 --- a/suites/upgrade/old/small/rgw/3-upgrade-sequence/upgrade-all.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- install.upgrade: - all: - branch: emperor diff --git a/suites/upgrade/old/small/rgw/4-restart/restart.yaml b/suites/upgrade/old/small/rgw/4-restart/restart.yaml deleted file mode 100644 index f9606ef70cc..00000000000 --- a/suites/upgrade/old/small/rgw/4-restart/restart.yaml +++ /dev/null @@ -1,2 +0,0 @@ -tasks: -- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0] diff --git a/suites/upgrade/old/small/rgw/5-emperor-workload/final.yaml b/suites/upgrade/old/small/rgw/5-emperor-workload/final.yaml deleted file mode 100644 index 573cffbc30a..00000000000 --- a/suites/upgrade/old/small/rgw/5-emperor-workload/final.yaml +++ /dev/null @@ -1,4 +0,0 @@ -tasks: -- s3tests: - client.0: - rgw_server: client.0 diff --git a/suites/upgrade/old/small/rgw/distro b/suites/upgrade/old/small/rgw/distro deleted file mode 120000 index 3a0ac71c8af..00000000000 --- a/suites/upgrade/old/small/rgw/distro +++ /dev/null @@ -1 +0,0 @@ -../rados/distro \ No newline at end of file diff --git a/suites/workload/cifs-dbench.yaml b/suites/workload/cifs-dbench.yaml deleted file mode 100644 index c13c1c099e5..00000000000 --- a/suites/workload/cifs-dbench.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- cifs-mount: - client.1: - share: ceph -- workunit: - clients: - client.1: - - suites/dbench.sh diff --git a/suites/workload/cifs-fsstress.yaml b/suites/workload/cifs-fsstress.yaml deleted file mode 100644 index ff003af3433..00000000000 --- a/suites/workload/cifs-fsstress.yaml +++ /dev/null @@ -1,8 +0,0 @@ -tasks: -- cifs-mount: - client.1: - share: ceph -- workunit: - clients: - client.1: - - suites/fsstress.sh diff --git a/suites/workload/cifs-kernel-build.yaml.disabled b/suites/workload/cifs-kernel-build.yaml.disabled deleted file mode 100644 index ab9ff8ac731..00000000000 --- a/suites/workload/cifs-kernel-build.yaml.disabled +++ /dev/null @@ -1,9 +0,0 @@ -tasks: -- cifs-mount: - client.1: - share: ceph -- workunit: - clients: - client.1: - - kernel_untar_build.sh - diff --git a/suites/workload/smbtorture.yaml b/suites/workload/smbtorture.yaml deleted file mode 100644 index 823489a2082..00000000000 --- a/suites/workload/smbtorture.yaml +++ /dev/null @@ -1,39 +0,0 @@ -tasks: -- pexec: - client.1: - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass - 
- /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1 - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb -# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon -# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number - - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl -# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid diff --git a/tasks/__init__.py b/tasks/__init__.py deleted file mode 100644 index 9a7949a001e..00000000000 --- a/tasks/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -import logging - -# Inherit teuthology's log level -teuthology_log = logging.getLogger('teuthology') -log = logging.getLogger(__name__) -log.setLevel(teuthology_log.level) diff --git a/tasks/admin_socket.py b/tasks/admin_socket.py deleted file mode 100644 index 71f631ad7e5..00000000000 --- a/tasks/admin_socket.py +++ /dev/null @@ -1,192 +0,0 @@ -""" -Admin Socket task -- used in rados, powercycle, and smoke testing -""" -from cStringIO import StringIO - -import json -import logging -import os -import time 
- -from teuthology.orchestra import run -from teuthology import misc as teuthology -from teuthology.parallel import parallel - -log = logging.getLogger(__name__) - - -def task(ctx, config): - """ - Run an admin socket command, make sure the output is json, and run - a test program on it. The test program should read json from - stdin. This task succeeds if the test program exits with status 0. - - To run the same test on all clients:: - - tasks: - - ceph: - - rados: - - admin_socket: - all: - dump_requests: - test: http://example.com/script - - To restrict it to certain clients:: - - tasks: - - ceph: - - rados: [client.1] - - admin_socket: - client.1: - dump_requests: - test: http://example.com/script - - If an admin socket command has arguments, they can be specified as - a list:: - - tasks: - - ceph: - - rados: [client.0] - - admin_socket: - client.0: - dump_requests: - test: http://example.com/script - help: - test: http://example.com/test_help_version - args: [version] - - Note that there must be a ceph client with an admin socket running - before this task is run. The tests are parallelized at the client - level. Tests for a single client are run serially. - - :param ctx: Context - :param config: Configuration - """ - assert isinstance(config, dict), \ - 'admin_socket task requires a dict for configuration' - teuthology.replace_all_with_clients(ctx.cluster, config) - - with parallel() as ptask: - for client, tests in config.iteritems(): - ptask.spawn(_run_tests, ctx, client, tests) - - -def _socket_command(ctx, remote, socket_path, command, args): - """ - Run an admin socket command and return the result as a string. - - :param ctx: Context - :param remote: Remote site - :param socket_path: path to socket - :param command: command to be run remotely - :param args: command arguments - - :returns: output of command in json format - """ - json_fp = StringIO() - testdir = teuthology.get_testdir(ctx) - max_tries = 60 - while True: - proc = remote.run( - args=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'ceph', - '--admin-daemon', socket_path, - ] + command.split(' ') + args, - stdout=json_fp, - check_status=False, - ) - if proc.exitstatus == 0: - break - assert max_tries > 0 - max_tries -= 1 - log.info('ceph cli returned an error, command not registered yet?') - log.info('sleeping and retrying ...') - time.sleep(1) - out = json_fp.getvalue() - json_fp.close() - log.debug('admin socket command %s returned %s', command, out) - return json.loads(out) - -def _run_tests(ctx, client, tests): - """ - Create a temp directory and wait for a client socket to be created. - For each test, copy the executable locally and run the test. - Remove temp directory when finished. 
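The admin_socket task above only downloads the program named by each command's `test:` URL; the checker scripts themselves are not part of this tree. As context for the contract the docstring describes (the task pipes the command's JSON output into the script's stdin and treats a zero exit status as success), a minimal checker might look like the sketch below. The field it inspects (`version`) is purely illustrative, not something the task requires.

```python
#!/usr/bin/env python
# Illustrative admin-socket checker: the admin_socket task feeds the JSON
# output of the configured command into stdin and succeeds if we exit 0.
import json
import sys


def main():
    data = json.load(sys.stdin)  # fails loudly if the output is not JSON
    if not isinstance(data, dict):
        sys.exit('expected a JSON object, got %r' % type(data))
    # The key checked here is an assumption made for the example; a real
    # test would assert on whatever the chosen command actually reports.
    if 'version' not in data:
        sys.exit('missing "version" in admin socket output')
    sys.exit(0)


if __name__ == '__main__':
    main()
```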
- - :param ctx: Context - :param client: client machine to run the test - :param tests: list of tests to run - """ - testdir = teuthology.get_testdir(ctx) - log.debug('Running admin socket tests on %s', client) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() - socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client) - overrides = ctx.config.get('overrides', {}).get('admin_socket', {}) - - try: - tmp_dir = os.path.join( - testdir, - 'admin_socket_{client}'.format(client=client), - ) - remote.run( - args=[ - 'mkdir', - '--', - tmp_dir, - run.Raw('&&'), - # wait for client process to create the socket - 'while', 'test', '!', '-e', socket_path, run.Raw(';'), - 'do', 'sleep', '1', run.Raw(';'), 'done', - ], - ) - - for command, config in tests.iteritems(): - if config is None: - config = {} - teuthology.deep_merge(config, overrides) - log.debug('Testing %s with config %s', command, str(config)) - - test_path = None - if 'test' in config: - url = config['test'].format( - branch=config.get('branch', 'master') - ) - test_path = os.path.join(tmp_dir, command) - remote.run( - args=[ - 'wget', - '-q', - '-O', - test_path, - '--', - url, - run.Raw('&&'), - 'chmod', - 'u=rx', - '--', - test_path, - ], - ) - - args = config.get('args', []) - assert isinstance(args, list), \ - 'admin socket command args must be a list' - sock_out = _socket_command(ctx, remote, socket_path, command, args) - if test_path is not None: - remote.run( - args=[ - test_path, - ], - stdin=json.dumps(sock_out), - ) - - finally: - remote.run( - args=[ - 'rm', '-rf', '--', tmp_dir, - ], - ) diff --git a/tasks/apache.conf.template b/tasks/apache.conf.template deleted file mode 100644 index ed61bfc5f90..00000000000 --- a/tasks/apache.conf.template +++ /dev/null @@ -1,59 +0,0 @@ - - LoadModule version_module {mod_path}/mod_version.so - - - LoadModule env_module {mod_path}/mod_env.so - - - LoadModule rewrite_module {mod_path}/mod_rewrite.so - - - LoadModule fastcgi_module {mod_path}/mod_fastcgi.so - - - LoadModule log_config_module {mod_path}/mod_log_config.so - - -Listen {port} -ServerName {host} - -= 2.4> - - LoadModule unixd_module {mod_path}/mod_unixd.so - - - LoadModule authz_core_module {mod_path}/mod_authz_core.so - - - LoadModule mpm_worker_module {mod_path}/mod_mpm_worker.so - - User {user} - Group {group} - - -ServerRoot {testdir}/apache -ErrorLog {testdir}/archive/apache.{client}/error.log -LogFormat "%h l %u %t \"%r\" %>s %b \"{{Referer}}i\" \"%{{User-agent}}i\"" combined -CustomLog {testdir}/archive/apache.{client}/access.log combined -PidFile {testdir}/apache/tmp.{client}/apache.pid -DocumentRoot {testdir}/apache/htdocs.{client} -FastCgiIPCDir {testdir}/apache/tmp.{client}/fastcgi_sock -FastCgiExternalServer {testdir}/apache/htdocs.{client}/rgw.fcgi -socket rgw_sock -idle-timeout {idle_timeout} -RewriteEngine On - -RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /rgw.fcgi?page=$1¶ms=$2&%{{QUERY_STRING}} [E=HTTP_AUTHORIZATION:%{{HTTP:Authorization}},L] - -# Set fastcgi environment variables. -# Note that this is separate from Unix environment variables! -SetEnv RGW_LOG_LEVEL 20 -SetEnv RGW_SHOULD_LOG yes -SetEnv RGW_PRINT_CONTINUE {print_continue} - - - Options +ExecCGI - AllowOverride All - SetHandler fastcgi-script - - -AllowEncodedSlashes On -ServerSignature Off diff --git a/tasks/autotest.py b/tasks/autotest.py deleted file mode 100644 index efa972123d2..00000000000 --- a/tasks/autotest.py +++ /dev/null @@ -1,166 +0,0 @@ -""" -Run an autotest test on the ceph cluster. 
-""" -import json -import logging -import os - -from teuthology import misc as teuthology -from teuthology.parallel import parallel -from teuthology.orchestra import run - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Run an autotest test on the ceph cluster. - - Only autotest client tests are supported. - - The config is a mapping from role name to list of tests to run on - that client. - - For example:: - - tasks: - - ceph: - - ceph-fuse: [client.0, client.1] - - autotest: - client.0: [dbench] - client.1: [bonnie] - - You can also specify a list of tests to run on all clients:: - - tasks: - - ceph: - - ceph-fuse: - - autotest: - all: [dbench] - """ - assert isinstance(config, dict) - config = teuthology.replace_all_with_clients(ctx.cluster, config) - log.info('Setting up autotest...') - testdir = teuthology.get_testdir(ctx) - with parallel() as p: - for role in config.iterkeys(): - (remote,) = ctx.cluster.only(role).remotes.keys() - p.spawn(_download, testdir, remote) - - log.info('Making a separate scratch dir for every client...') - for role in config.iterkeys(): - assert isinstance(role, basestring) - PREFIX = 'client.' - assert role.startswith(PREFIX) - id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) - remote.run( - args=[ - 'sudo', - 'install', - '-d', - '-m', '0755', - '--owner={user}'.format(user='ubuntu'), #TODO - '--', - scratch, - ], - ) - - with parallel() as p: - for role, tests in config.iteritems(): - (remote,) = ctx.cluster.only(role).remotes.keys() - p.spawn(_run_tests, testdir, remote, role, tests) - -def _download(testdir, remote): - """ - Download. Does not explicitly support muliple tasks in a single run. - """ - remote.run( - args=[ - # explicitly does not support multiple autotest tasks - # in a single run; the result archival would conflict - 'mkdir', '{tdir}/archive/autotest'.format(tdir=testdir), - run.Raw('&&'), - 'mkdir', '{tdir}/autotest'.format(tdir=testdir), - run.Raw('&&'), - 'wget', - '-nv', - '--no-check-certificate', - 'https://github.com/ceph/autotest/tarball/ceph', - '-O-', - run.Raw('|'), - 'tar', - '-C', '{tdir}/autotest'.format(tdir=testdir), - '-x', - '-z', - '-f-', - '--strip-components=1', - ], - ) - -def _run_tests(testdir, remote, role, tests): - """ - Spawned to run test on remote site - """ - assert isinstance(role, basestring) - PREFIX = 'client.' 
- assert role.startswith(PREFIX) - id_ = role[len(PREFIX):] - mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) - - assert isinstance(tests, list) - for idx, testname in enumerate(tests): - log.info('Running autotest client test #%d: %s...', idx, testname) - - tag = 'client.{id}.num{idx}.{testname}'.format( - idx=idx, - testname=testname, - id=id_, - ) - control = '{tdir}/control.{tag}'.format(tdir=testdir, tag=tag) - teuthology.write_file( - remote=remote, - path=control, - data='import json; data=json.loads({data!r}); job.run_test(**data)'.format( - data=json.dumps(dict( - url=testname, - dir=scratch, - # TODO perhaps tag - # results will be in {testdir}/autotest/client/results/dbench - # or {testdir}/autotest/client/results/dbench.{tag} - )), - ), - ) - remote.run( - args=[ - '{tdir}/autotest/client/bin/autotest'.format(tdir=testdir), - '--verbose', - '--harness=simple', - '--tag={tag}'.format(tag=tag), - control, - run.Raw('3>&1'), - ], - ) - - remote.run( - args=[ - 'rm', '-rf', '--', control, - ], - ) - - remote.run( - args=[ - 'mv', - '--', - '{tdir}/autotest/client/results/{tag}'.format(tdir=testdir, tag=tag), - '{tdir}/archive/autotest/{tag}'.format(tdir=testdir, tag=tag), - ], - ) - - remote.run( - args=[ - 'rm', '-rf', '--', '{tdir}/autotest'.format(tdir=testdir), - ], - ) diff --git a/tasks/blktrace.py b/tasks/blktrace.py deleted file mode 100644 index 401f9e39f64..00000000000 --- a/tasks/blktrace.py +++ /dev/null @@ -1,93 +0,0 @@ -""" -Run blktrace program through teuthology -""" -import contextlib -import logging - -from teuthology import misc as teuthology -from teuthology import contextutil -from teuthology.orchestra import run - -log = logging.getLogger(__name__) -blktrace = '/usr/sbin/blktrace' -daemon_signal = 'term' - -@contextlib.contextmanager -def setup(ctx, config): - """ - Setup all the remotes - """ - osds = ctx.cluster.only(teuthology.is_type('osd')) - log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx)) - - for remote, roles_for_host in osds.remotes.iteritems(): - log.info('Creating %s on %s' % (log_dir, remote.name)) - remote.run( - args=['mkdir', '-p', '-m0755', '--', log_dir], - wait=False, - ) - yield - -@contextlib.contextmanager -def execute(ctx, config): - """ - Run the blktrace program on remote machines. - """ - procs = [] - testdir = teuthology.get_testdir(ctx) - log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir) - - osds = ctx.cluster.only(teuthology.is_type('osd')) - for remote, roles_for_host in osds.remotes.iteritems(): - roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote] - for id_ in teuthology.roles_of_type(roles_for_host, 'osd'): - if roles_to_devs.get(id_): - dev = roles_to_devs[id_] - log.info("running blktrace on %s: %s" % (remote.name, dev)) - - proc = remote.run( - args=[ - 'cd', - log_dir, - run.Raw(';'), - 'daemon-helper', - daemon_signal, - 'sudo', - blktrace, - '-o', - dev.rsplit("/", 1)[1], - '-d', - dev, - ], - wait=False, - stdin=run.PIPE, - ) - procs.append(proc) - try: - yield - finally: - osds = ctx.cluster.only(teuthology.is_type('osd')) - log.info('stopping blktrace processs') - for proc in procs: - proc.stdin.close() - -@contextlib.contextmanager -def task(ctx, config): - """ - Usage: - blktrace: - - Runs blktrace on all clients. 
- """ - if config is None: - config = dict(('client.{id}'.format(id=id_), None) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')) - elif isinstance(config, list): - config = dict.fromkeys(config) - - with contextutil.nested( - lambda: setup(ctx=ctx, config=config), - lambda: execute(ctx=ctx, config=config), - ): - yield - diff --git a/tasks/boto.cfg.template b/tasks/boto.cfg.template deleted file mode 100644 index cdfe8873b42..00000000000 --- a/tasks/boto.cfg.template +++ /dev/null @@ -1,2 +0,0 @@ -[Boto] -http_socket_timeout = {idle_timeout} diff --git a/tasks/calamari/http_client.py b/tasks/calamari/http_client.py deleted file mode 100755 index 84a03c7bfa0..00000000000 --- a/tasks/calamari/http_client.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python - -import json -import logging -import requests - -log = logging.getLogger(__name__) - - -class AuthenticatedHttpClient(requests.Session): - """ - Client for the calamari REST API, principally exists to do - authentication, but also helpfully prefixes - URLs in requests with the API base URL and JSONizes - POST data. - """ - def __init__(self, api_url, username, password): - super(AuthenticatedHttpClient, self).__init__() - self._username = username - self._password = password - self._api_url = api_url - self.headers = { - 'Content-type': "application/json; charset=UTF-8" - } - - def request(self, method, url, **kwargs): - if not url.startswith('/'): - url = self._api_url + url - response = super(AuthenticatedHttpClient, self).request(method, url, **kwargs) - if response.status_code >= 400: - # For the benefit of test logs - print "%s: %s" % (response.status_code, response.content) - return response - - def post(self, url, data=None, **kwargs): - if isinstance(data, dict): - data = json.dumps(data) - return super(AuthenticatedHttpClient, self).post(url, data, **kwargs) - - def patch(self, url, data=None, **kwargs): - if isinstance(data, dict): - data = json.dumps(data) - return super(AuthenticatedHttpClient, self).patch(url, data, **kwargs) - - def login(self): - """ - Authenticate with the Django auth system as - it is exposed in the Calamari REST API. - """ - log.info("Logging in as %s" % self._username) - response = self.get("auth/login/") - response.raise_for_status() - self.headers['X-XSRF-TOKEN'] = response.cookies['XSRF-TOKEN'] - - self.post("auth/login/", { - 'next': "/", - 'username': self._username, - 'password': self._password - }) - response.raise_for_status() - - # Check we're allowed in now. 
- response = self.get("cluster") - response.raise_for_status() - -if __name__ == "__main__": - - import argparse - - p = argparse.ArgumentParser() - p.add_argument('-u', '--uri', default='http://mira035/api/v1/') - p.add_argument('--user', default='admin') - p.add_argument('--pass', dest='password', default='admin') - args, remainder = p.parse_known_args() - - c = AuthenticatedHttpClient(args.uri, args.user, args.password) - c.login() - response = c.request('GET', ''.join(remainder)).json() - print json.dumps(response, indent=2) diff --git a/tasks/calamari/servertest_1_0.py b/tasks/calamari/servertest_1_0.py deleted file mode 100755 index b9b07a39052..00000000000 --- a/tasks/calamari/servertest_1_0.py +++ /dev/null @@ -1,269 +0,0 @@ -#!/usr/bin/env python - -import datetime -import os -import logging -import logging.handlers -import requests -import uuid -import unittest -from http_client import AuthenticatedHttpClient - -log = logging.getLogger(__name__) -log.addHandler(logging.StreamHandler()) -log.setLevel(logging.INFO) - -global base_uri -global client -base_uri = None -server_uri = None -client = None - -def setUpModule(): - global base_uri - global server_uri - global client - try: - base_uri = os.environ['CALAMARI_BASE_URI'] - except KeyError: - log.error('Must define CALAMARI_BASE_URI') - os._exit(1) - if not base_uri.endswith('/'): - base_uri += '/' - if not base_uri.endswith('api/v1/'): - base_uri += 'api/v1/' - client = AuthenticatedHttpClient(base_uri, 'admin', 'admin') - server_uri = base_uri.replace('api/v1/', '') - client.login() - -class RestTest(unittest.TestCase): - 'Base class for all tests here; get class\'s data' - - def setUp(self): - # Called once for each test_* case. A bit wasteful, but we - # really like using the simple class variable self.uri - # to customize each derived TestCase - method = getattr(self, 'method', 'GET') - raw = self.uri.startswith('/') - self.response = self.get_object(method, self.uri, raw=raw) - - def get_object(self, method, url, raw=False): - global server_uri - 'Return Python object decoded from JSON response to method/url' - if not raw: - return client.request(method, url).json() - else: - return requests.request(method, server_uri + url).json() - -class TestUserMe(RestTest): - - uri = 'user/me' - - def test_me(self): - self.assertEqual(self.response['username'], 'admin') - -class TestCluster(RestTest): - - uri = 'cluster' - - def test_id(self): - self.assertEqual(self.response[0]['id'], 1) - - def test_times(self): - for time in ( - self.response[0]['cluster_update_time'], - self.response[0]['cluster_update_attempt_time'], - ): - self.assertTrue(is_datetime(time)) - - def test_api_base_url(self): - api_base_url = self.response[0]['api_base_url'] - self.assertTrue(api_base_url.startswith('http')) - self.assertIn('api/v0.1', api_base_url) - -class TestHealth(RestTest): - - uri = 'cluster/1/health' - - def test_cluster(self): - self.assertEqual(self.response['cluster'], 1) - - def test_times(self): - for time in ( - self.response['cluster_update_time'], - self.response['added'], - ): - self.assertTrue(is_datetime(time)) - - def test_report_and_overall_status(self): - self.assertIn('report', self.response) - self.assertIn('overall_status', self.response['report']) - -class TestHealthCounters(RestTest): - - uri = 'cluster/1/health_counters' - - def test_cluster(self): - self.assertEqual(self.response['cluster'], 1) - - def test_time(self): - self.assertTrue(is_datetime(self.response['cluster_update_time'])) - - def test_existence(self): - 
for section in ('pg', 'mon', 'osd'): - for counter in ('warn', 'critical', 'ok'): - count = self.response[section][counter]['count'] - self.assertIsInstance(count, int) - self.assertIsInstance(self.response['pool']['total'], int) - - def test_mds_sum(self): - count = self.response['mds'] - self.assertEqual( - count['up_not_in'] + count['not_up_not_in'] + count['up_in'], - count['total'] - ) - -class TestSpace(RestTest): - - uri = 'cluster/1/space' - - def test_cluster(self): - self.assertEqual(self.response['cluster'], 1) - - def test_times(self): - for time in ( - self.response['cluster_update_time'], - self.response['added'], - ): - self.assertTrue(is_datetime(time)) - - def test_space(self): - for size in ('free_bytes', 'used_bytes', 'capacity_bytes'): - self.assertIsInstance(self.response['space'][size], int) - self.assertGreater(self.response['space'][size], 0) - - def test_report(self): - for size in ('total_used', 'total_space', 'total_avail'): - self.assertIsInstance(self.response['report'][size], int) - self.assertGreater(self.response['report'][size], 0) - -class TestOSD(RestTest): - - uri = 'cluster/1/osd' - - def test_cluster(self): - self.assertEqual(self.response['cluster'], 1) - - def test_times(self): - for time in ( - self.response['cluster_update_time'], - self.response['added'], - ): - self.assertTrue(is_datetime(time)) - - def test_osd_uuid(self): - for osd in self.response['osds']: - uuidobj = uuid.UUID(osd['uuid']) - self.assertEqual(str(uuidobj), osd['uuid']) - - def test_osd_pools(self): - for osd in self.response['osds']: - if osd['up'] != 1: - continue - self.assertIsInstance(osd['pools'], list) - self.assertIsInstance(osd['pools'][0], basestring) - - def test_osd_up_in(self): - for osd in self.response['osds']: - for flag in ('up', 'in'): - self.assertIn(osd[flag], (0, 1)) - - def test_osd_0(self): - osd0 = self.get_object('GET', 'cluster/1/osd/0')['osd'] - for field in osd0.keys(): - if not field.startswith('cluster_update_time'): - self.assertEqual(self.response['osds'][0][field], osd0[field]) - -class TestPool(RestTest): - - uri = 'cluster/1/pool' - - def test_cluster(self): - for pool in self.response: - self.assertEqual(pool['cluster'], 1) - - def test_fields_are_ints(self): - for pool in self.response: - for field in ('id', 'used_objects', 'used_bytes'): - self.assertIsInstance(pool[field], int) - - def test_name_is_str(self): - for pool in self.response: - self.assertIsInstance(pool['name'], basestring) - - def test_pool_0(self): - poolid = self.response[0]['id'] - pool = self.get_object('GET', 'cluster/1/pool/{id}'.format(id=poolid)) - self.assertEqual(self.response[0], pool) - -class TestServer(RestTest): - - uri = 'cluster/1/server' - - def test_ipaddr(self): - for server in self.response: - octets = server['addr'].split('.') - self.assertEqual(len(octets), 4) - for octetstr in octets: - octet = int(octetstr) - self.assertIsInstance(octet, int) - self.assertGreaterEqual(octet, 0) - self.assertLessEqual(octet, 255) - - def test_hostname_name_strings(self): - for server in self.response: - for field in ('name', 'hostname'): - self.assertIsInstance(server[field], basestring) - - def test_services(self): - for server in self.response: - self.assertIsInstance(server['services'], list) - for service in server['services']: - self.assertIn(service['type'], ('osd', 'mon', 'mds')) - -class TestGraphitePoolIOPS(RestTest): - - uri = '/graphite/render?format=json-array&' \ - 'target=ceph.cluster.ceph.pool.0.num_read&' \ - 'target=ceph.cluster.ceph.pool.0.num_write' 
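The assertions in TestGraphitePoolIOPS that follow imply the shape of the `json-array` render format these tests expect: the list of requested target names, plus one three-element row per sampling interval. The concrete numbers in the sketch below are invented; only the structure is inferred from the test code, and the interpretation of the first element as an interval timestamp is an assumption.

```python
# Shape the graphite json-array tests appear to assume (values made up):
#   datapoints[i] == [interval_timestamp, value_for_target_1, value_for_target_2]
# A value may be None when graphite has no sample for that interval.
example_response = {
    'targets': [
        'ceph.cluster.ceph.pool.0.num_read',
        'ceph.cluster.ceph.pool.0.num_write',
    ],
    'datapoints': [
        [1385087340, 12.0, 7.5],
        [1385087400, None, 3.25],
    ],
}

# The same checks the test case performs:
row = example_response['datapoints'][0]
assert len(row) == 3
assert isinstance(row[0], int)
for value in row[1:]:
    if value:
        assert isinstance(value, float)
```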
- - def test_targets_contain_request(self): - self.assertIn('targets', self.response) - self.assertIn('ceph.cluster.ceph.pool.0.num_read', - self.response['targets']) - self.assertIn('ceph.cluster.ceph.pool.0.num_write', - self.response['targets']) - - def test_datapoints(self): - self.assertIn('datapoints', self.response) - self.assertGreater(len(self.response['datapoints']), 0) - data = self.response['datapoints'][0] - self.assertEqual(len(data), 3) - self.assertIsInstance(data[0], int) - if data[1]: - self.assertIsInstance(data[1], float) - if data[2]: - self.assertIsInstance(data[2], float) - -# -# Utility functions -# - -DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' - -def is_datetime(time): - datetime.datetime.strptime(time, DATETIME_FORMAT) - return True - -if __name__ == '__main__': - unittest.main() diff --git a/tasks/ceph.py b/tasks/ceph.py deleted file mode 100644 index 2abc11019d8..00000000000 --- a/tasks/ceph.py +++ /dev/null @@ -1,1197 +0,0 @@ -""" -Ceph cluster task. - -Handle the setup, starting, and clean-up of a Ceph cluster. -""" -from cStringIO import StringIO - -import argparse -import contextlib -import logging -import os -import json -import time - -from ceph_manager import CephManager -from teuthology import misc as teuthology -from teuthology import contextutil -from teuthology.orchestra import run -from teuthology.orchestra.daemon import DaemonGroup -import ceph_client as cclient - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def ceph_log(ctx, config): - """ - Create /var/log/ceph log directory that is open to everyone. - Add valgrind and profiling-logger directories. - - :param ctx: Context - :param config: Configuration - """ - log.info('Making ceph log dir writeable by non-root...') - run.wait( - ctx.cluster.run( - args=[ - 'sudo', - 'chmod', - '777', - '/var/log/ceph', - ], - wait=False, - ) - ) - log.info('Disabling ceph logrotate...') - run.wait( - ctx.cluster.run( - args=[ - 'sudo', - 'rm', '-f', '--', - '/etc/logrotate.d/ceph', - ], - wait=False, - ) - ) - log.info('Creating extra log directories...') - run.wait( - ctx.cluster.run( - args=[ - 'sudo', - 'install', '-d', '-m0755', '--', - '/var/log/ceph/valgrind', - '/var/log/ceph/profiling-logger', - ], - wait=False, - ) - ) - - try: - yield - - finally: - pass - - -def assign_devs(roles, devs): - """ - Create a dictionary of devs indexed by roles - - :param roles: List of roles - :param devs: Corresponding list of devices. - :returns: Dictionary of devs indexed by roles. - """ - return dict(zip(roles, devs)) - -@contextlib.contextmanager -def valgrind_post(ctx, config): - """ - After the tests run, look throught all the valgrind logs. Exceptions are raised - if textual errors occured in the logs, or if valgrind exceptions were detected in - the logs. 
- - :param ctx: Context - :param config: Configuration - """ - try: - yield - finally: - lookup_procs = list() - log.info('Checking for errors in any valgrind logs...'); - for remote in ctx.cluster.remotes.iterkeys(): - #look at valgrind logs for each node - proc = remote.run( - args=[ - 'sudo', - 'zgrep', - '', - run.Raw('/var/log/ceph/valgrind/*'), - '/dev/null', # include a second file so that we always get a filename prefix on the output - run.Raw('|'), - 'sort', - run.Raw('|'), - 'uniq', - ], - wait=False, - check_status=False, - stdout=StringIO(), - ) - lookup_procs.append((proc, remote)) - - valgrind_exception = None - for (proc, remote) in lookup_procs: - proc.wait() - out = proc.stdout.getvalue() - for line in out.split('\n'): - if line == '': - continue - try: - (file, kind) = line.split(':') - except Exception: - log.error('failed to split line %s', line) - raise - log.debug('file %s kind %s', file, kind) - if (file.find('mds') >= 0) and kind.find('Lost') > 0: - continue - log.error('saw valgrind issue %s in %s', kind, file) - valgrind_exception = Exception('saw valgrind issues') - - if valgrind_exception is not None: - raise valgrind_exception - - - -@contextlib.contextmanager -def cluster(ctx, config): - """ - Handle the creation and removal of a ceph cluster. - - On startup: - Create directories needed for the cluster. - Create remote journals for all osds. - Create and set keyring. - Copy the monmap to tht test systems. - Setup mon nodes. - Setup mds nodes. - Mkfs osd nodes. - Add keyring information to monmaps - Mkfs mon nodes. - - On exit: - If errors occured, extract a failure message and store in ctx.summary. - Unmount all test files and temporary journaling files. - Save the monitor information and archive all ceph logs. - Cleanup the keyring setup, and remove all monitor map and data files left over. 
- - :param ctx: Context - :param config: Configuration - """ - if ctx.config.get('use_existing_cluster', False) is True: - log.info("'use_existing_cluster' is true; skipping cluster creation") - yield - - testdir = teuthology.get_testdir(ctx) - log.info('Creating ceph cluster...') - run.wait( - ctx.cluster.run( - args=[ - 'install', '-d', '-m0755', '--', - '{tdir}/data'.format(tdir=testdir), - ], - wait=False, - ) - ) - - run.wait( - ctx.cluster.run( - args=[ - 'sudo', - 'install', '-d', '-m0777', '--', '/var/run/ceph', - ], - wait=False, - ) - ) - - - devs_to_clean = {} - remote_to_roles_to_devs = {} - remote_to_roles_to_journals = {} - osds = ctx.cluster.only(teuthology.is_type('osd')) - for remote, roles_for_host in osds.remotes.iteritems(): - devs = teuthology.get_scratch_devices(remote) - roles_to_devs = {} - roles_to_journals = {} - if config.get('fs'): - log.info('fs option selected, checking for scratch devs') - log.info('found devs: %s' % (str(devs),)) - devs_id_map = teuthology.get_wwn_id_map(remote, devs) - iddevs = devs_id_map.values() - roles_to_devs = assign_devs( - teuthology.roles_of_type(roles_for_host, 'osd'), iddevs - ) - if len(roles_to_devs) < len(iddevs): - iddevs = iddevs[len(roles_to_devs):] - devs_to_clean[remote] = [] - - if config.get('block_journal'): - log.info('block journal enabled') - roles_to_journals = assign_devs( - teuthology.roles_of_type(roles_for_host, 'osd'), iddevs - ) - log.info('journal map: %s', roles_to_journals) - - if config.get('tmpfs_journal'): - log.info('tmpfs journal enabled') - roles_to_journals = {} - remote.run( args=[ 'sudo', 'mount', '-t', 'tmpfs', 'tmpfs', '/mnt' ] ) - for osd in teuthology.roles_of_type(roles_for_host, 'osd'): - tmpfs = '/mnt/osd.%s' % osd - roles_to_journals[osd] = tmpfs - remote.run( args=[ 'truncate', '-s', '1500M', tmpfs ] ) - log.info('journal map: %s', roles_to_journals) - - log.info('dev map: %s' % (str(roles_to_devs),)) - remote_to_roles_to_devs[remote] = roles_to_devs - remote_to_roles_to_journals[remote] = roles_to_journals - - - log.info('Generating config...') - remotes_and_roles = ctx.cluster.remotes.items() - roles = [role_list for (remote, role_list) in remotes_and_roles] - ips = [host for (host, port) in (remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)] - conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips) - for remote, roles_to_journals in remote_to_roles_to_journals.iteritems(): - for role, journal in roles_to_journals.iteritems(): - key = "osd." + str(role) - if key not in conf: - conf[key] = {} - conf[key]['osd journal'] = journal - for section, keys in config['conf'].iteritems(): - for key, value in keys.iteritems(): - log.info("[%s] %s = %s" % (section, key, value)) - if section not in conf: - conf[section] = {} - conf[section][key] = value - - if config.get('tmpfs_journal'): - conf['journal dio'] = False - - ctx.ceph = argparse.Namespace() - ctx.ceph.conf = conf - - keyring_path = config.get('keyring_path', '/etc/ceph/ceph.keyring') - - coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) - - firstmon = teuthology.get_first_mon(ctx, config) - - log.info('Setting up %s...' 
% firstmon) - ctx.cluster.only(firstmon).run( - args=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'ceph-authtool', - '--create-keyring', - keyring_path, - ], - ) - ctx.cluster.only(firstmon).run( - args=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'ceph-authtool', - '--gen-key', - '--name=mon.', - keyring_path, - ], - ) - ctx.cluster.only(firstmon).run( - args=[ - 'sudo', - 'chmod', - '0644', - keyring_path, - ], - ) - (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() - fsid = teuthology.create_simple_monmap( - ctx, - remote=mon0_remote, - conf=conf, - ) - if not 'global' in conf: - conf['global'] = {} - conf['global']['fsid'] = fsid - - log.info('Writing ceph.conf for FSID %s...' % fsid) - conf_path = config.get('conf_path', '/etc/ceph/ceph.conf') - conf_fp = StringIO() - conf.write(conf_fp) - conf_fp.seek(0) - writes = ctx.cluster.run( - args=[ - 'sudo', 'mkdir', '-p', '/etc/ceph', run.Raw('&&'), - 'sudo', 'chmod', '0755', '/etc/ceph', run.Raw('&&'), - 'sudo', 'python', - '-c', - 'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))', - conf_path, - run.Raw('&&'), - 'sudo', 'chmod', '0644', conf_path, - ], - stdin=run.PIPE, - wait=False, - ) - teuthology.feed_many_stdins_and_close(conf_fp, writes) - run.wait(writes) - - log.info('Creating admin key on %s...' % firstmon) - ctx.cluster.only(firstmon).run( - args=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'ceph-authtool', - '--gen-key', - '--name=client.admin', - '--set-uid=0', - '--cap', 'mon', 'allow *', - '--cap', 'osd', 'allow *', - '--cap', 'mds', 'allow', - keyring_path, - ], - ) - - log.info('Copying monmap to all nodes...') - keyring = teuthology.get_file( - remote=mon0_remote, - path=keyring_path, - ) - monmap = teuthology.get_file( - remote=mon0_remote, - path='{tdir}/monmap'.format(tdir=testdir), - ) - - for rem in ctx.cluster.remotes.iterkeys(): - # copy mon key and initial monmap - log.info('Sending monmap to node {remote}'.format(remote=rem)) - teuthology.sudo_write_file( - remote=rem, - path=keyring_path, - data=keyring, - perms='0644' - ) - teuthology.write_file( - remote=rem, - path='{tdir}/monmap'.format(tdir=testdir), - data=monmap, - ) - - log.info('Setting up mon nodes...') - mons = ctx.cluster.only(teuthology.is_type('mon')) - run.wait( - mons.run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'osdmaptool', - '-c', conf_path, - '--clobber', - '--createsimple', '{num:d}'.format( - num=teuthology.num_instances_of_type(ctx.cluster, 'osd'), - ), - '{tdir}/osdmap'.format(tdir=testdir), - '--pg_bits', '2', - '--pgp_bits', '4', - ], - wait=False, - ), - ) - - log.info('Setting up mds nodes...') - mdss = ctx.cluster.only(teuthology.is_type('mds')) - for remote, roles_for_host in mdss.remotes.iteritems(): - for id_ in teuthology.roles_of_type(roles_for_host, 'mds'): - remote.run( - args=[ - 'sudo', - 'mkdir', - '-p', - '/var/lib/ceph/mds/ceph-{id}'.format(id=id_), - run.Raw('&&'), - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'ceph-authtool', - '--create-keyring', - '--gen-key', - '--name=mds.{id}'.format(id=id_), - '/var/lib/ceph/mds/ceph-{id}/keyring'.format(id=id_), - ], - ) - - cclient.create_keyring(ctx) - log.info('Running mkfs on osd nodes...') - - ctx.disk_config = argparse.Namespace() - ctx.disk_config.remote_to_roles_to_dev = remote_to_roles_to_devs - ctx.disk_config.remote_to_roles_to_journals = remote_to_roles_to_journals - 
ctx.disk_config.remote_to_roles_to_dev_mount_options = {} - ctx.disk_config.remote_to_roles_to_dev_fstype = {} - - log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(r=str(ctx.disk_config.remote_to_roles_to_dev))) - for remote, roles_for_host in osds.remotes.iteritems(): - roles_to_devs = remote_to_roles_to_devs[remote] - roles_to_journals = remote_to_roles_to_journals[remote] - - - for id_ in teuthology.roles_of_type(roles_for_host, 'osd'): - remote.run( - args=[ - 'sudo', - 'mkdir', - '-p', - '/var/lib/ceph/osd/ceph-{id}'.format(id=id_), - ]) - log.info(str(roles_to_journals)) - log.info(id_) - if roles_to_devs.get(id_): - dev = roles_to_devs[id_] - fs = config.get('fs') - package = None - mkfs_options = config.get('mkfs_options') - mount_options = config.get('mount_options') - if fs == 'btrfs': - #package = 'btrfs-tools' - if mount_options is None: - mount_options = ['noatime','user_subvol_rm_allowed'] - if mkfs_options is None: - mkfs_options = ['-m', 'single', - '-l', '32768', - '-n', '32768'] - if fs == 'xfs': - #package = 'xfsprogs' - if mount_options is None: - mount_options = ['noatime'] - if mkfs_options is None: - mkfs_options = ['-f', '-i', 'size=2048'] - if fs == 'ext4' or fs == 'ext3': - if mount_options is None: - mount_options = ['noatime','user_xattr'] - - if mount_options is None: - mount_options = [] - if mkfs_options is None: - mkfs_options = [] - mkfs = ['mkfs.%s' % fs] + mkfs_options - log.info('%s on %s on %s' % (mkfs, dev, remote)) - if package is not None: - remote.run( - args=[ - 'sudo', - 'apt-get', 'install', '-y', package - ], - stdout=StringIO(), - ) - - try: - remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev]) - except run.CommandFailedError: - # Newer btfs-tools doesn't prompt for overwrite, use -f - if '-f' not in mount_options: - mkfs_options.append('-f') - mkfs = ['mkfs.%s' % fs] + mkfs_options - log.info('%s on %s on %s' % (mkfs, dev, remote)) - remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev]) - - log.info('mount %s on %s -o %s' % (dev, remote, - ','.join(mount_options))) - remote.run( - args=[ - 'sudo', - 'mount', - '-t', fs, - '-o', ','.join(mount_options), - dev, - os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)), - ] - ) - if not remote in ctx.disk_config.remote_to_roles_to_dev_mount_options: - ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {} - ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][id_] = mount_options - if not remote in ctx.disk_config.remote_to_roles_to_dev_fstype: - ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {} - ctx.disk_config.remote_to_roles_to_dev_fstype[remote][id_] = fs - devs_to_clean[remote].append( - os.path.join( - os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)), - ) - ) - - for id_ in teuthology.roles_of_type(roles_for_host, 'osd'): - remote.run( - args=[ - 'sudo', - 'MALLOC_CHECK_=3', - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'ceph-osd', - '--mkfs', - '--mkkey', - '-i', id_, - '--monmap', '{tdir}/monmap'.format(tdir=testdir), - ], - ) - - - log.info('Reading keys from all nodes...') - keys_fp = StringIO() - keys = [] - for remote, roles_for_host in ctx.cluster.remotes.iteritems(): - for type_ in ['mds','osd']: - for id_ in teuthology.roles_of_type(roles_for_host, type_): - data = teuthology.get_file( - remote=remote, - path='/var/lib/ceph/{type}/ceph-{id}/keyring'.format( - type=type_, - id=id_, - ), - sudo=True, - ) - keys.append((type_, id_, data)) - keys_fp.write(data) - for remote, roles_for_host 
in ctx.cluster.remotes.iteritems(): - for type_ in ['client']: - for id_ in teuthology.roles_of_type(roles_for_host, type_): - data = teuthology.get_file( - remote=remote, - path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) - ) - keys.append((type_, id_, data)) - keys_fp.write(data) - - log.info('Adding keys to all mons...') - writes = mons.run( - args=[ - 'sudo', 'tee', '-a', - keyring_path, - ], - stdin=run.PIPE, - wait=False, - stdout=StringIO(), - ) - keys_fp.seek(0) - teuthology.feed_many_stdins_and_close(keys_fp, writes) - run.wait(writes) - for type_, id_, data in keys: - run.wait( - mons.run( - args=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'ceph-authtool', - keyring_path, - '--name={type}.{id}'.format( - type=type_, - id=id_, - ), - ] + list(teuthology.generate_caps(type_)), - wait=False, - ), - ) - - log.info('Running mkfs on mon nodes...') - for remote, roles_for_host in mons.remotes.iteritems(): - for id_ in teuthology.roles_of_type(roles_for_host, 'mon'): - remote.run( - args=[ - 'sudo', - 'mkdir', - '-p', - '/var/lib/ceph/mon/ceph-{id}'.format(id=id_), - ], - ) - remote.run( - args=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'ceph-mon', - '--mkfs', - '-i', id_, - '--monmap={tdir}/monmap'.format(tdir=testdir), - '--osdmap={tdir}/osdmap'.format(tdir=testdir), - '--keyring={kpath}'.format(kpath=keyring_path), - ], - ) - - - run.wait( - mons.run( - args=[ - 'rm', - '--', - '{tdir}/monmap'.format(tdir=testdir), - '{tdir}/osdmap'.format(tdir=testdir), - ], - wait=False, - ), - ) - - try: - yield - except Exception: - # we need to know this below - ctx.summary['success'] = False - raise - finally: - (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() - - log.info('Checking cluster log for badness...') - def first_in_ceph_log(pattern, excludes): - """ - Find the first occurence of the pattern specified in the Ceph log, - Returns None if none found. - - :param pattern: Pattern scanned for. - :param excludes: Patterns to ignore. 
- :return: First line of text (or None if not found) - """ - args = [ - 'sudo', - 'egrep', pattern, - '/var/log/ceph/ceph.log', - ] - for exclude in excludes: - args.extend([run.Raw('|'), 'egrep', '-v', exclude]) - args.extend([ - run.Raw('|'), 'head', '-n', '1', - ]) - r = mon0_remote.run( - stdout=StringIO(), - args=args, - ) - stdout = r.stdout.getvalue() - if stdout != '': - return stdout - return None - - if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]', - config['log_whitelist']) is not None: - log.warning('Found errors (ERR|WRN|SEC) in cluster log') - ctx.summary['success'] = False - # use the most severe problem as the failure reason - if 'failure_reason' not in ctx.summary: - for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']: - match = first_in_ceph_log(pattern, config['log_whitelist']) - if match is not None: - ctx.summary['failure_reason'] = \ - '"{match}" in cluster log'.format( - match=match.rstrip('\n'), - ) - break - - for remote, dirs in devs_to_clean.iteritems(): - for dir_ in dirs: - log.info('Unmounting %s on %s' % (dir_, remote)) - remote.run( - args=[ - 'sync', - run.Raw('&&'), - 'sudo', - 'umount', - '-f', - dir_ - ] - ) - - if config.get('tmpfs_journal'): - log.info('tmpfs journal enabled - unmounting tmpfs at /mnt') - for remote, roles_for_host in osds.remotes.iteritems(): - remote.run( - args=[ 'sudo', 'umount', '-f', '/mnt' ], - check_status=False, - ) - - if ctx.archive is not None and \ - not (ctx.config.get('archive-on-error') and ctx.summary['success']): - # archive mon data, too - log.info('Archiving mon data...') - path = os.path.join(ctx.archive, 'data') - os.makedirs(path) - for remote, roles in mons.remotes.iteritems(): - for role in roles: - if role.startswith('mon.'): - teuthology.pull_directory_tarball( - remote, - '/var/lib/ceph/mon', - path + '/' + role + '.tgz') - - # and logs - log.info('Compressing logs...') - run.wait( - ctx.cluster.run( - args=[ - 'sudo', - 'find', - '/var/log/ceph', - '-name', - '*.log', - '-print0', - run.Raw('|'), - 'sudo', - 'xargs', - '-0', - '--no-run-if-empty', - '--', - 'gzip', - '--', - ], - wait=False, - ), - ) - - log.info('Archiving logs...') - path = os.path.join(ctx.archive, 'remote') - os.makedirs(path) - for remote in ctx.cluster.remotes.iterkeys(): - sub = os.path.join(path, remote.shortname) - os.makedirs(sub) - teuthology.pull_directory(remote, '/var/log/ceph', - os.path.join(sub, 'log')) - - - log.info('Cleaning ceph cluster...') - run.wait( - ctx.cluster.run( - args=[ - 'sudo', - 'rm', - '-rf', - '--', - conf_path, - keyring_path, - '{tdir}/data'.format(tdir=testdir), - '{tdir}/monmap'.format(tdir=testdir), - ], - wait=False, - ), - ) - -def get_all_pg_info(rem_site, testdir): - """ - Get the results of a ceph pg dump - """ - info = rem_site.run(args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'ceph', 'pg', 'dump', - '--format', 'json'], stdout=StringIO()) - all_info = json.loads(info.stdout.getvalue()) - return all_info['pg_stats'] - -def osd_scrub_pgs(ctx, config): - """ - Scrub pgs when we exit. - - First make sure all pgs are active and clean. - Next scrub all osds. - Then periodically check until all pgs have scrub time stamps that - indicate the last scrub completed. Time out if no progess is made - here after two minutes. 
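    (Illustrative only.) Each entry returned by get_all_pg_info() is a
    pg_stats dict; only two of its fields are consulted here, roughly of
    the shape::

        {'state': 'active+clean',
         'last_scrub_stamp': '2016-12-14 17:29:55.123456'}

    The stamp is truncated at the '.' and parsed as '%Y-%m-%d %H:%M:%S'
    before being compared with the time scrubbing started.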
- """ - retries = 12 - delays = 10 - vlist = ctx.cluster.remotes.values() - testdir = teuthology.get_testdir(ctx) - rem_site = ctx.cluster.remotes.keys()[0] - all_clean = False - for _ in range(0, retries): - stats = get_all_pg_info(rem_site, testdir) - states = [stat['state'] for stat in stats] - if len(set(states)) == 1 and states[0] == 'active+clean': - all_clean = True - break - log.info("Waiting for all osds to be active and clean.") - time.sleep(delays) - if not all_clean: - log.info("Scrubbing terminated -- not all pgs were active and clean.") - return - check_time_now = time.localtime() - time.sleep(1) - for slists in vlist: - for role in slists: - if role.startswith('osd.'): - log.info("Scrubbing osd {osd}".format(osd=role)) - rem_site.run(args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'ceph', 'osd', 'scrub', role]) - prev_good = 0 - gap_cnt = 0 - loop = True - while loop: - stats = get_all_pg_info(rem_site, testdir) - timez = [stat['last_scrub_stamp'] for stat in stats] - loop = False - thiscnt = 0 - for tmval in timez: - pgtm = time.strptime(tmval[0:tmval.find('.')], '%Y-%m-%d %H:%M:%S') - if pgtm > check_time_now: - thiscnt += 1 - else: - loop = True - if thiscnt > prev_good: - prev_good = thiscnt - gap_cnt = 0 - else: - gap_cnt += 1 - if gap_cnt > retries: - log.info('Exiting scrub checking -- not all pgs scrubbed.') - return - if loop: - log.info('Still waiting for all pgs to be scrubbed.') - time.sleep(delays) - -@contextlib.contextmanager -def run_daemon(ctx, config, type_): - """ - Run daemons for a role type. Handle the startup and termination of a a daemon. - On startup -- set coverages, cpu_profile, valgrind values for all remotes, - and a max_mds value for one mds. - On cleanup -- Stop all existing daemons of this type. - - :param ctx: Context - :param config: Configuration - :paran type_: Role type - """ - log.info('Starting %s daemons...' 
% type_) - testdir = teuthology.get_testdir(ctx) - daemons = ctx.cluster.only(teuthology.is_type(type_)) - - # check whether any daemons if this type are configured - if daemons is None: - return - coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) - - daemon_signal = 'kill' - if config.get('coverage') or config.get('valgrind') is not None: - daemon_signal = 'term' - - num_active = 0 - for remote, roles_for_host in daemons.remotes.iteritems(): - for id_ in teuthology.roles_of_type(roles_for_host, type_): - name = '%s.%s' % (type_, id_) - - if not (id_.endswith('-s')) and (id_.find('-s-') == -1): - num_active += 1 - - run_cmd = [ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'daemon-helper', - daemon_signal, - ] - run_cmd_tail = [ - 'ceph-%s' % (type_), - '-f', - '-i', id_] - - if type_ in config.get('cpu_profile', []): - profile_path = '/var/log/ceph/profiling-logger/%s.%s.prof' % (type_, id_) - run_cmd.extend([ 'env', 'CPUPROFILE=%s' % profile_path ]) - - if config.get('valgrind') is not None: - valgrind_args = None - if type_ in config['valgrind']: - valgrind_args = config['valgrind'][type_] - if name in config['valgrind']: - valgrind_args = config['valgrind'][name] - run_cmd = teuthology.get_valgrind_args(testdir, name, - run_cmd, - valgrind_args) - - run_cmd.extend(run_cmd_tail) - - ctx.daemons.add_daemon(remote, type_, id_, - args=run_cmd, - logger=log.getChild(name), - stdin=run.PIPE, - wait=False, - ) - - if type_ == 'mds': - firstmon = teuthology.get_first_mon(ctx, config) - (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() - - mon0_remote.run(args=[ - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'ceph', - 'mds', 'set_max_mds', str(num_active)]) - - try: - yield - finally: - teuthology.stop_daemons_of_type(ctx, type_) - -def healthy(ctx, config): - """ - Wait for all osd's to be up, and for the ceph health monitor to return HEALTH_OK. - - :param ctx: Context - :param config: Configuration - """ - log.info('Waiting until ceph is healthy...') - firstmon = teuthology.get_first_mon(ctx, config) - (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() - teuthology.wait_until_osds_up( - ctx, - cluster=ctx.cluster, - remote=mon0_remote - ) - teuthology.wait_until_healthy( - ctx, - remote=mon0_remote, - ) - -def wait_for_osds_up(ctx, config): - """ - Wait for all osd's to come up. - - :param ctx: Context - :param config: Configuration - """ - log.info('Waiting until ceph osds are all up...') - firstmon = teuthology.get_first_mon(ctx, config) - (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() - teuthology.wait_until_osds_up( - ctx, - cluster=ctx.cluster, - remote=mon0_remote - ) - -def wait_for_mon_quorum(ctx, config): - """ - Check renote ceph status until all monitors are up. 
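    For example (illustrative; the list names the monitor ids expected in
    quorum)::

        tasks:
        - ceph.wait_for_mon_quorum: [a, b, c]

    The loop below polls 'ceph quorum_status' and compares the sorted
    'quorum_names' field of its JSON output against this list.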
- - :param ctx: Context - :param config: Configuration - """ - - assert isinstance(config, list) - firstmon = teuthology.get_first_mon(ctx, config) - (remote,) = ctx.cluster.only(firstmon).remotes.keys() - while True: - r = remote.run( - args=[ - 'ceph', - 'quorum_status', - ], - stdout=StringIO(), - logger=log.getChild('quorum_status'), - ) - j = json.loads(r.stdout.getvalue()) - q = j.get('quorum_names', []) - log.debug('Quorum: %s', q) - if sorted(q) == sorted(config): - break - time.sleep(1) - - -@contextlib.contextmanager -def restart(ctx, config): - """ - restart ceph daemons - - For example:: - tasks: - - ceph.restart: [all] - - For example:: - tasks: - - ceph.restart: [osd.0, mon.1] - - or:: - - tasks: - - ceph.restart: - daemons: [osd.0, mon.1] - wait-for-healthy: false - wait-for-osds-up: true - - :param ctx: Context - :param config: Configuration - """ - if config is None: - config = {} - if isinstance(config, list): - config = { 'daemons': config } - if 'daemons' not in config: - config['daemons'] = [] - type_daemon = ['mon', 'osd', 'mds', 'rgw'] - for d in type_daemon: - type_ = d - for daemon in ctx.daemons.iter_daemons_of_role(type_): - config['daemons'].append(type_ + '.' + daemon.id_) - - assert isinstance(config['daemons'], list) - daemons = dict.fromkeys(config['daemons']) - for i in daemons.keys(): - type_ = i.split('.', 1)[0] - id_ = i.split('.', 1)[1] - ctx.daemons.get_daemon(type_, id_).stop() - ctx.daemons.get_daemon(type_, id_).restart() - - if config.get('wait-for-healthy', True): - healthy(ctx=ctx, config=None) - if config.get('wait-for-osds-up', False): - wait_for_osds_up(ctx=ctx, config=None) - yield - -@contextlib.contextmanager -def task(ctx, config): - """ - Set up and tear down a Ceph cluster. - - For example:: - - tasks: - - ceph: - - interactive: - - You can also specify what branch to run:: - - tasks: - - ceph: - branch: foo - - Or a tag:: - - tasks: - - ceph: - tag: v0.42.13 - - Or a sha1:: - - tasks: - - ceph: - sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed - - Or a local source dir:: - - tasks: - - ceph: - path: /home/sage/ceph - - To capture code coverage data, use:: - - tasks: - - ceph: - coverage: true - - To use btrfs, ext4, or xfs on the target's scratch disks, use:: - - tasks: - - ceph: - fs: xfs - mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1] - mount_options: [nobarrier, inode64] - - Note, this will cause the task to check the /scratch_devs file on each node - for available devices. If no such file is found, /dev/sdb will be used. - - To run some daemons under valgrind, include their names - and the tool/args to use in a valgrind section:: - - tasks: - - ceph: - valgrind: - mds.1: --tool=memcheck - osd.1: [--tool=memcheck, --leak-check=no] - - Those nodes which are using memcheck or valgrind will get - checked for bad results. - - To adjust or modify config options, use:: - - tasks: - - ceph: - conf: - section: - key: value - - For example:: - - tasks: - - ceph: - conf: - mds.0: - some option: value - other key: other value - client.0: - debug client: 10 - debug ms: 1 - - By default, the cluster log is checked for errors and warnings, - and the run marked failed if any appear. 
You can ignore log - entries by giving a list of egrep compatible regexes, i.e.: - - tasks: - - ceph: - log-whitelist: ['foo.*bar', 'bad message'] - - :param ctx: Context - :param config: Configuration - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - "task ceph only supports a dictionary for configuration" - - overrides = ctx.config.get('overrides', {}) - teuthology.deep_merge(config, overrides.get('ceph', {})) - - ctx.daemons = DaemonGroup() - - testdir = teuthology.get_testdir(ctx) - if config.get('coverage'): - coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) - log.info('Creating coverage directory...') - run.wait( - ctx.cluster.run( - args=[ - 'install', '-d', '-m0755', '--', - coverage_dir, - ], - wait=False, - ) - ) - - with contextutil.nested( - lambda: ceph_log(ctx=ctx, config=None), - lambda: valgrind_post(ctx=ctx, config=config), - lambda: cluster(ctx=ctx, config=dict( - conf=config.get('conf', {}), - fs=config.get('fs', None), - mkfs_options=config.get('mkfs_options', None), - mount_options=config.get('mount_options',None), - block_journal=config.get('block_journal', None), - tmpfs_journal=config.get('tmpfs_journal', None), - log_whitelist=config.get('log-whitelist', []), - cpu_profile=set(config.get('cpu_profile', [])), - )), - lambda: run_daemon(ctx=ctx, config=config, type_='mon'), - lambda: run_daemon(ctx=ctx, config=config, type_='osd'), - lambda: run_daemon(ctx=ctx, config=config, type_='mds'), - ): - try: - if config.get('wait-for-healthy', True): - healthy(ctx=ctx, config=None) - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - ctx.manager = CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - yield - finally: - osd_scrub_pgs(ctx, config) diff --git a/tasks/ceph_client.py b/tasks/ceph_client.py deleted file mode 100644 index d7cfd00be3e..00000000000 --- a/tasks/ceph_client.py +++ /dev/null @@ -1,40 +0,0 @@ -""" -Set up client keyring -""" -import logging - -from teuthology import misc as teuthology -from teuthology.orchestra import run - -log = logging.getLogger(__name__) - -def create_keyring(ctx): - """ - Set up key ring on remote sites - """ - log.info('Setting up client nodes...') - clients = ctx.cluster.only(teuthology.is_type('client')) - testdir = teuthology.get_testdir(ctx) - coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) - for remote, roles_for_host in clients.remotes.iteritems(): - for id_ in teuthology.roles_of_type(roles_for_host, 'client'): - client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) - remote.run( - args=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'ceph-authtool', - '--create-keyring', - '--gen-key', - # TODO this --name= is not really obeyed, all unknown "types" are munged to "client" - '--name=client.{id}'.format(id=id_), - client_keyring, - run.Raw('&&'), - 'sudo', - 'chmod', - '0644', - client_keyring, - ], - ) diff --git a/tasks/ceph_deploy.py b/tasks/ceph_deploy.py deleted file mode 100644 index 058a798e052..00000000000 --- a/tasks/ceph_deploy.py +++ /dev/null @@ -1,464 +0,0 @@ -""" -Execute ceph-deploy as a task -""" -from cStringIO import StringIO - -import contextlib -import os -import time -import logging - -from teuthology import misc as teuthology -from teuthology import contextutil -from teuthology.config import config as teuth_config -from teuthology.task import install as install_fn -from teuthology.orchestra import run - -log = 
logging.getLogger(__name__) - - -@contextlib.contextmanager -def download_ceph_deploy(ctx, config): - """ - Downloads ceph-deploy from the ceph.com git mirror and (by default) - switches to the master branch. If the `ceph-deploy-branch` is specified, it - will use that instead. - """ - log.info('Downloading ceph-deploy...') - testdir = teuthology.get_testdir(ctx) - ceph_admin = teuthology.get_first_mon(ctx, config) - default_cd_branch = {'ceph-deploy-branch': 'master'} - ceph_deploy_branch = config.get( - 'ceph-deploy', - default_cd_branch).get('ceph-deploy-branch') - - ctx.cluster.only(ceph_admin).run( - args=[ - 'git', 'clone', '-b', ceph_deploy_branch, - teuth_config.ceph_git_base_url + 'ceph-deploy.git', - '{tdir}/ceph-deploy'.format(tdir=testdir), - ], - ) - ctx.cluster.only(ceph_admin).run( - args=[ - 'cd', - '{tdir}/ceph-deploy'.format(tdir=testdir), - run.Raw('&&'), - './bootstrap', - ], - ) - - try: - yield - finally: - log.info('Removing ceph-deploy ...') - ctx.cluster.only(ceph_admin).run( - args=[ - 'rm', - '-rf', - '{tdir}/ceph-deploy'.format(tdir=testdir), - ], - ) - - -def is_healthy(ctx, config): - """Wait until a Ceph cluster is healthy.""" - testdir = teuthology.get_testdir(ctx) - ceph_admin = teuthology.get_first_mon(ctx, config) - (remote,) = ctx.cluster.only(ceph_admin).remotes.keys() - max_tries = 90 # 90 tries * 10 secs --> 15 minutes - tries = 0 - while True: - tries += 1 - if tries >= max_tries: - msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes" - raise RuntimeError(msg) - - r = remote.run( - args=[ - 'cd', - '{tdir}'.format(tdir=testdir), - run.Raw('&&'), - 'sudo', 'ceph', - 'health', - ], - stdout=StringIO(), - logger=log.getChild('health'), - ) - out = r.stdout.getvalue() - log.debug('Ceph health: %s', out.rstrip('\n')) - if out.split(None, 1)[0] == 'HEALTH_OK': - break - time.sleep(10) - -def get_nodes_using_roles(ctx, config, role): - """Extract the names of nodes that match a given role from a cluster""" - newl = [] - for _remote, roles_for_host in ctx.cluster.remotes.iteritems(): - for id_ in teuthology.roles_of_type(roles_for_host, role): - rem = _remote - if role == 'mon': - req1 = str(rem).split('@')[-1] - else: - req = str(rem).split('.')[0] - req1 = str(req).split('@')[1] - newl.append(req1) - return newl - -def get_dev_for_osd(ctx, config): - """Get a list of all osd device names.""" - osd_devs = [] - for remote, roles_for_host in ctx.cluster.remotes.iteritems(): - host = remote.name.split('@')[-1] - shortname = host.split('.')[0] - devs = teuthology.get_scratch_devices(remote) - num_osd_per_host = list(teuthology.roles_of_type(roles_for_host, 'osd')) - num_osds = len(num_osd_per_host) - assert num_osds <= len(devs), 'fewer disks than osds on ' + shortname - for dev in devs[:num_osds]: - dev_short = dev.split('/')[-1] - osd_devs.append('{host}:{dev}'.format(host=shortname, dev=dev_short)) - return osd_devs - -def get_all_nodes(ctx, config): - """Return a string of node names separated by blanks""" - nodelist = [] - for t, k in ctx.config['targets'].iteritems(): - host = t.split('@')[-1] - simple_host = host.split('.')[0] - nodelist.append(simple_host) - nodelist = " ".join(nodelist) - return nodelist - -def execute_ceph_deploy(ctx, config, cmd): - """Remotely execute a ceph_deploy command""" - testdir = teuthology.get_testdir(ctx) - ceph_admin = teuthology.get_first_mon(ctx, config) - exec_cmd = cmd - (remote,) = ctx.cluster.only(ceph_admin).remotes.iterkeys() - proc = remote.run( - args = [ - 'cd', - 
'{tdir}/ceph-deploy'.format(tdir=testdir), - run.Raw('&&'), - run.Raw(exec_cmd), - ], - check_status=False, - ) - exitstatus = proc.exitstatus - return exitstatus - - -@contextlib.contextmanager -def build_ceph_cluster(ctx, config): - """Build a ceph cluster""" - - try: - log.info('Building ceph cluster using ceph-deploy...') - testdir = teuthology.get_testdir(ctx) - ceph_branch = None - if config.get('branch') is not None: - cbranch = config.get('branch') - for var, val in cbranch.iteritems(): - if var == 'testing': - ceph_branch = '--{var}'.format(var=var) - ceph_branch = '--{var}={val}'.format(var=var, val=val) - node_dev_list = [] - all_nodes = get_all_nodes(ctx, config) - mds_nodes = get_nodes_using_roles(ctx, config, 'mds') - mds_nodes = " ".join(mds_nodes) - mon_node = get_nodes_using_roles(ctx, config, 'mon') - mon_nodes = " ".join(mon_node) - new_mon = './ceph-deploy new'+" "+mon_nodes - install_nodes = './ceph-deploy install '+ceph_branch+" "+all_nodes - purge_nodes = './ceph-deploy purge'+" "+all_nodes - purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes - mon_hostname = mon_nodes.split(' ')[0] - mon_hostname = str(mon_hostname) - gather_keys = './ceph-deploy gatherkeys'+" "+mon_hostname - deploy_mds = './ceph-deploy mds create'+" "+mds_nodes - no_of_osds = 0 - - if mon_nodes is None: - raise RuntimeError("no monitor nodes in the config file") - - estatus_new = execute_ceph_deploy(ctx, config, new_mon) - if estatus_new != 0: - raise RuntimeError("ceph-deploy: new command failed") - - log.info('adding config inputs...') - testdir = teuthology.get_testdir(ctx) - conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir) - first_mon = teuthology.get_first_mon(ctx, config) - (remote,) = ctx.cluster.only(first_mon).remotes.keys() - - lines = None - if config.get('conf') is not None: - confp = config.get('conf') - for section, keys in confp.iteritems(): - lines = '[{section}]\n'.format(section=section) - teuthology.append_lines_to_file(remote, conf_path, lines, - sudo=True) - for key, value in keys.iteritems(): - log.info("[%s] %s = %s" % (section, key, value)) - lines = '{key} = {value}\n'.format(key=key, value=value) - teuthology.append_lines_to_file(remote, conf_path, lines, - sudo=True) - - estatus_install = execute_ceph_deploy(ctx, config, install_nodes) - if estatus_install != 0: - raise RuntimeError("ceph-deploy: Failed to install ceph") - - mon_create_nodes = './ceph-deploy mon create-initial' - # If the following fails, it is OK, it might just be that the monitors - # are taking way more than a minute/monitor to form quorum, so lets - # try the next block which will wait up to 15 minutes to gatherkeys. 
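    # For reference: the gatherkeys loop below allows max_gather_tries = 90
    # attempts with a 10 second sleep between them, i.e. roughly the
    # 15 minute window mentioned above.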
- execute_ceph_deploy(ctx, config, mon_create_nodes) - - estatus_gather = execute_ceph_deploy(ctx, config, gather_keys) - max_gather_tries = 90 - gather_tries = 0 - while (estatus_gather != 0): - gather_tries += 1 - if gather_tries >= max_gather_tries: - msg = 'ceph-deploy was not able to gatherkeys after 15 minutes' - raise RuntimeError(msg) - estatus_gather = execute_ceph_deploy(ctx, config, gather_keys) - time.sleep(10) - - if mds_nodes: - estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds) - if estatus_mds != 0: - raise RuntimeError("ceph-deploy: Failed to deploy mds") - - if config.get('test_mon_destroy') is not None: - for d in range(1, len(mon_node)): - mon_destroy_nodes = './ceph-deploy mon destroy'+" "+mon_node[d] - estatus_mon_d = execute_ceph_deploy(ctx, config, - mon_destroy_nodes) - if estatus_mon_d != 0: - raise RuntimeError("ceph-deploy: Failed to delete monitor") - - node_dev_list = get_dev_for_osd(ctx, config) - for d in node_dev_list: - osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d - estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds) - if estatus_osd == 0: - log.info('successfully created osd') - no_of_osds += 1 - else: - zap_disk = './ceph-deploy disk zap'+" "+d - execute_ceph_deploy(ctx, config, zap_disk) - estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds) - if estatus_osd == 0: - log.info('successfully created osd') - no_of_osds += 1 - else: - raise RuntimeError("ceph-deploy: Failed to create osds") - - if config.get('wait-for-healthy', True) and no_of_osds >= 2: - is_healthy(ctx=ctx, config=None) - - log.info('Setting up client nodes...') - conf_path = '/etc/ceph/ceph.conf' - admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring' - first_mon = teuthology.get_first_mon(ctx, config) - (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys() - conf_data = teuthology.get_file( - remote=mon0_remote, - path=conf_path, - sudo=True, - ) - admin_keyring = teuthology.get_file( - remote=mon0_remote, - path=admin_keyring_path, - sudo=True, - ) - - clients = ctx.cluster.only(teuthology.is_type('client')) - for remot, roles_for_host in clients.remotes.iteritems(): - for id_ in teuthology.roles_of_type(roles_for_host, 'client'): - client_keyring = \ - '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) - mon0_remote.run( - args=[ - 'cd', - '{tdir}'.format(tdir=testdir), - run.Raw('&&'), - 'sudo', 'bash', '-c', - run.Raw('"'), 'ceph', - 'auth', - 'get-or-create', - 'client.{id}'.format(id=id_), - 'mds', 'allow', - 'mon', 'allow *', - 'osd', 'allow *', - run.Raw('>'), - client_keyring, - run.Raw('"'), - ], - ) - key_data = teuthology.get_file( - remote=mon0_remote, - path=client_keyring, - sudo=True, - ) - teuthology.sudo_write_file( - remote=remot, - path=client_keyring, - data=key_data, - perms='0644' - ) - teuthology.sudo_write_file( - remote=remot, - path=admin_keyring_path, - data=admin_keyring, - perms='0644' - ) - teuthology.sudo_write_file( - remote=remot, - path=conf_path, - data=conf_data, - perms='0644' - ) - else: - raise RuntimeError( - "The cluster is NOT operational due to insufficient OSDs") - yield - - finally: - log.info('Stopping ceph...') - ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'), - 'sudo', 'service', 'ceph', 'stop' ]) - - # Are you really not running anymore? 
- # try first with the init tooling - # ignoring the status so this becomes informational only - ctx.cluster.run(args=['sudo', 'status', 'ceph-all', run.Raw('||'), - 'sudo', 'service', 'ceph', 'status'], - check_status=False) - - # and now just check for the processes themselves, as if upstart/sysvinit - # is lying to us. Ignore errors if the grep fails - ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'), - 'grep', '-v', 'grep', run.Raw('|'), - 'grep', 'ceph'], check_status=False) - - if ctx.archive is not None: - # archive mon data, too - log.info('Archiving mon data...') - path = os.path.join(ctx.archive, 'data') - os.makedirs(path) - mons = ctx.cluster.only(teuthology.is_type('mon')) - for remote, roles in mons.remotes.iteritems(): - for role in roles: - if role.startswith('mon.'): - teuthology.pull_directory_tarball( - remote, - '/var/lib/ceph/mon', - path + '/' + role + '.tgz') - - log.info('Compressing logs...') - run.wait( - ctx.cluster.run( - args=[ - 'sudo', - 'find', - '/var/log/ceph', - '-name', - '*.log', - '-print0', - run.Raw('|'), - 'sudo', - 'xargs', - '-0', - '--no-run-if-empty', - '--', - 'gzip', - '--', - ], - wait=False, - ), - ) - - log.info('Archiving logs...') - path = os.path.join(ctx.archive, 'remote') - os.makedirs(path) - for remote in ctx.cluster.remotes.iterkeys(): - sub = os.path.join(path, remote.shortname) - os.makedirs(sub) - teuthology.pull_directory(remote, '/var/log/ceph', - os.path.join(sub, 'log')) - - # Prevent these from being undefined if the try block fails - all_nodes = get_all_nodes(ctx, config) - purge_nodes = './ceph-deploy purge'+" "+all_nodes - purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes - - log.info('Purging package...') - execute_ceph_deploy(ctx, config, purge_nodes) - log.info('Purging data...') - execute_ceph_deploy(ctx, config, purgedata_nodes) - - -@contextlib.contextmanager -def task(ctx, config): - """ - Set up and tear down a Ceph cluster. 
- - For example:: - - tasks: - - install: - extras: yes - - ssh_keys: - - ceph-deploy: - branch: - stable: bobtail - mon_initial_members: 1 - - tasks: - - install: - extras: yes - - ssh_keys: - - ceph-deploy: - branch: - dev: master - conf: - mon: - debug mon = 20 - - tasks: - - install: - extras: yes - - ssh_keys: - - ceph-deploy: - branch: - testing: - """ - if config is None: - config = {} - - overrides = ctx.config.get('overrides', {}) - teuthology.deep_merge(config, overrides.get('ceph-deploy', {})) - - assert isinstance(config, dict), \ - "task ceph-deploy only supports a dictionary for configuration" - - overrides = ctx.config.get('overrides', {}) - teuthology.deep_merge(config, overrides.get('ceph-deploy', {})) - - if config.get('branch') is not None: - assert isinstance(config['branch'], dict), 'branch must be a dictionary' - - with contextutil.nested( - lambda: install_fn.ship_utilities(ctx=ctx, config=None), - lambda: download_ceph_deploy(ctx=ctx, config=config), - lambda: build_ceph_cluster(ctx=ctx, config=dict( - conf=config.get('conf', {}), - branch=config.get('branch',{}), - mon_initial_members=config.get('mon_initial_members', None), - test_mon_destroy=config.get('test_mon_destroy', None), - )), - ): - yield diff --git a/tasks/ceph_fuse.py b/tasks/ceph_fuse.py deleted file mode 100644 index 454473759ad..00000000000 --- a/tasks/ceph_fuse.py +++ /dev/null @@ -1,207 +0,0 @@ -""" -Ceph FUSE client task -""" -import contextlib -import logging -import os -import time -from cStringIO import StringIO - -from teuthology import misc -from teuthology.orchestra import run - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Mount/unmount a ``ceph-fuse`` client. - - The config is optional and defaults to mounting on all clients. If - a config is given, it is expected to be a list of clients to do - this operation on. This lets you e.g. set up one client with - ``ceph-fuse`` and another with ``kclient``. 
- - Example that mounts all clients:: - - tasks: - - ceph: - - ceph-fuse: - - interactive: - - Example that uses both ``kclient` and ``ceph-fuse``:: - - tasks: - - ceph: - - ceph-fuse: [client.0] - - kclient: [client.1] - - interactive: - - Example that enables valgrind: - - tasks: - - ceph: - - ceph-fuse: - client.0: - valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes] - - interactive: - - :param ctx: Context - :param config: Configuration - """ - log.info('Mounting ceph-fuse clients...') - fuse_daemons = {} - - testdir = misc.get_testdir(ctx) - - if config is None: - config = dict(('client.{id}'.format(id=id_), None) - for id_ in misc.all_roles_of_type(ctx.cluster, 'client')) - elif isinstance(config, list): - config = dict((name, None) for name in config) - - overrides = ctx.config.get('overrides', {}) - misc.deep_merge(config, overrides.get('ceph-fuse', {})) - - clients = list(misc.get_clients(ctx=ctx, roles=config.keys())) - - for id_, remote in clients: - client_config = config.get("client.%s" % id_) - if client_config is None: - client_config = {} - log.info("Client client.%s config is %s" % (id_, client_config)) - - daemon_signal = 'kill' - if client_config.get('coverage') or client_config.get('valgrind') is not None: - daemon_signal = 'term' - - mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format( - id=id_, remote=remote,mnt=mnt)) - - remote.run( - args=[ - 'mkdir', - '--', - mnt, - ], - ) - - run_cmd=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'daemon-helper', - daemon_signal, - ] - run_cmd_tail=[ - 'ceph-fuse', - '-f', - '--name', 'client.{id}'.format(id=id_), - # TODO ceph-fuse doesn't understand dash dash '--', - mnt, - ] - - if client_config.get('valgrind') is not None: - run_cmd = misc.get_valgrind_args( - testdir, - 'client.{id}'.format(id=id_), - run_cmd, - client_config.get('valgrind'), - ) - - run_cmd.extend(run_cmd_tail) - - proc = remote.run( - args=run_cmd, - logger=log.getChild('ceph-fuse.{id}'.format(id=id_)), - stdin=run.PIPE, - wait=False, - ) - fuse_daemons[id_] = proc - - for id_, remote in clients: - mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - wait_until_fuse_mounted( - remote=remote, - fuse=fuse_daemons[id_], - mountpoint=mnt, - ) - remote.run(args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],) - - try: - yield - finally: - log.info('Unmounting ceph-fuse clients...') - for id_, remote in clients: - mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - try: - remote.run( - args=[ - 'sudo', - 'fusermount', - '-u', - mnt, - ], - ) - except run.CommandFailedError: - log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=remote.name)) - # abort the fuse mount, killing all hung processes - remote.run( - args=[ - 'if', 'test', '-e', '/sys/fs/fuse/connections/*/abort', - run.Raw(';'), 'then', - 'echo', - '1', - run.Raw('>'), - run.Raw('/sys/fs/fuse/connections/*/abort'), - run.Raw(';'), 'fi', - ], - ) - # make sure its unmounted - remote.run( - args=[ - 'sudo', - 'umount', - '-l', - '-f', - mnt, - ], - ) - - run.wait(fuse_daemons.itervalues()) - - for id_, remote in clients: - mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - remote.run( - args=[ - 'rmdir', - '--', - mnt, - ], - ) - - -def wait_until_fuse_mounted(remote, fuse, mountpoint): - while True: - proc = remote.run( - args=[ - 'stat', - '--file-system', - '--printf=%T\n', - '--', - 
mountpoint, - ], - stdout=StringIO(), - ) - fstype = proc.stdout.getvalue().rstrip('\n') - if fstype == 'fuseblk': - break - log.debug('ceph-fuse not yet mounted, got fs type {fstype!r}'.format(fstype=fstype)) - - # it shouldn't have exited yet; exposes some trivial problems - assert not fuse.poll() - - time.sleep(5) - log.info('ceph-fuse is mounted on %s', mountpoint) diff --git a/tasks/ceph_manager.py b/tasks/ceph_manager.py deleted file mode 100644 index 8256c2220c9..00000000000 --- a/tasks/ceph_manager.py +++ /dev/null @@ -1,1675 +0,0 @@ -""" -ceph manager -- Thrasher and CephManager objects -""" -from cStringIO import StringIO -import random -import time -import gevent -import json -import logging -import threading -import os -from teuthology import misc as teuthology -from tasks.scrub import Scrubber -from teuthology.orchestra.remote import Remote - -log = logging.getLogger(__name__) - -def make_admin_daemon_dir(ctx, remote): - """ - Create /var/run/ceph directory on remote site. - - :param ctx: Context - :param remote: Remote site - """ - remote.run( - args=[ - 'sudo', - 'install', '-d', '-m0777', '--', '/var/run/ceph', - ], - ) - - -def mount_osd_data(ctx, remote, osd): - """ - Mount a remote OSD - - :param ctx: Context - :param remote: Remote site - :param ods: Osd name - """ - log.debug('Mounting data for osd.{o} on {r}'.format(o=osd, r=remote)) - if remote in ctx.disk_config.remote_to_roles_to_dev and osd in ctx.disk_config.remote_to_roles_to_dev[remote]: - dev = ctx.disk_config.remote_to_roles_to_dev[remote][osd] - mount_options = ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][osd] - fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][osd] - mnt = os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd)) - - log.info('Mounting osd.{o}: dev: {n}, mountpoint: {p}, type: {t}, options: {v}'.format( - o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options)) - - remote.run( - args=[ - 'sudo', - 'mount', - '-t', fstype, - '-o', ','.join(mount_options), - dev, - mnt, - ] - ) - - -class Thrasher: - """ - Object used to thrash Ceph - """ - def __init__(self, manager, config, logger=None): - self.ceph_manager = manager - self.ceph_manager.wait_for_clean() - osd_status = self.ceph_manager.get_osd_status() - self.in_osds = osd_status['in'] - self.live_osds = osd_status['live'] - self.out_osds = osd_status['out'] - self.dead_osds = osd_status['dead'] - self.stopping = False - self.logger = logger - self.config = config - self.revive_timeout = self.config.get("revive_timeout", 150) - if self.config.get('powercycle'): - self.revive_timeout += 120 - self.clean_wait = self.config.get('clean_wait', 0) - self.minin = self.config.get("min_in", 3) - self.chance_move_pg = self.config.get('chance_move_pg', 1.0) - - num_osds = self.in_osds + self.out_osds - self.max_pgs = self.config.get("max_pgs_per_pool_osd", 1200) * num_osds - if self.logger is not None: - self.log = lambda x: self.logger.info(x) - else: - def tmp(x): - """ - Implement log behavior - """ - print x - self.log = tmp - if self.config is None: - self.config = dict() - # prevent monitor from auto-marking things out while thrasher runs - # try both old and new tell syntax, in case we are testing old code - try: - manager.raw_cluster_cmd('--', 'tell', 'mon.*', 'injectargs', - '--mon-osd-down-out-interval 0') - except Exception: - manager.raw_cluster_cmd('--', 'mon', 'tell', '*', 'injectargs', - '--mon-osd-down-out-interval 0') - self.thread = gevent.spawn(self.do_thrash) - if self.config.get('powercycle') or 
not self.cmd_exists_on_osds("ceph-objectstore-tool"): - self.ceph_objectstore_tool = False - self.test_rm_past_intervals = False - if self.config.get('powercycle'): - self.log("Unable to test ceph-objectstore-tool, " - "powercycle testing") - else: - self.log("Unable to test ceph-objectstore-tool, " - "not available on all OSD nodes") - else: - self.ceph_objectstore_tool = \ - self.config.get('ceph_objectstore_tool', True) - self.test_rm_past_intervals = \ - self.config.get('test_rm_past_intervals', True) - - def cmd_exists_on_osds(self, cmd): - allremotes = self.ceph_manager.ctx.cluster.only(\ - teuthology.is_type('osd')).remotes.keys() - allremotes = list(set(allremotes)) - for remote in allremotes: - proc = remote.run(args=['type', cmd], wait=True, - check_status=False, stdout=StringIO(), - stderr=StringIO()) - if proc.exitstatus != 0: - return False; - return True; - - def kill_osd(self, osd=None, mark_down=False, mark_out=False): - """ - :param osd: Osd to be killed. - :mark_down: Mark down if true. - :mark_out: Mark out if true. - """ - if osd is None: - osd = random.choice(self.live_osds) - self.log("Killing osd %s, live_osds are %s" % (str(osd), str(self.live_osds))) - self.live_osds.remove(osd) - self.dead_osds.append(osd) - self.ceph_manager.kill_osd(osd) - if mark_down: - self.ceph_manager.mark_down_osd(osd) - if mark_out and osd in self.in_osds: - self.out_osd(osd) - if self.ceph_objectstore_tool: - self.log("Testing ceph-objectstore-tool on down osd") - (remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys() - FSPATH = self.ceph_manager.get_filepath() - JPATH = os.path.join(FSPATH, "journal") - exp_osd = imp_osd = osd - exp_remote = imp_remote = remote - # If an older osd is available we'll move a pg from there - if len(self.dead_osds) > 1 and random.random() < self.chance_move_pg: - exp_osd = random.choice(self.dead_osds[:-1]) - (exp_remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=exp_osd)).remotes.iterkeys() - if 'keyvaluestore_backend' in self.ceph_manager.ctx.ceph.conf['osd']: - prefix = "sudo ceph-objectstore-tool --data-path {fpath} --journal-path {jpath} --type keyvaluestore-dev --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH) - else: - prefix = "sudo ceph-objectstore-tool --data-path {fpath} --journal-path {jpath} --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH) - cmd = (prefix + "--op list-pgs").format(id=exp_osd) - proc = exp_remote.run(args=cmd, wait=True, - check_status=False, stdout=StringIO()) - if proc.exitstatus: - raise Exception("ceph-objectstore-tool: exp list-pgs failure with status {ret}".format(ret=proc.exitstatus)) - pgs = proc.stdout.getvalue().split('\n')[:-1] - if len(pgs) == 0: - self.log("No PGs found for osd.{osd}".format(osd=exp_osd)) - return - pg = random.choice(pgs) - exp_path = os.path.join(os.path.join(teuthology.get_testdir(self.ceph_manager.ctx), "data"), "exp.{pg}.{id}".format(pg=pg, id=exp_osd)) - # export - cmd = (prefix + "--op export --pgid {pg} --file {file}").format(id=exp_osd, pg=pg, file=exp_path) - proc = exp_remote.run(args=cmd) - if proc.exitstatus: - raise Exception("ceph-objectstore-tool: export failure with status {ret}".format(ret=proc.exitstatus)) - # remove - cmd = (prefix + "--op remove --pgid {pg}").format(id=exp_osd, pg=pg) - proc = exp_remote.run(args=cmd) - if proc.exitstatus: - raise Exception("ceph-objectstore-tool: remove failure with status {ret}".format(ret=proc.exitstatus)) - # If 
there are at least 2 dead osds we might move the pg - if exp_osd != imp_osd: - # If pg isn't already on this osd, then we will move it there - cmd = (prefix + "--op list-pgs").format(id=imp_osd) - proc = imp_remote.run(args=cmd, wait=True, - check_status=False, stdout=StringIO()) - if proc.exitstatus: - raise Exception("ceph-objectstore-tool: imp list-pgs failure with status {ret}".format(ret=proc.exitstatus)) - pgs = proc.stdout.getvalue().split('\n')[:-1] - if pg not in pgs: - self.log("Moving pg {pg} from osd.{fosd} to osd.{tosd}".format(pg=pg, fosd=exp_osd, tosd=imp_osd)) - if imp_remote != exp_remote: - # Copy export file to the other machine - self.log("Transfer export file from {srem} to {trem}".format(srem=exp_remote, trem=imp_remote)) - tmpexport = Remote.get_file(exp_remote, exp_path) - Remote.put_file(imp_remote, tmpexport, exp_path) - os.remove(tmpexport) - else: - # Can't move the pg after all - imp_osd = exp_osd - imp_remote = exp_remote - # import - cmd = (prefix + "--op import --file {file}") - cmd = cmd.format(id=imp_osd, file=exp_path) - proc = imp_remote.run(args=cmd, wait=True, check_status=False) - if proc.exitstatus == 10: - self.log("Pool went away before processing an import" - "...ignored") - elif proc.exitstatus == 11: - self.log("Attempt to import an incompatible export" - "...ignored") - elif proc.exitstatus: - raise Exception("ceph-objectstore-tool: " - "import failure with status {ret}". - format(ret=proc.exitstatus)) - cmd = "rm -f {file}".format(file=exp_path) - exp_remote.run(args=cmd) - if imp_remote != exp_remote: - imp_remote.run(args=cmd) - - def rm_past_intervals(self, osd=None): - """ - :param osd: Osd to find pg to remove past intervals - """ - if self.test_rm_past_intervals: - if osd is None: - osd = random.choice(self.dead_osds) - self.log("Use ceph_objectstore_tool to remove past intervals") - (remote,) = self.ceph_manager.ctx.\ - cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys() - FSPATH = self.ceph_manager.get_filepath() - JPATH = os.path.join(FSPATH, "journal") - if ('keyvaluestore_backend' in - self.ceph_manager.ctx.ceph.conf['osd']): - prefix = ("sudo ceph-objectstore-tool " - "--data-path {fpath} --journal-path {jpath} " - "--type keyvaluestore-dev " - "--log-file=" - "/var/log/ceph/objectstore_tool.\\$pid.log ". - format(fpath=FSPATH, jpath=JPATH)) - else: - prefix = ("sudo ceph-objectstore-tool " - "--data-path {fpath} --journal-path {jpath} " - "--log-file=" - "/var/log/ceph/objectstore_tool.\\$pid.log ". - format(fpath=FSPATH, jpath=JPATH)) - cmd = (prefix + "--op list-pgs").format(id=osd) - proc = remote.run(args=cmd, wait=True, - check_status=False, stdout=StringIO()) - if proc.exitstatus: - raise Exception("ceph_objectstore_tool: " - "exp list-pgs failure with status {ret}". - format(ret=proc.exitstatus)) - pgs = proc.stdout.getvalue().split('\n')[:-1] - if len(pgs) == 0: - self.log("No PGs found for osd.{osd}".format(osd=osd)) - return - pg = random.choice(pgs) - cmd = (prefix + "--op rm-past-intervals --pgid {pg}").\ - format(id=osd, pg=pg) - proc = remote.run(args=cmd) - if proc.exitstatus: - raise Exception("ceph_objectstore_tool: " - "rm-past-intervals failure with status {ret}". - format(ret=proc.exitstatus)) - - def blackhole_kill_osd(self, osd=None): - """ - If all else fails, kill the osd. - :param osd: Osd to be killed. 
- """ - if osd is None: - osd = random.choice(self.live_osds) - self.log("Blackholing and then killing osd %s, live_osds are %s" % (str(osd), str(self.live_osds))) - self.live_osds.remove(osd) - self.dead_osds.append(osd) - self.ceph_manager.blackhole_kill_osd(osd) - - def revive_osd(self, osd=None): - """ - Revive the osd. - :param osd: Osd to be revived. - """ - if osd is None: - osd = random.choice(self.dead_osds) - self.log("Reviving osd %s" % (str(osd),)) - self.live_osds.append(osd) - self.dead_osds.remove(osd) - self.ceph_manager.revive_osd(osd, self.revive_timeout) - - def out_osd(self, osd=None): - """ - Mark the osd out - :param osd: Osd to be marked. - """ - if osd is None: - osd = random.choice(self.in_osds) - self.log("Removing osd %s, in_osds are: %s" % (str(osd), str(self.in_osds))) - self.ceph_manager.mark_out_osd(osd) - self.in_osds.remove(osd) - self.out_osds.append(osd) - - def in_osd(self, osd=None): - """ - Mark the osd out - :param osd: Osd to be marked. - """ - if osd is None: - osd = random.choice(self.out_osds) - if osd in self.dead_osds: - return self.revive_osd(osd) - self.log("Adding osd %s" % (str(osd),)) - self.out_osds.remove(osd) - self.in_osds.append(osd) - self.ceph_manager.mark_in_osd(osd) - self.log("Added osd %s"%(str(osd),)) - - def reweight_osd(self, osd=None): - """ - Reweight an osd that is in - :param osd: Osd to be marked. - """ - if osd is None: - osd = random.choice(self.in_osds) - val = random.uniform(.1, 1.0) - self.log("Reweighting osd %s to %s" % (str(osd), str(val))) - self.ceph_manager.raw_cluster_cmd('osd', 'reweight', str(osd), str(val)) - - def primary_affinity(self, osd=None): - if osd is None: - osd = random.choice(self.in_osds) - if random.random() >= .5: - pa = random.random() - elif random.random() >= .5: - pa = 1 - else: - pa = 0 - self.log('Setting osd %s primary_affinity to %f' % (str(osd), pa)) - self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity', str(osd), str(pa)) - - def all_up(self): - """ - Make sure all osds are up and not out. - """ - while len(self.dead_osds) > 0: - self.log("reviving osd") - self.revive_osd() - while len(self.out_osds) > 0: - self.log("inning osd") - self.in_osd() - - def do_join(self): - """ - Break out of this Ceph loop - """ - self.stopping = True - self.thread.get() - - def grow_pool(self): - """ - Increase the size of the pool - """ - pool = self.ceph_manager.get_pool() - self.log("Growing pool %s"%(pool,)) - self.ceph_manager.expand_pool(pool, self.config.get('pool_grow_by', 10), self.max_pgs) - - def fix_pgp_num(self): - """ - Fix number of pgs in pool. - """ - pool = self.ceph_manager.get_pool() - self.log("fixing pg num pool %s"%(pool,)) - self.ceph_manager.set_pool_pgpnum(pool) - - def test_pool_min_size(self): - """ - Kill and revive all osds except one. 
- """ - self.log("test_pool_min_size") - self.all_up() - self.ceph_manager.wait_for_recovery( - timeout=self.config.get('timeout') - ) - the_one = random.choice(self.in_osds) - self.log("Killing everyone but %s", the_one) - to_kill = filter(lambda x: x != the_one, self.in_osds) - [self.kill_osd(i) for i in to_kill] - [self.out_osd(i) for i in to_kill] - time.sleep(self.config.get("test_pool_min_size_time", 10)) - self.log("Killing %s" % (the_one,)) - self.kill_osd(the_one) - self.out_osd(the_one) - self.log("Reviving everyone but %s" % (the_one,)) - [self.revive_osd(i) for i in to_kill] - [self.in_osd(i) for i in to_kill] - self.log("Revived everyone but %s" % (the_one,)) - self.log("Waiting for clean") - self.ceph_manager.wait_for_recovery( - timeout=self.config.get('timeout') - ) - - def inject_pause(self, conf_key, duration, check_after, should_be_down): - """ - Pause injection testing. Check for osd being down when finished. - """ - the_one = random.choice(self.live_osds) - self.log("inject_pause on {osd}".format(osd = the_one)) - self.log( - "Testing {key} pause injection for duration {duration}".format( - key = conf_key, - duration = duration - )) - self.log( - "Checking after {after}, should_be_down={shouldbedown}".format( - after = check_after, - shouldbedown = should_be_down - )) - self.ceph_manager.set_config(the_one, **{conf_key:duration}) - if not should_be_down: - return - time.sleep(check_after) - status = self.ceph_manager.get_osd_status() - assert the_one in status['down'] - time.sleep(duration - check_after + 20) - status = self.ceph_manager.get_osd_status() - assert not the_one in status['down'] - - def test_backfill_full(self): - """ - Test backfills stopping when the replica fills up. - - First, use osd_backfill_full_ratio to simulate a now full - osd by setting it to 0 on all of the OSDs. - - Second, on a random subset, set - osd_debug_skip_full_check_in_backfill_reservation to force - the more complicated check in do_scan to be exercised. - - Then, verify that all backfills stop. - """ - self.log("injecting osd_backfill_full_ratio = 0") - for i in self.live_osds: - self.ceph_manager.set_config( - i, - osd_debug_skip_full_check_in_backfill_reservation = random.choice( - ['false', 'true']), - osd_backfill_full_ratio = 0) - for i in range(30): - status = self.ceph_manager.compile_pg_status() - if 'backfill' not in status.keys(): - break - self.log( - "waiting for {still_going} backfills".format( - still_going=status.get('backfill'))) - time.sleep(1) - assert('backfill' not in self.ceph_manager.compile_pg_status().keys()) - for i in self.live_osds: - self.ceph_manager.set_config( - i, - osd_debug_skip_full_check_in_backfill_reservation = \ - 'false', - osd_backfill_full_ratio = 0.85) - - def test_map_discontinuity(self): - """ - 1) Allows the osds to recover - 2) kills an osd - 3) allows the remaining osds to recover - 4) waits for some time - 5) revives the osd - This sequence should cause the revived osd to have to handle - a map gap since the mons would have trimmed - """ - while len(self.in_osds) < (self.minin + 1): - self.in_osd() - self.log("Waiting for recovery") - self.ceph_manager.wait_for_all_up( - timeout=self.config.get('timeout') - ) - # now we wait 20s for the pg status to change, if it takes longer, - # the test *should* fail! 
- time.sleep(20) - self.ceph_manager.wait_for_clean( - timeout=self.config.get('timeout') - ) - - # now we wait 20s for the backfill replicas to hear about the clean - time.sleep(20) - self.log("Recovered, killing an osd") - self.kill_osd(mark_down=True, mark_out=True) - self.log("Waiting for clean again") - self.ceph_manager.wait_for_clean( - timeout=self.config.get('timeout') - ) - self.log("Waiting for trim") - time.sleep(int(self.config.get("map_discontinuity_sleep_time", 40))) - self.revive_osd() - - def choose_action(self): - """ - Random action selector. - """ - chance_down = self.config.get('chance_down', 0.4) - chance_test_min_size = self.config.get('chance_test_min_size', 0) - chance_test_backfill_full = self.config.get('chance_test_backfill_full', 0) - if isinstance(chance_down, int): - chance_down = float(chance_down) / 100 - minin = self.minin - minout = self.config.get("min_out", 0) - minlive = self.config.get("min_live", 2) - mindead = self.config.get("min_dead", 0) - - self.log('choose_action: min_in %d min_out %d min_live %d min_dead %d' % - (minin, minout, minlive, mindead)) - actions = [] - if len(self.in_osds) > minin: - actions.append((self.out_osd, 1.0,)) - if len(self.live_osds) > minlive and chance_down > 0: - actions.append((self.kill_osd, chance_down,)) - if len(self.dead_osds) > 1: - actions.append((self.rm_past_intervals, 1.0,)) - if len(self.out_osds) > minout: - actions.append((self.in_osd, 1.7,)) - if len(self.dead_osds) > mindead: - actions.append((self.revive_osd, 1.0,)) - if self.config.get('thrash_primary_affinity', True): - actions.append((self.primary_affinity, 1.0,)) - actions.append((self.reweight_osd, self.config.get('reweight_osd',.5),)) - actions.append((self.grow_pool, self.config.get('chance_pgnum_grow', 0),)) - actions.append((self.fix_pgp_num, self.config.get('chance_pgpnum_fix', 0),)) - actions.append((self.test_pool_min_size, chance_test_min_size,)) - actions.append((self.test_backfill_full, chance_test_backfill_full,)) - for key in ['heartbeat_inject_failure', 'filestore_inject_stall']: - for scenario in [ - (lambda: self.inject_pause(key, - self.config.get('pause_short', 3), - 0, - False), - self.config.get('chance_inject_pause_short', 1),), - (lambda: self.inject_pause(key, - self.config.get('pause_long', 80), - self.config.get('pause_check_after', 70), - True), - self.config.get('chance_inject_pause_long', 0),)]: - actions.append(scenario) - - total = sum([y for (x, y) in actions]) - val = random.uniform(0, total) - for (action, prob) in actions: - if val < prob: - return action - val -= prob - return None - - def do_thrash(self): - """ - Loop to select random actions to thrash ceph manager with. 
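- Each pass may run a clean-up cycle (revive osds down to max_dead, reset
- weights, then recover or exercise a map discontinuity) and an inline
- scrub, then applies one weighted-random action and sleeps for op_delay.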
- """ - cleanint = self.config.get("clean_interval", 60) - scrubint = self.config.get("scrub_interval", -1) - maxdead = self.config.get("max_dead", 0) - delay = self.config.get("op_delay", 5) - self.log("starting do_thrash") - while not self.stopping: - self.log(" ".join([str(x) for x in ["in_osds: ", self.in_osds, " out_osds: ", self.out_osds, - "dead_osds: ", self.dead_osds, "live_osds: ", - self.live_osds]])) - if random.uniform(0, 1) < (float(delay) / cleanint): - while len(self.dead_osds) > maxdead: - self.revive_osd() - for osd in self.in_osds: - self.ceph_manager.raw_cluster_cmd('osd', 'reweight', - str(osd), str(1)) - if random.uniform(0, 1) < float( - self.config.get('chance_test_map_discontinuity', 0)): - self.test_map_discontinuity() - else: - self.ceph_manager.wait_for_recovery( - timeout=self.config.get('timeout') - ) - time.sleep(self.clean_wait) - if scrubint > 0: - if random.uniform(0, 1) < (float(delay) / scrubint): - self.log('Scrubbing while thrashing being performed') - Scrubber(self.ceph_manager, self.config) - self.choose_action()() - time.sleep(delay) - self.all_up() - -class CephManager: - """ - Ceph manager object. - Contains several local functions that form a bulk of this module. - """ - - REPLICATED_POOL = 1 - ERASURE_CODED_POOL = 3 - - def __init__(self, controller, ctx=None, config=None, logger=None): - self.lock = threading.RLock() - self.ctx = ctx - self.config = config - self.controller = controller - self.next_pool_id = 0 - self.created_erasure_pool = False - if (logger): - self.log = lambda x: logger.info(x) - else: - def tmp(x): - """ - implement log behavior. - """ - print x - self.log = tmp - if self.config is None: - self.config = dict() - pools = self.list_pools() - self.pools = {} - for pool in pools: - self.pools[pool] = self.get_pool_property(pool, 'pg_num') - - def raw_cluster_cmd(self, *args): - """ - Start ceph on a raw cluster. Return count - """ - testdir = teuthology.get_testdir(self.ctx) - ceph_args = [ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'ceph', - ] - ceph_args.extend(args) - proc = self.controller.run( - args=ceph_args, - stdout=StringIO(), - ) - return proc.stdout.getvalue() - - def raw_cluster_cmd_result(self, *args): - """ - Start ceph on a cluster. Return success or failure information. - """ - testdir = teuthology.get_testdir(self.ctx) - ceph_args = [ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'ceph', - ] - ceph_args.extend(args) - proc = self.controller.run( - args=ceph_args, - check_status=False, - ) - return proc.exitstatus - - def do_rados(self, remote, cmd): - """ - Execute a remote rados command. - """ - testdir = teuthology.get_testdir(self.ctx) - pre = [ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'rados', - ] - pre.extend(cmd) - proc = remote.run( - args=pre, - wait=True, - ) - return proc - - def rados_write_objects( - self, pool, num_objects, size, timelimit, threads, cleanup=False): - """ - Write rados objects - Threads not used yet. 
- """ - args = [ - '-p', pool, - '--num-objects', num_objects, - '-b', size, - 'bench', timelimit, - 'write' - ] - if not cleanup: args.append('--no-cleanup') - return self.do_rados(self.controller, map(str, args)) - - def do_put(self, pool, obj, fname): - """ - Implement rados put operation - """ - return self.do_rados( - self.controller, - [ - '-p', - pool, - 'put', - obj, - fname - ] - ) - - def do_get(self, pool, obj, fname='/dev/null'): - """ - Implement rados get operation - """ - return self.do_rados( - self.controller, - [ - '-p', - pool, - 'stat', - obj, - fname - ] - ) - - def osd_admin_socket(self, osdnum, command, check_status=True): - """ - Remotely start up ceph specifying the admin socket - :param command a list of words to use as the command to the admin socket - """ - testdir = teuthology.get_testdir(self.ctx) - remote = None - for _remote, roles_for_host in self.ctx.cluster.remotes.iteritems(): - for id_ in teuthology.roles_of_type(roles_for_host, 'osd'): - if int(id_) == int(osdnum): - remote = _remote - assert remote is not None - args = [ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'ceph', - '--admin-daemon', - '/var/run/ceph/ceph-osd.{id}.asok'.format(id=osdnum), - ] - args.extend(command) - return remote.run( - args=args, - stdout=StringIO(), - wait=True, - check_status=check_status - ) - - def get_pgid(self, pool, pgnum): - """ - :param pool: pool name - :param pgnum: pg number - :returns: a string representing this pg. - """ - poolnum = self.get_pool_num(pool) - pg_str = "{poolnum}.{pgnum}".format( - poolnum=poolnum, - pgnum=pgnum) - return pg_str - - def get_pg_replica(self, pool, pgnum): - """ - get replica for pool, pgnum (e.g. (data, 0)->0 - """ - output = self.raw_cluster_cmd("pg", "dump", '--format=json') - j = json.loads('\n'.join(output.split('\n')[1:])) - pg_str = self.get_pgid(pool, pgnum) - for pg in j['pg_stats']: - if pg['pgid'] == pg_str: - return int(pg['acting'][-1]) - assert False - - def get_pg_primary(self, pool, pgnum): - """ - get primary for pool, pgnum (e.g. (data, 0)->0 - """ - output = self.raw_cluster_cmd("pg", "dump", '--format=json') - j = json.loads('\n'.join(output.split('\n')[1:])) - pg_str = self.get_pgid(pool, pgnum) - for pg in j['pg_stats']: - if pg['pgid'] == pg_str: - return int(pg['acting'][0]) - assert False - - def get_pool_num(self, pool): - """ - get number for pool (e.g., data -> 2) - """ - out = self.raw_cluster_cmd('osd', 'dump', '--format=json') - j = json.loads('\n'.join(out.split('\n')[1:])) - for i in j['pools']: - if i['pool_name'] == pool: - return int(i['pool']) - assert False - - def list_pools(self): - """ - list all pool names - """ - out = self.raw_cluster_cmd('osd', 'dump', '--format=json') - j = json.loads('\n'.join(out.split('\n')[1:])) - self.log(j['pools']) - return [str(i['pool_name']) for i in j['pools']] - - def clear_pools(self): - """ - remove all pools - """ - [self.remove_pool(i) for i in self.list_pools()] - - def kick_recovery_wq(self, osdnum): - """ - Run kick_recovery_wq on cluster. - """ - return self.raw_cluster_cmd( - 'tell', "osd.%d" % (int(osdnum),), - 'debug', - 'kick_recovery_wq', - '0') - - def wait_run_admin_socket(self, osdnum, args=['version'], timeout=300): - """ - If osd_admin_socket call suceeds, return. Otherwise wait - five seconds and try again. 
- """ - tries = 0 - while True: - proc = self.osd_admin_socket( - osdnum, args, - check_status=False) - if proc.exitstatus is 0: - break - else: - tries += 1 - if (tries * 5) > timeout: - raise Exception('timed out waiting for admin_socket to appear after osd.{o} restart'.format(o=osdnum)) - self.log( - "waiting on admin_socket for {osdnum}, {command}".format( - osdnum=osdnum, - command=args)) - time.sleep(5) - - def get_pool_dump(self, pool): - """ - get the osd dump part of a pool - """ - osd_dump = self.get_osd_dump_json() - for i in osd_dump['pools']: - if i['pool_name'] == pool: - return i - assert False - - def set_config(self, osdnum, **argdict): - """ - :param osdnum: osd number - :param argdict: dictionary containing values to set. - """ - for k, v in argdict.iteritems(): - self.wait_run_admin_socket( - osdnum, - ['config', 'set', str(k), str(v)]) - - def raw_cluster_status(self): - """ - Get status from cluster - """ - status = self.raw_cluster_cmd('status', '--format=json-pretty') - return json.loads(status) - - def raw_osd_status(self): - """ - Get osd status from cluster - """ - return self.raw_cluster_cmd('osd', 'dump') - - def get_osd_status(self): - """ - Get osd statuses sorted by states that the osds are in. - """ - osd_lines = filter( - lambda x: x.startswith('osd.') and (("up" in x) or ("down" in x)), - self.raw_osd_status().split('\n')) - self.log(osd_lines) - in_osds = [int(i[4:].split()[0]) for i in filter( - lambda x: " in " in x, - osd_lines)] - out_osds = [int(i[4:].split()[0]) for i in filter( - lambda x: " out " in x, - osd_lines)] - up_osds = [int(i[4:].split()[0]) for i in filter( - lambda x: " up " in x, - osd_lines)] - down_osds = [int(i[4:].split()[0]) for i in filter( - lambda x: " down " in x, - osd_lines)] - dead_osds = [int(x.id_) for x in - filter(lambda x: not x.running(), self.ctx.daemons.iter_daemons_of_role('osd'))] - live_osds = [int(x.id_) for x in - filter(lambda x: x.running(), self.ctx.daemons.iter_daemons_of_role('osd'))] - return { 'in' : in_osds, 'out' : out_osds, 'up' : up_osds, - 'down' : down_osds, 'dead' : dead_osds, 'live' : live_osds, - 'raw' : osd_lines} - - def get_num_pgs(self): - """ - Check cluster status for the number of pgs - """ - status = self.raw_cluster_status() - self.log(status) - return status['pgmap']['num_pgs'] - - def create_pool_with_unique_name(self, pg_num=16, ec_pool=False, ec_m=1, ec_k=2): - """ - Create a pool named unique_pool_X where X is unique. - """ - name = "" - with self.lock: - name = "unique_pool_%s" % (str(self.next_pool_id),) - self.next_pool_id += 1 - self.create_pool( - name, - pg_num, - ec_pool=ec_pool, - ec_m=ec_m, - ec_k=ec_k) - return name - - def create_pool(self, pool_name, pg_num=16, ec_pool=False, ec_m=1, ec_k=2): - """ - Create a pool named from the pool_name parameter. - :param pool_name: name of the pool being created. - :param pg_num: initial number of pgs. 
- """ - with self.lock: - assert isinstance(pool_name, str) - assert isinstance(pg_num, int) - assert pool_name not in self.pools - self.log("creating pool_name %s"%(pool_name,)) - if ec_pool and not self.created_erasure_pool: - self.created_erasure_pool = True - self.raw_cluster_cmd('osd', 'erasure-code-profile', 'set', 'teuthologyprofile', 'ruleset-failure-domain=osd', 'm='+str(ec_m), 'k='+str(ec_k)) - - if ec_pool: - self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num), str(pg_num), 'erasure', 'teuthologyprofile') - else: - self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num)) - self.pools[pool_name] = pg_num - - def remove_pool(self, pool_name): - """ - Remove the indicated pool - :param pool_name: Pool to be removed - """ - with self.lock: - assert isinstance(pool_name, str) - assert pool_name in self.pools - self.log("removing pool_name %s" % (pool_name,)) - del self.pools[pool_name] - self.do_rados( - self.controller, - ['rmpool', pool_name, pool_name, "--yes-i-really-really-mean-it"] - ) - - def get_pool(self): - """ - Pick a random pool - """ - with self.lock: - return random.choice(self.pools.keys()) - - def get_pool_pg_num(self, pool_name): - """ - Return the number of pgs in the pool specified. - """ - with self.lock: - assert isinstance(pool_name, str) - if pool_name in self.pools: - return self.pools[pool_name] - return 0 - - def get_pool_property(self, pool_name, prop): - """ - :param pool_name: pool - :param prop: property to be checked. - :returns: property as an int value. - """ - with self.lock: - assert isinstance(pool_name, str) - assert isinstance(prop, str) - output = self.raw_cluster_cmd( - 'osd', - 'pool', - 'get', - pool_name, - prop) - return int(output.split()[1]) - - def set_pool_property(self, pool_name, prop, val): - """ - :param pool_name: pool - :param prop: property to be set. - :param val: value to set. - - This routine retries if set operation fails. - """ - with self.lock: - assert isinstance(pool_name, str) - assert isinstance(prop, str) - assert isinstance(val, int) - tries = 0 - while True: - r = self.raw_cluster_cmd_result( - 'osd', - 'pool', - 'set', - pool_name, - prop, - str(val)) - if r != 11: # EAGAIN - break - tries += 1 - if tries > 50: - raise Exception('timed out getting EAGAIN when setting pool property %s %s = %s' % (pool_name, prop, val)) - self.log('got EAGAIN setting pool property, waiting a few seconds...') - time.sleep(2) - - def expand_pool(self, pool_name, by, max_pgs): - """ - Increase the number of pgs in a pool - """ - with self.lock: - assert isinstance(pool_name, str) - assert isinstance(by, int) - assert pool_name in self.pools - if self.get_num_creating() > 0: - return - if (self.pools[pool_name] + by) > max_pgs: - return - self.log("increase pool size by %d"%(by,)) - new_pg_num = self.pools[pool_name] + by - self.set_pool_property(pool_name, "pg_num", new_pg_num) - self.pools[pool_name] = new_pg_num - - def set_pool_pgpnum(self, pool_name): - """ - Set pgpnum property of pool_name pool. 
- """ - with self.lock: - assert isinstance(pool_name, str) - assert pool_name in self.pools - if self.get_num_creating() > 0: - return - self.set_pool_property(pool_name, 'pgp_num', self.pools[pool_name]) - - def list_pg_missing(self, pgid): - """ - return list of missing pgs with the id specified - """ - r = None - offset = {} - while True: - out = self.raw_cluster_cmd('--', 'pg', pgid, 'list_missing', - json.dumps(offset)) - j = json.loads(out) - if r is None: - r = j - else: - r['objects'].extend(j['objects']) - if not 'more' in j: - break - if j['more'] == 0: - break - offset = j['objects'][-1]['oid'] - if 'more' in r: - del r['more'] - return r - - def get_pg_stats(self): - """ - Dump the cluster and get pg stats - """ - out = self.raw_cluster_cmd('pg', 'dump', '--format=json') - j = json.loads('\n'.join(out.split('\n')[1:])) - return j['pg_stats'] - - def compile_pg_status(self): - """ - Return a histogram of pg state values - """ - ret = {} - j = self.get_pg_stats() - for pg in j: - for status in pg['state'].split('+'): - if status not in ret: - ret[status] = 0 - ret[status] += 1 - return ret - - def pg_scrubbing(self, pool, pgnum): - """ - pg scrubbing wrapper - """ - pgstr = self.get_pgid(pool, pgnum) - stats = self.get_single_pg_stats(pgstr) - return 'scrub' in stats['state'] - - def pg_repairing(self, pool, pgnum): - """ - pg repairing wrapper - """ - pgstr = self.get_pgid(pool, pgnum) - stats = self.get_single_pg_stats(pgstr) - return 'repair' in stats['state'] - - def pg_inconsistent(self, pool, pgnum): - """ - pg inconsistent wrapper - """ - pgstr = self.get_pgid(pool, pgnum) - stats = self.get_single_pg_stats(pgstr) - return 'inconsistent' in stats['state'] - - def get_last_scrub_stamp(self, pool, pgnum): - """ - Get the timestamp of the last scrub. - """ - stats = self.get_single_pg_stats(self.get_pgid(pool, pgnum)) - return stats["last_scrub_stamp"] - - def do_pg_scrub(self, pool, pgnum, stype): - """ - Scrub pg and wait for scrubbing to finish - """ - init = self.get_last_scrub_stamp(pool, pgnum) - self.raw_cluster_cmd('pg', stype, self.get_pgid(pool, pgnum)) - while init == self.get_last_scrub_stamp(pool, pgnum): - self.log("waiting for scrub type %s"%(stype,)) - time.sleep(10) - - def get_single_pg_stats(self, pgid): - """ - Return pg for the pgid specified. - """ - all_stats = self.get_pg_stats() - - for pg in all_stats: - if pg['pgid'] == pgid: - return pg - - return None - - def get_osd_dump_json(self): - """ - osd dump --format=json converted to a python object - :returns: the python object - """ - out = self.raw_cluster_cmd('osd', 'dump', '--format=json') - return json.loads('\n'.join(out.split('\n')[1:])) - - def get_osd_dump(self): - """ - Dump osds - :returns: all osds - """ - out = self.raw_cluster_cmd('osd', 'dump', '--format=json') - j = json.loads('\n'.join(out.split('\n')[1:])) - return j['osds'] - - def get_stuck_pgs(self, type_, threshold): - """ - :returns: stuck pg information from the cluster - """ - out = self.raw_cluster_cmd('pg', 'dump_stuck', type_, str(threshold), - '--format=json') - return json.loads(out) - - def get_num_unfound_objects(self): - """ - Check cluster status to get the number of unfound objects - """ - status = self.raw_cluster_status() - self.log(status) - return status['pgmap'].get('unfound_objects', 0) - - def get_num_creating(self): - """ - Find the number of pgs in creating mode. 
- """ - pgs = self.get_pg_stats() - num = 0 - for pg in pgs: - if 'creating' in pg['state']: - num += 1 - return num - - def get_num_active_clean(self): - """ - Find the number of active and clean pgs. - """ - pgs = self.get_pg_stats() - num = 0 - for pg in pgs: - if pg['state'].count('active') and pg['state'].count('clean') and not pg['state'].count('stale'): - num += 1 - return num - - def get_num_active_recovered(self): - """ - Find the number of active and recovered pgs. - """ - pgs = self.get_pg_stats() - num = 0 - for pg in pgs: - if pg['state'].count('active') and not pg['state'].count('recover') and not pg['state'].count('backfill') and not pg['state'].count('stale'): - num += 1 - return num - - def get_is_making_recovery_progress(self): - """ - Return whether there is recovery progress discernable in the - raw cluster status - """ - status = self.raw_cluster_status() - kps = status['pgmap'].get('recovering_keys_per_sec', 0) - bps = status['pgmap'].get('recovering_bytes_per_sec', 0) - ops = status['pgmap'].get('recovering_objects_per_sec', 0) - return kps > 0 or bps > 0 or ops > 0 - - def get_num_active(self): - """ - Find the number of active pgs. - """ - pgs = self.get_pg_stats() - num = 0 - for pg in pgs: - if pg['state'].count('active') and not pg['state'].count('stale'): - num += 1 - return num - - def get_num_down(self): - """ - Find the number of pgs that are down. - """ - pgs = self.get_pg_stats() - num = 0 - for pg in pgs: - if (pg['state'].count('down') and not pg['state'].count('stale')) or \ - (pg['state'].count('incomplete') and not pg['state'].count('stale')): - num += 1 - return num - - def get_num_active_down(self): - """ - Find the number of pgs that are either active or down. - """ - pgs = self.get_pg_stats() - num = 0 - for pg in pgs: - if (pg['state'].count('active') and not pg['state'].count('stale')) or \ - (pg['state'].count('down') and not pg['state'].count('stale')) or \ - (pg['state'].count('incomplete') and not pg['state'].count('stale')): - num += 1 - return num - - def is_clean(self): - """ - True if all pgs are clean - """ - return self.get_num_active_clean() == self.get_num_pgs() - - def is_recovered(self): - """ - True if all pgs have recovered - """ - return self.get_num_active_recovered() == self.get_num_pgs() - - def is_active_or_down(self): - """ - True if all pgs are active or down - """ - return self.get_num_active_down() == self.get_num_pgs() - - def wait_for_clean(self, timeout=None): - """ - Returns trues when all pgs are clean. - """ - self.log("waiting for clean") - start = time.time() - num_active_clean = self.get_num_active_clean() - while not self.is_clean(): - if timeout is not None: - if self.get_is_making_recovery_progress(): - self.log("making progress, resetting timeout") - start = time.time() - else: - self.log("no progress seen, keeping timeout for now") - assert time.time() - start < timeout, \ - 'failed to become clean before timeout expired' - cur_active_clean = self.get_num_active_clean() - if cur_active_clean != num_active_clean: - start = time.time() - num_active_clean = cur_active_clean - time.sleep(3) - self.log("clean!") - - def are_all_osds_up(self): - """ - Returns true if all osds are up. - """ - x = self.get_osd_dump() - return (len(x) == \ - sum([(y['up'] > 0) for y in x])) - - def wait_for_all_up(self, timeout=None): - """ - When this exits, either the timeout has expired, or all - osds are up. 
- """ - self.log("waiting for all up") - start = time.time() - while not self.are_all_osds_up(): - if timeout is not None: - assert time.time() - start < timeout, \ - 'timeout expired in wait_for_all_up' - time.sleep(3) - self.log("all up!") - - def wait_for_recovery(self, timeout=None): - """ - Check peering. When this exists, we have recovered. - """ - self.log("waiting for recovery to complete") - start = time.time() - num_active_recovered = self.get_num_active_recovered() - while not self.is_recovered(): - if timeout is not None: - if self.get_is_making_recovery_progress(): - self.log("making progress, resetting timeout") - start = time.time() - else: - self.log("no progress seen, keeping timeout for now") - assert time.time() - start < timeout, \ - 'failed to recover before timeout expired' - cur_active_recovered = self.get_num_active_recovered() - if cur_active_recovered != num_active_recovered: - start = time.time() - num_active_recovered = cur_active_recovered - time.sleep(3) - self.log("recovered!") - - def wait_for_active(self, timeout=None): - """ - Check peering. When this exists, we are definitely active - """ - self.log("waiting for peering to complete") - start = time.time() - num_active = self.get_num_active() - while not self.is_active(): - if timeout is not None: - assert time.time() - start < timeout, \ - 'failed to recover before timeout expired' - cur_active = self.get_num_active() - if cur_active != num_active: - start = time.time() - num_active = cur_active - time.sleep(3) - self.log("active!") - - def wait_for_active_or_down(self, timeout=None): - """ - Check peering. When this exists, we are definitely either - active or down - """ - self.log("waiting for peering to complete or become blocked") - start = time.time() - num_active_down = self.get_num_active_down() - while not self.is_active_or_down(): - if timeout is not None: - assert time.time() - start < timeout, \ - 'failed to recover before timeout expired' - cur_active_down = self.get_num_active_down() - if cur_active_down != num_active_down: - start = time.time() - num_active_down = cur_active_down - time.sleep(3) - self.log("active or down!") - - def osd_is_up(self, osd): - """ - Wrapper for osd check - """ - osds = self.get_osd_dump() - return osds[osd]['up'] > 0 - - def wait_till_osd_is_up(self, osd, timeout=None): - """ - Loop waiting for osd. - """ - self.log('waiting for osd.%d to be up' % osd) - start = time.time() - while not self.osd_is_up(osd): - if timeout is not None: - assert time.time() - start < timeout, \ - 'osd.%d failed to come up before timeout expired' % osd - time.sleep(3) - self.log('osd.%d is up' % osd) - - def is_active(self): - """ - Wrapper to check if active - """ - return self.get_num_active() == self.get_num_pgs() - - def wait_till_active(self, timeout=None): - """ - Wait until osds are active. - """ - self.log("waiting till active") - start = time.time() - while not self.is_active(): - if timeout is not None: - assert time.time() - start < timeout, \ - 'failed to become active before timeout expired' - time.sleep(3) - self.log("active!") - - def mark_out_osd(self, osd): - """ - Wrapper to mark osd out. - """ - self.raw_cluster_cmd('osd', 'out', str(osd)) - - def kill_osd(self, osd): - """ - Kill osds by either power cycling (if indicated by the config) - or by stopping. 
- """ - if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys() - self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name)) - assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." - remote.console.power_off() - else: - self.ctx.daemons.get_daemon('osd', osd).stop() - - def blackhole_kill_osd(self, osd): - """ - Stop osd if nothing else works. - """ - self.raw_cluster_cmd('--', 'tell', 'osd.%d' % osd, - 'injectargs', '--filestore-blackhole') - time.sleep(2) - self.ctx.daemons.get_daemon('osd', osd).stop() - - def revive_osd(self, osd, timeout=150): - """ - Revive osds by either power cycling (if indicated by the config) - or by restarting. - """ - if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys() - self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name)) - assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." - remote.console.power_on() - if not remote.console.check_status(300): - raise Exception('Failed to revive osd.{o} via ipmi'.format(o=osd)) - teuthology.reconnect(self.ctx, 60, [remote]) - mount_osd_data(self.ctx, remote, str(osd)) - make_admin_daemon_dir(self.ctx, remote) - self.ctx.daemons.get_daemon('osd', osd).reset() - self.ctx.daemons.get_daemon('osd', osd).restart() - # wait for dump_ops_in_flight; this command doesn't appear - # until after the signal handler is installed and it is safe - # to stop the osd again without making valgrind leak checks - # unhappy. see #5924. - self.wait_run_admin_socket(osd, - args=['dump_ops_in_flight'], - timeout=timeout) - - def mark_down_osd(self, osd): - """ - Cluster command wrapper - """ - self.raw_cluster_cmd('osd', 'down', str(osd)) - - def mark_in_osd(self, osd): - """ - Cluster command wrapper - """ - self.raw_cluster_cmd('osd', 'in', str(osd)) - - - ## monitors - - def signal_mon(self, mon, sig): - """ - Wrapper to local get_deamon call - """ - self.ctx.daemons.get_daemon('mon', mon).signal(sig) - - def kill_mon(self, mon): - """ - Kill the monitor by either power cycling (if the config says so), - or by doing a stop. - """ - if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys() - self.log('kill_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name)) - assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." - remote.console.power_off() - else: - self.ctx.daemons.get_daemon('mon', mon).stop() - - def revive_mon(self, mon): - """ - Restart by either power cycling (if the config says so), - or by doing a normal restart. - """ - if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys() - self.log('revive_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name)) - assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." 
- remote.console.power_on() - make_admin_daemon_dir(self.ctx, remote) - self.ctx.daemons.get_daemon('mon', mon).restart() - - def get_mon_status(self, mon): - """ - Extract all the monitor status information from the cluster - """ - addr = self.ctx.ceph.conf['mon.%s' % mon]['mon addr'] - out = self.raw_cluster_cmd('-m', addr, 'mon_status') - return json.loads(out) - - def get_mon_quorum(self): - """ - Extract monitor quorum information from the cluster - """ - out = self.raw_cluster_cmd('quorum_status') - j = json.loads(out) - self.log('quorum_status is %s' % out) - return j['quorum'] - - def wait_for_mon_quorum_size(self, size, timeout=300): - """ - Loop until quorum size is reached. - """ - self.log('waiting for quorum size %d' % size) - start = time.time() - while not len(self.get_mon_quorum()) == size: - if timeout is not None: - assert time.time() - start < timeout, \ - 'failed to reach quorum size %d before timeout expired' % size - time.sleep(3) - self.log("quorum is size %d" % size) - - def get_mon_health(self, debug=False): - """ - Extract all the monitor health information. - """ - out = self.raw_cluster_cmd('health', '--format=json') - if debug: - self.log('health:\n{h}'.format(h=out)) - return json.loads(out) - - ## metadata servers - - def kill_mds(self, mds): - """ - Powercyle if set in config, otherwise just stop. - """ - if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys() - self.log('kill_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name)) - assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." - remote.console.power_off() - else: - self.ctx.daemons.get_daemon('mds', mds).stop() - - def kill_mds_by_rank(self, rank): - """ - kill_mds wrapper to kill based on rank passed. - """ - status = self.get_mds_status_by_rank(rank) - self.kill_mds(status['name']) - - def revive_mds(self, mds, standby_for_rank=None): - """ - Revive mds -- do an ipmpi powercycle (if indicated by the config) - and then restart (using --hot-standby if specified. - """ - if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys() - self.log('revive_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name)) - assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." - remote.console.power_on() - make_admin_daemon_dir(self.ctx, remote) - args = [] - if standby_for_rank: - args.extend(['--hot-standby', standby_for_rank]) - self.ctx.daemons.get_daemon('mds', mds).restart(*args) - - def revive_mds_by_rank(self, rank, standby_for_rank=None): - """ - revive_mds wrapper to revive based on rank passed. - """ - status = self.get_mds_status_by_rank(rank) - self.revive_mds(status['name'], standby_for_rank) - - def get_mds_status(self, mds): - """ - Run cluster commands for the mds in order to get mds information - """ - out = self.raw_cluster_cmd('mds', 'dump', '--format=json') - j = json.loads(' '.join(out.splitlines()[1:])) - # collate; for dup ids, larger gid wins. - for info in j['info'].itervalues(): - if info['name'] == mds: - return info - return None - - def get_mds_status_by_rank(self, rank): - """ - Run cluster commands for the mds in order to get mds information - check rank. - """ - out = self.raw_cluster_cmd('mds', 'dump', '--format=json') - j = json.loads(' '.join(out.splitlines()[1:])) - # collate; for dup ids, larger gid wins. 
- for info in j['info'].itervalues(): - if info['rank'] == rank: - return info - return None - - def get_mds_status_all(self): - """ - Run cluster command to extract all the mds status. - """ - out = self.raw_cluster_cmd('mds', 'dump', '--format=json') - j = json.loads(' '.join(out.splitlines()[1:])) - return j - - def get_filepath(self): - """ - Return path to osd data with {id} needing to be replaced - """ - return "/var/lib/ceph/osd/ceph-{id}" - -def utility_task(name): - """ - Generate ceph_manager subtask corresponding to ceph_manager - method name - """ - def task(ctx, config): - if config is None: - config = {} - args = config.get('args', []) - kwargs = config.get('kwargs', {}) - fn = getattr(ctx.manager, name) - fn(*args, **kwargs) - return task - -revive_osd = utility_task("revive_osd") -kill_osd = utility_task("kill_osd") -create_pool = utility_task("create_pool") -remove_pool = utility_task("remove_pool") -wait_for_clean = utility_task("wait_for_clean") -set_pool_property = utility_task("set_pool_property") diff --git a/tasks/ceph_objectstore_tool.py b/tasks/ceph_objectstore_tool.py deleted file mode 100644 index 3b899de33b8..00000000000 --- a/tasks/ceph_objectstore_tool.py +++ /dev/null @@ -1,679 +0,0 @@ -""" -ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility -""" -from cStringIO import StringIO -import contextlib -import logging -import ceph_manager -from teuthology import misc as teuthology -import time -import os -import string -from teuthology.orchestra import run -import sys -import tempfile -import json -from util.rados import (rados, create_replicated_pool, create_ec_pool) -# from util.rados import (rados, create_ec_pool, -# create_replicated_pool, -# create_cache_pool) - -log = logging.getLogger(__name__) - -# Should get cluster name "ceph" from somewhere -# and normal path from osd_data and osd_journal in conf -FSPATH = "/var/lib/ceph/osd/ceph-{id}" -JPATH = "/var/lib/ceph/osd/ceph-{id}/journal" - - -def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR, - BASE_NAME, DATALINECOUNT): - objects = range(1, NUM_OBJECTS + 1) - for i in objects: - NAME = BASE_NAME + "{num}".format(num=i) - LOCALNAME = os.path.join(DATADIR, NAME) - - dataline = range(DATALINECOUNT) - fd = open(LOCALNAME, "w") - data = "This is the data for " + NAME + "\n" - for _ in dataline: - fd.write(data) - fd.close() - - -def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, - BASE_NAME, DATALINECOUNT): - - objects = range(1, NUM_OBJECTS + 1) - for i in objects: - NAME = BASE_NAME + "{num}".format(num=i) - DDNAME = os.path.join(DATADIR, NAME) - - remote.run(args=['rm', '-f', DDNAME]) - - dataline = range(DATALINECOUNT) - data = "This is the data for " + NAME + "\n" - DATA = "" - for _ in dataline: - DATA += data - teuthology.write_file(remote, DDNAME, DATA) - - -def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, - BASE_NAME, DATALINECOUNT, POOL, db, ec): - ERRORS = 0 - log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS)) - - objects = range(1, NUM_OBJECTS + 1) - for i in objects: - NAME = BASE_NAME + "{num}".format(num=i) - DDNAME = os.path.join(DATADIR, NAME) - - proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME], - wait=False) - # proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME]) - ret = proc.wait() - if ret != 0: - log.critical("Rados put failed with status {ret}". 
- format(ret=proc.exitstatus)) - sys.exit(1) - - db[NAME] = {} - - keys = range(i) - db[NAME]["xattr"] = {} - for k in keys: - if k == 0: - continue - mykey = "key{i}-{k}".format(i=i, k=k) - myval = "val{i}-{k}".format(i=i, k=k) - proc = remote.run(args=['rados', '-p', POOL, 'setxattr', - NAME, mykey, myval]) - ret = proc.wait() - if ret != 0: - log.error("setxattr failed with {ret}".format(ret=ret)) - ERRORS += 1 - db[NAME]["xattr"][mykey] = myval - - # Erasure coded pools don't support omap - if ec: - continue - - # Create omap header in all objects but REPobject1 - if i != 1: - myhdr = "hdr{i}".format(i=i) - proc = remote.run(args=['rados', '-p', POOL, 'setomapheader', - NAME, myhdr]) - ret = proc.wait() - if ret != 0: - log.critical("setomapheader failed with {ret}".format(ret=ret)) - ERRORS += 1 - db[NAME]["omapheader"] = myhdr - - db[NAME]["omap"] = {} - for k in keys: - if k == 0: - continue - mykey = "okey{i}-{k}".format(i=i, k=k) - myval = "oval{i}-{k}".format(i=i, k=k) - proc = remote.run(args=['rados', '-p', POOL, 'setomapval', - NAME, mykey, myval]) - ret = proc.wait() - if ret != 0: - log.critical("setomapval failed with {ret}".format(ret=ret)) - db[NAME]["omap"][mykey] = myval - - return ERRORS - - -def get_lines(filename): - tmpfd = open(filename, "r") - line = True - lines = [] - while line: - line = tmpfd.readline().rstrip('\n') - if line: - lines += [line] - tmpfd.close() - os.unlink(filename) - return lines - - -@contextlib.contextmanager -def task(ctx, config): - """ - Run ceph_objectstore_tool test - - The config should be as follows:: - - ceph_objectstore_tool: - objects: 20 # - pgnum: 12 - """ - - if config is None: - config = {} - assert isinstance(config, dict), \ - 'ceph_objectstore_tool task only accepts a dict for configuration' - - log.info('Beginning ceph_objectstore_tool...') - - log.debug(config) - log.debug(ctx) - clients = ctx.cluster.only(teuthology.is_type('client')) - assert len(clients.remotes) > 0, 'Must specify at least 1 client' - (cli_remote, _) = clients.remotes.popitem() - log.debug(cli_remote) - - # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys())) - # client = clients.popitem() - # log.info(client) - osds = ctx.cluster.only(teuthology.is_type('osd')) - log.info("OSDS") - log.info(osds) - log.info(osds.remotes) - - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - config=config, - logger=log.getChild('ceph_manager'), - ) - ctx.manager = manager - - while (len(manager.get_osd_status()['up']) != - len(manager.get_osd_status()['raw'])): - time.sleep(10) - while (len(manager.get_osd_status()['in']) != - len(manager.get_osd_status()['up'])): - time.sleep(10) - manager.raw_cluster_cmd('osd', 'set', 'noout') - manager.raw_cluster_cmd('osd', 'set', 'nodown') - - PGNUM = config.get('pgnum', 12) - log.info("pgnum: {num}".format(num=PGNUM)) - - ERRORS = 0 - - REP_POOL = "rep_pool" - REP_NAME = "REPobject" - create_replicated_pool(cli_remote, REP_POOL, PGNUM) - ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME) - - EC_POOL = "ec_pool" - EC_NAME = "ECobject" - create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM) - ERRORS += test_objectstore(ctx, config, cli_remote, - EC_POOL, EC_NAME, ec=True) - - if ERRORS == 0: - log.info("TEST PASSED") - else: - log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS)) - - assert ERRORS == 0 - - try: - yield - finally: - log.info('Ending 
ceph_objectstore_tool') - - -def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): - manager = ctx.manager - - osds = ctx.cluster.only(teuthology.is_type('osd')) - - TEUTHDIR = teuthology.get_testdir(ctx) - DATADIR = os.path.join(TEUTHDIR, "data") - DATALINECOUNT = 10000 - ERRORS = 0 - NUM_OBJECTS = config.get('objects', 10) - log.info("objects: {num}".format(num=NUM_OBJECTS)) - - pool_dump = manager.get_pool_dump(REP_POOL) - REPID = pool_dump['pool'] - - log.debug("repid={num}".format(num=REPID)) - - db = {} - - LOCALDIR = tempfile.mkdtemp("cod") - - cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR, - REP_NAME, DATALINECOUNT) - allremote = [] - allremote.append(cli_remote) - allremote += osds.remotes.keys() - allremote = list(set(allremote)) - for remote in allremote: - cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, - REP_NAME, DATALINECOUNT) - - ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR, - REP_NAME, DATALINECOUNT, REP_POOL, db, ec) - - pgs = {} - for stats in manager.get_pg_stats(): - if stats["pgid"].find(str(REPID) + ".") != 0: - continue - if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL: - for osd in stats["acting"]: - pgs.setdefault(osd, []).append(stats["pgid"]) - elif pool_dump["type"] == ceph_manager.CephManager.ERASURE_CODED_POOL: - shard = 0 - for osd in stats["acting"]: - pgs.setdefault(osd, []).append("{pgid}s{shard}". - format(pgid=stats["pgid"], - shard=shard)) - shard += 1 - else: - raise Exception("{pool} has an unexpected type {type}". - format(pool=REP_POOL, type=pool_dump["type"])) - - log.info(pgs) - log.info(db) - - for osd in manager.get_osd_status()['up']: - manager.kill_osd(osd) - time.sleep(5) - - pgswithobjects = set() - objsinpg = {} - - # Test --op list and generate json for all objects - log.info("Test --op list by generating json for all objects") - prefix = ("sudo ceph-objectstore-tool " - "--data-path {fpath} " - "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH) - for remote in osds.remotes.iterkeys(): - log.debug(remote) - log.debug(osds.remotes[remote]) - for role in osds.remotes[remote]: - if string.find(role, "osd.") != 0: - continue - osdid = int(role.split('.')[1]) - log.info("process osd.{id} on {remote}". - format(id=osdid, remote=remote)) - cmd = (prefix + "--op list").format(id=osdid) - proc = remote.run(args=cmd.split(), check_status=False, - stdout=StringIO()) - if proc.exitstatus != 0: - log.error("Bad exit status {ret} from --op list request". - format(ret=proc.exitstatus)) - ERRORS += 1 - else: - for pgline in proc.stdout.getvalue().splitlines(): - if not pgline: - continue - (pg, obj) = json.loads(pgline) - name = obj['oid'] - if name in db: - pgswithobjects.add(pg) - objsinpg.setdefault(pg, []).append(name) - db[name].setdefault("pg2json", - {})[pg] = json.dumps(obj) - - log.info(db) - log.info(pgswithobjects) - log.info(objsinpg) - - if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL: - # Test get-bytes - log.info("Test get-bytes and set-bytes") - for basename in db.keys(): - file = os.path.join(DATADIR, basename) - GETNAME = os.path.join(DATADIR, "get") - SETNAME = os.path.join(DATADIR, "set") - - for remote in osds.remotes.iterkeys(): - for role in osds.remotes[remote]: - if string.find(role, "osd.") != 0: - continue - osdid = int(role.split('.')[1]) - if osdid not in pgs: - continue - - for pg, JSON in db[basename]["pg2json"].iteritems(): - if pg in pgs[osdid]: - cmd = ((prefix + "--pgid {pg}"). 
- format(id=osdid, pg=pg).split()) - cmd.append(run.Raw("'{json}'".format(json=JSON))) - cmd += ("get-bytes {fname}". - format(fname=GETNAME).split()) - proc = remote.run(args=cmd, check_status=False) - if proc.exitstatus != 0: - remote.run(args="rm -f {getfile}". - format(getfile=GETNAME).split()) - log.error("Bad exit status {ret}". - format(ret=proc.exitstatus)) - ERRORS += 1 - continue - cmd = ("diff -q {file} {getfile}". - format(file=file, getfile=GETNAME)) - proc = remote.run(args=cmd.split()) - if proc.exitstatus != 0: - log.error("Data from get-bytes differ") - # log.debug("Got:") - # cat_file(logging.DEBUG, GETNAME) - # log.debug("Expected:") - # cat_file(logging.DEBUG, file) - ERRORS += 1 - remote.run(args="rm -f {getfile}". - format(getfile=GETNAME).split()) - - data = ("put-bytes going into {file}\n". - format(file=file)) - teuthology.write_file(remote, SETNAME, data) - cmd = ((prefix + "--pgid {pg}"). - format(id=osdid, pg=pg).split()) - cmd.append(run.Raw("'{json}'".format(json=JSON))) - cmd += ("set-bytes {fname}". - format(fname=SETNAME).split()) - proc = remote.run(args=cmd, check_status=False) - proc.wait() - if proc.exitstatus != 0: - log.info("set-bytes failed for object {obj} " - "in pg {pg} osd.{id} ret={ret}". - format(obj=basename, pg=pg, - id=osdid, ret=proc.exitstatus)) - ERRORS += 1 - - cmd = ((prefix + "--pgid {pg}"). - format(id=osdid, pg=pg).split()) - cmd.append(run.Raw("'{json}'".format(json=JSON))) - cmd += "get-bytes -".split() - proc = remote.run(args=cmd, check_status=False, - stdout=StringIO()) - proc.wait() - if proc.exitstatus != 0: - log.error("get-bytes after " - "set-bytes ret={ret}". - format(ret=proc.exitstatus)) - ERRORS += 1 - else: - if data != proc.stdout.getvalue(): - log.error("Data inconsistent after " - "set-bytes, got:") - log.error(proc.stdout.getvalue()) - ERRORS += 1 - - cmd = ((prefix + "--pgid {pg}"). - format(id=osdid, pg=pg).split()) - cmd.append(run.Raw("'{json}'".format(json=JSON))) - cmd += ("set-bytes {fname}". - format(fname=file).split()) - proc = remote.run(args=cmd, check_status=False) - proc.wait() - if proc.exitstatus != 0: - log.info("set-bytes failed for object {obj} " - "in pg {pg} osd.{id} ret={ret}". - format(obj=basename, pg=pg, - id=osdid, ret=proc.exitstatus)) - ERRORS += 1 - - log.info("Test list-attrs get-attr") - for basename in db.keys(): - file = os.path.join(DATADIR, basename) - GETNAME = os.path.join(DATADIR, "get") - SETNAME = os.path.join(DATADIR, "set") - - for remote in osds.remotes.iterkeys(): - for role in osds.remotes[remote]: - if string.find(role, "osd.") != 0: - continue - osdid = int(role.split('.')[1]) - if osdid not in pgs: - continue - - for pg, JSON in db[basename]["pg2json"].iteritems(): - if pg in pgs[osdid]: - cmd = ((prefix + "--pgid {pg}"). - format(id=osdid, pg=pg).split()) - cmd.append(run.Raw("'{json}'".format(json=JSON))) - cmd += ["list-attrs"] - proc = remote.run(args=cmd, check_status=False, - stdout=StringIO(), stderr=StringIO()) - proc.wait() - if proc.exitstatus != 0: - log.error("Bad exit status {ret}". - format(ret=proc.exitstatus)) - ERRORS += 1 - continue - keys = proc.stdout.getvalue().split() - values = dict(db[basename]["xattr"]) - - for key in keys: - if (key == "_" or - key == "snapset" or - key == "hinfo_key"): - continue - key = key.strip("_") - if key not in values: - log.error("The key {key} should be present". - format(key=key)) - ERRORS += 1 - continue - exp = values.pop(key) - cmd = ((prefix + "--pgid {pg}"). 
- format(id=osdid, pg=pg).split()) - cmd.append(run.Raw("'{json}'".format(json=JSON))) - cmd += ("get-attr {key}". - format(key="_" + key).split()) - proc = remote.run(args=cmd, check_status=False, - stdout=StringIO()) - proc.wait() - if proc.exitstatus != 0: - log.error("get-attr failed with {ret}". - format(ret=proc.exitstatus)) - ERRORS += 1 - continue - val = proc.stdout.getvalue() - if exp != val: - log.error("For key {key} got value {got} " - "instead of {expected}". - format(key=key, got=val, - expected=exp)) - ERRORS += 1 - if "hinfo_key" in keys: - cmd_prefix = prefix.format(id=osdid) - cmd = """ - expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64) - echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} - - test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder - echo $expected | base64 --decode | \ - {prefix} --pgid {pg} '{json}' set-attr {key} - - test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected - """.format(prefix=cmd_prefix, pg=pg, json=JSON, - key="hinfo_key") - log.debug(cmd) - proc = remote.run(args=['bash', '-e', '-x', - '-c', cmd], - check_status=False, - stdout=StringIO(), - stderr=StringIO()) - proc.wait() - if proc.exitstatus != 0: - log.error("failed with " + - str(proc.exitstatus)) - log.error(proc.stdout.getvalue() + " " + - proc.stderr.getvalue()) - ERRORS += 1 - - if len(values) != 0: - log.error("Not all keys found, remaining keys:") - log.error(values) - - log.info("Test pg info") - for remote in osds.remotes.iterkeys(): - for role in osds.remotes[remote]: - if string.find(role, "osd.") != 0: - continue - osdid = int(role.split('.')[1]) - if osdid not in pgs: - continue - - for pg in pgs[osdid]: - cmd = ((prefix + "--op info --pgid {pg}"). - format(id=osdid, pg=pg).split()) - proc = remote.run(args=cmd, check_status=False, - stdout=StringIO()) - proc.wait() - if proc.exitstatus != 0: - log.error("Failure of --op info command with {ret}". - format(proc.exitstatus)) - ERRORS += 1 - continue - info = proc.stdout.getvalue() - if not str(pg) in info: - log.error("Bad data from info: {info}".format(info=info)) - ERRORS += 1 - - log.info("Test pg logging") - for remote in osds.remotes.iterkeys(): - for role in osds.remotes[remote]: - if string.find(role, "osd.") != 0: - continue - osdid = int(role.split('.')[1]) - if osdid not in pgs: - continue - - for pg in pgs[osdid]: - cmd = ((prefix + "--op log --pgid {pg}"). - format(id=osdid, pg=pg).split()) - proc = remote.run(args=cmd, check_status=False, - stdout=StringIO()) - proc.wait() - if proc.exitstatus != 0: - log.error("Getting log failed for pg {pg} " - "from osd.{id} with {ret}". - format(pg=pg, id=osdid, ret=proc.exitstatus)) - ERRORS += 1 - continue - HASOBJ = pg in pgswithobjects - MODOBJ = "modify" in proc.stdout.getvalue() - if HASOBJ != MODOBJ: - log.error("Bad log for pg {pg} from osd.{id}". - format(pg=pg, id=osdid)) - MSG = (HASOBJ and [""] or ["NOT "])[0] - log.error("Log should {msg}have a modify entry". - format(msg=MSG)) - ERRORS += 1 - - log.info("Test pg export") - EXP_ERRORS = 0 - for remote in osds.remotes.iterkeys(): - for role in osds.remotes[remote]: - if string.find(role, "osd.") != 0: - continue - osdid = int(role.split('.')[1]) - if osdid not in pgs: - continue - - for pg in pgs[osdid]: - fpath = os.path.join(DATADIR, "osd{id}.{pg}". - format(id=osdid, pg=pg)) - - cmd = ((prefix + "--op export --pgid {pg} --file {file}"). 
- format(id=osdid, pg=pg, file=fpath)) - proc = remote.run(args=cmd, check_status=False, - stdout=StringIO()) - proc.wait() - if proc.exitstatus != 0: - log.error("Exporting failed for pg {pg} " - "on osd.{id} with {ret}". - format(pg=pg, id=osdid, ret=proc.exitstatus)) - EXP_ERRORS += 1 - - ERRORS += EXP_ERRORS - - log.info("Test pg removal") - RM_ERRORS = 0 - for remote in osds.remotes.iterkeys(): - for role in osds.remotes[remote]: - if string.find(role, "osd.") != 0: - continue - osdid = int(role.split('.')[1]) - if osdid not in pgs: - continue - - for pg in pgs[osdid]: - cmd = ((prefix + "--op remove --pgid {pg}"). - format(pg=pg, id=osdid)) - proc = remote.run(args=cmd, check_status=False, - stdout=StringIO()) - proc.wait() - if proc.exitstatus != 0: - log.error("Removing failed for pg {pg} " - "on osd.{id} with {ret}". - format(pg=pg, id=osdid, ret=proc.exitstatus)) - RM_ERRORS += 1 - - ERRORS += RM_ERRORS - - IMP_ERRORS = 0 - if EXP_ERRORS == 0 and RM_ERRORS == 0: - log.info("Test pg import") - - for remote in osds.remotes.iterkeys(): - for role in osds.remotes[remote]: - if string.find(role, "osd.") != 0: - continue - osdid = int(role.split('.')[1]) - if osdid not in pgs: - continue - - for pg in pgs[osdid]: - fpath = os.path.join(DATADIR, "osd{id}.{pg}". - format(id=osdid, pg=pg)) - - cmd = ((prefix + "--op import --file {file}"). - format(id=osdid, file=fpath)) - proc = remote.run(args=cmd, check_status=False, - stdout=StringIO()) - proc.wait() - if proc.exitstatus != 0: - log.error("Import failed from {file} with {ret}". - format(file=fpath, ret=proc.exitstatus)) - IMP_ERRORS += 1 - else: - log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES") - - ERRORS += IMP_ERRORS - - if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0: - log.info("Restarting OSDs....") - # They are still look to be up because of setting nodown - for osd in manager.get_osd_status()['up']: - manager.revive_osd(osd) - # Wait for health? - time.sleep(5) - # Let scrub after test runs verify consistency of all copies - log.info("Verify replicated import data") - objects = range(1, NUM_OBJECTS + 1) - for i in objects: - NAME = REP_NAME + "{num}".format(num=i) - TESTNAME = os.path.join(DATADIR, "gettest") - REFNAME = os.path.join(DATADIR, NAME) - - proc = rados(ctx, cli_remote, - ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False) - - ret = proc.wait() - if ret != 0: - log.error("After import, rados get failed with {ret}". - format(ret=proc.exitstatus)) - ERRORS += 1 - continue - - cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME, - ref=REFNAME) - proc = cli_remote.run(args=cmd, check_status=False) - proc.wait() - if proc.exitstatus != 0: - log.error("Data comparison failed for {obj}".format(obj=NAME)) - ERRORS += 1 - - return ERRORS diff --git a/tasks/chef.py b/tasks/chef.py deleted file mode 100644 index 9a9f1bc2c82..00000000000 --- a/tasks/chef.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Chef-solo task -""" -import logging - -from teuthology.orchestra import run -from teuthology import misc - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Run chef-solo on all nodes. 
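- Streams the solo-from-scratch script from git.ceph.com into sh on each
- node, then reconnects so ulimit and other ceph-qa-chef changes take effect.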
- """ - log.info('Running chef-solo...') - - run.wait( - ctx.cluster.run( - args=[ - 'wget', -# '-q', - '-O-', -# 'https://raw.github.com/ceph/ceph-qa-chef/master/solo/solo-from-scratch', - 'http://git.ceph.com/?p=ceph-qa-chef.git;a=blob_plain;f=solo/solo-from-scratch;hb=HEAD', - run.Raw('|'), - 'sh', - '-x', - ], - wait=False, - ) - ) - - log.info('Reconnecting after ceph-qa-chef run') - misc.reconnect(ctx, 10) #Reconnect for ulimit and other ceph-qa-chef changes - diff --git a/tasks/cifs_mount.py b/tasks/cifs_mount.py deleted file mode 100644 index b282b0b7dfb..00000000000 --- a/tasks/cifs_mount.py +++ /dev/null @@ -1,137 +0,0 @@ -""" -Mount cifs clients. Unmount when finished. -""" -import contextlib -import logging -import os - -from teuthology import misc as teuthology -from teuthology.orchestra import run - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Mount/unmount a cifs client. - - The config is optional and defaults to mounting on all clients. If - a config is given, it is expected to be a list of clients to do - this operation on. - - Example that starts smbd and mounts cifs on all nodes:: - - tasks: - - ceph: - - samba: - - cifs-mount: - - interactive: - - Example that splits smbd and cifs: - - tasks: - - ceph: - - samba: [samba.0] - - cifs-mount: [client.0] - - ceph-fuse: [client.1] - - interactive: - - Example that specifies the share name: - - tasks: - - ceph: - - ceph-fuse: - - samba: - samba.0: - cephfuse: "{testdir}/mnt.0" - - cifs-mount: - client.0: - share: cephfuse - - :param ctx: Context - :param config: Configuration - """ - log.info('Mounting cifs clients...') - - if config is None: - config = dict(('client.{id}'.format(id=id_), None) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')) - elif isinstance(config, list): - config = dict((name, None) for name in config) - - clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys())) - - from .samba import get_sambas - samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')] - sambas = list(get_sambas(ctx=ctx, roles=samba_roles)) - (ip, _) = sambas[0][1].ssh.get_transport().getpeername() - log.info('samba ip: {ip}'.format(ip=ip)) - - for id_, remote in clients: - mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_)) - log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format( - id=id_, remote=remote,mnt=mnt)) - - remote.run( - args=[ - 'mkdir', - '--', - mnt, - ], - ) - - rolestr = 'client.{id_}'.format(id_=id_) - unc = "ceph" - log.info("config: {c}".format(c=config)) - if config[rolestr] is not None and 'share' in config[rolestr]: - unc = config[rolestr]['share'] - - remote.run( - args=[ - 'sudo', - 'mount', - '-t', - 'cifs', - '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc), - '-o', - 'username=ubuntu,password=ubuntu', - mnt, - ], - ) - - remote.run( - args=[ - 'sudo', - 'chown', - 'ubuntu:ubuntu', - '{m}/'.format(m=mnt), - ], - ) - - try: - yield - finally: - log.info('Unmounting cifs clients...') - for id_, remote in clients: - remote.run( - args=[ - 'sudo', - 'umount', - mnt, - ], - ) - for id_, remote in clients: - while True: - try: - remote.run( - args=[ - 'rmdir', '--', mnt, - run.Raw('2>&1'), - run.Raw('|'), - 'grep', 'Device or resource busy', - ], - ) - import time - time.sleep(1) - except Exception: - break diff --git a/tasks/cram.py b/tasks/cram.py deleted file mode 100644 index b4539d497d5..00000000000 --- a/tasks/cram.py +++ /dev/null @@ -1,135 +0,0 @@ 
-""" -Cram tests -""" -import logging -import os - -from teuthology import misc as teuthology -from teuthology.parallel import parallel -from teuthology.orchestra import run - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Run all cram tests from the specified urls on the specified - clients. Each client runs tests in parallel. - - Limitations: - Tests must have a .t suffix. Tests with duplicate names will - overwrite each other, so only the last one will run. - - For example:: - - tasks: - - ceph: - - cram: - clients: - client.0: - - http://ceph.com/qa/test.t - - http://ceph.com/qa/test2.t] - client.1: [http://ceph.com/qa/test.t] - - You can also run a list of cram tests on all clients:: - - tasks: - - ceph: - - cram: - clients: - all: [http://ceph.com/qa/test.t] - - :param ctx: Context - :param config: Configuration - """ - assert isinstance(config, dict) - assert 'clients' in config and isinstance(config['clients'], dict), \ - 'configuration must contain a dictionary of clients' - - clients = teuthology.replace_all_with_clients(ctx.cluster, - config['clients']) - testdir = teuthology.get_testdir(ctx) - - try: - for client, tests in clients.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() - client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) - remote.run( - args=[ - 'mkdir', '--', client_dir, - run.Raw('&&'), - 'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir), - run.Raw('&&'), - '{tdir}/virtualenv/bin/pip'.format(tdir=testdir), - 'install', 'cram', - ], - ) - for test in tests: - log.info('fetching test %s for %s', test, client) - assert test.endswith('.t'), 'tests must end in .t' - remote.run( - args=[ - 'wget', '-nc', '-nv', '-P', client_dir, '--', test, - ], - ) - - with parallel() as p: - for role in clients.iterkeys(): - p.spawn(_run_tests, ctx, role) - finally: - for client, tests in clients.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() - client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) - test_files = set([test.rsplit('/', 1)[1] for test in tests]) - - # remove test files unless they failed - for test_file in test_files: - abs_file = os.path.join(client_dir, test_file) - remote.run( - args=[ - 'test', '-f', abs_file + '.err', - run.Raw('||'), - 'rm', '-f', '--', abs_file, - ], - ) - - # ignore failure since more than one client may - # be run on a host, and the client dir should be - # non-empty if the test failed - remote.run( - args=[ - 'rm', '-rf', '--', - '{tdir}/virtualenv'.format(tdir=testdir), - run.Raw(';'), - 'rmdir', '--ignore-fail-on-non-empty', client_dir, - ], - ) - -def _run_tests(ctx, role): - """ - For each role, check to make sure it's a client, then run the cram on that client - - :param ctx: Context - :param role: Roles - """ - assert isinstance(role, basestring) - PREFIX = 'client.' 
- assert role.startswith(PREFIX) - id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - ceph_ref = ctx.summary.get('ceph-sha1', 'master') - - testdir = teuthology.get_testdir(ctx) - log.info('Running tests for %s...', role) - remote.run( - args=[ - run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)), - run.Raw('CEPH_ID="{id}"'.format(id=id_)), - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - '{tdir}/virtualenv/bin/cram'.format(tdir=testdir), - '-v', '--', - run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)), - ], - logger=log.getChild(role), - ) diff --git a/tasks/devstack.py b/tasks/devstack.py deleted file mode 100644 index c5cd41b06bd..00000000000 --- a/tasks/devstack.py +++ /dev/null @@ -1,382 +0,0 @@ -#!/usr/bin/env python -import contextlib -import logging -from cStringIO import StringIO -import textwrap -from configparser import ConfigParser -import time - -from teuthology.orchestra import run -from teuthology import misc -from teuthology.contextutil import nested - -log = logging.getLogger(__name__) - -DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git' -DS_STABLE_BRANCHES = ("havana", "grizzly") - -is_devstack_node = lambda role: role.startswith('devstack') -is_osd_node = lambda role: role.startswith('osd') - - -@contextlib.contextmanager -def task(ctx, config): - if config is None: - config = {} - if not isinstance(config, dict): - raise TypeError("config must be a dict") - with nested(lambda: install(ctx=ctx, config=config), - lambda: smoke(ctx=ctx, config=config), - ): - yield - - -@contextlib.contextmanager -def install(ctx, config): - """ - Install OpenStack DevStack and configure it to use a Ceph cluster for - Glance and Cinder. - - Requires one node with a role 'devstack' - - Since devstack runs rampant on the system it's used on, typically you will - want to reprovision that machine after using devstack on it. - - Also, the default 2GB of RAM that is given to vps nodes is insufficient. I - recommend 4GB. 
Downburst can be instructed to give 4GB to a vps node by - adding this to the yaml: - - downburst: - ram: 4G - - This was created using documentation found here: - https://github.com/openstack-dev/devstack/blob/master/README.md - http://ceph.com/docs/master/rbd/rbd-openstack/ - """ - if config is None: - config = {} - if not isinstance(config, dict): - raise TypeError("config must be a dict") - - devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0] - an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0] - - devstack_branch = config.get("branch", "master") - install_devstack(devstack_node, devstack_branch) - try: - configure_devstack_and_ceph(ctx, config, devstack_node, an_osd_node) - yield - finally: - pass - - -def install_devstack(devstack_node, branch="master"): - log.info("Cloning DevStack repo...") - - args = ['git', 'clone', DEVSTACK_GIT_REPO] - devstack_node.run(args=args) - - if branch != "master": - if branch in DS_STABLE_BRANCHES and not branch.startswith("stable"): - branch = "stable/" + branch - log.info("Checking out {branch} branch...".format(branch=branch)) - cmd = "cd devstack && git checkout " + branch - devstack_node.run(args=cmd) - - log.info("Installing DevStack...") - args = ['cd', 'devstack', run.Raw('&&'), './stack.sh'] - devstack_node.run(args=args) - - -def configure_devstack_and_ceph(ctx, config, devstack_node, ceph_node): - pool_size = config.get('pool_size', '128') - create_pools(ceph_node, pool_size) - distribute_ceph_conf(devstack_node, ceph_node) - # This is where we would install python-ceph and ceph-common but it appears - # the ceph task does that for us. - generate_ceph_keys(ceph_node) - distribute_ceph_keys(devstack_node, ceph_node) - secret_uuid = set_libvirt_secret(devstack_node, ceph_node) - update_devstack_config_files(devstack_node, secret_uuid) - set_apache_servername(devstack_node) - # Rebooting is the most-often-used method of restarting devstack services - misc.reboot(devstack_node) - start_devstack(devstack_node) - restart_apache(devstack_node) - - -def create_pools(ceph_node, pool_size): - log.info("Creating pools on Ceph cluster...") - - for pool_name in ['volumes', 'images', 'backups']: - args = ['ceph', 'osd', 'pool', 'create', pool_name, pool_size] - ceph_node.run(args=args) - - -def distribute_ceph_conf(devstack_node, ceph_node): - log.info("Copying ceph.conf to DevStack node...") - - ceph_conf_path = '/etc/ceph/ceph.conf' - ceph_conf = misc.get_file(ceph_node, ceph_conf_path, sudo=True) - misc.sudo_write_file(devstack_node, ceph_conf_path, ceph_conf) - - -def generate_ceph_keys(ceph_node): - log.info("Generating Ceph keys...") - - ceph_auth_cmds = [ - ['ceph', 'auth', 'get-or-create', 'client.cinder', 'mon', - 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'], # noqa - ['ceph', 'auth', 'get-or-create', 'client.glance', 'mon', - 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'], # noqa - ['ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon', - 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'], # noqa - ] - for cmd in ceph_auth_cmds: - ceph_node.run(args=cmd) - - -def distribute_ceph_keys(devstack_node, ceph_node): - log.info("Copying Ceph keys to DevStack node...") - - def copy_key(from_remote, key_name, to_remote, dest_path, owner): - key_stringio = StringIO() - from_remote.run( - args=['ceph', 'auth', 'get-or-create', key_name], - stdout=key_stringio) - 
key_stringio.seek(0) - misc.sudo_write_file(to_remote, dest_path, - key_stringio, owner=owner) - keys = [ - dict(name='client.glance', - path='/etc/ceph/ceph.client.glance.keyring', - # devstack appears to just want root:root - #owner='glance:glance', - ), - dict(name='client.cinder', - path='/etc/ceph/ceph.client.cinder.keyring', - # devstack appears to just want root:root - #owner='cinder:cinder', - ), - dict(name='client.cinder-backup', - path='/etc/ceph/ceph.client.cinder-backup.keyring', - # devstack appears to just want root:root - #owner='cinder:cinder', - ), - ] - for key_dict in keys: - copy_key(ceph_node, key_dict['name'], devstack_node, - key_dict['path'], key_dict.get('owner')) - - -def set_libvirt_secret(devstack_node, ceph_node): - log.info("Setting libvirt secret...") - - cinder_key_stringio = StringIO() - ceph_node.run(args=['ceph', 'auth', 'get-key', 'client.cinder'], - stdout=cinder_key_stringio) - cinder_key = cinder_key_stringio.getvalue().strip() - - uuid_stringio = StringIO() - devstack_node.run(args=['uuidgen'], stdout=uuid_stringio) - uuid = uuid_stringio.getvalue().strip() - - secret_path = '/tmp/secret.xml' - secret_template = textwrap.dedent(""" - - {uuid} - - client.cinder secret - - """) - misc.sudo_write_file(devstack_node, secret_path, - secret_template.format(uuid=uuid)) - devstack_node.run(args=['sudo', 'virsh', 'secret-define', '--file', - secret_path]) - devstack_node.run(args=['sudo', 'virsh', 'secret-set-value', '--secret', - uuid, '--base64', cinder_key]) - return uuid - - -def update_devstack_config_files(devstack_node, secret_uuid): - log.info("Updating DevStack config files to use Ceph...") - - def backup_config(node, file_name, backup_ext='.orig.teuth'): - node.run(args=['cp', '-f', file_name, file_name + backup_ext]) - - def update_config(config_name, config_stream, update_dict, - section='DEFAULT'): - parser = ConfigParser() - parser.read_file(config_stream) - for (key, value) in update_dict.items(): - parser.set(section, key, value) - out_stream = StringIO() - parser.write(out_stream) - out_stream.seek(0) - return out_stream - - updates = [ - dict(name='/etc/glance/glance-api.conf', options=dict( - default_store='rbd', - rbd_store_user='glance', - rbd_store_pool='images', - show_image_direct_url='True',)), - dict(name='/etc/cinder/cinder.conf', options=dict( - volume_driver='cinder.volume.drivers.rbd.RBDDriver', - rbd_pool='volumes', - rbd_ceph_conf='/etc/ceph/ceph.conf', - rbd_flatten_volume_from_snapshot='false', - rbd_max_clone_depth='5', - glance_api_version='2', - rbd_user='cinder', - rbd_secret_uuid=secret_uuid, - backup_driver='cinder.backup.drivers.ceph', - backup_ceph_conf='/etc/ceph/ceph.conf', - backup_ceph_user='cinder-backup', - backup_ceph_chunk_size='134217728', - backup_ceph_pool='backups', - backup_ceph_stripe_unit='0', - backup_ceph_stripe_count='0', - restore_discard_excess_bytes='true', - )), - dict(name='/etc/nova/nova.conf', options=dict( - libvirt_images_type='rbd', - libvirt_images_rbd_pool='volumes', - libvirt_images_rbd_ceph_conf='/etc/ceph/ceph.conf', - rbd_user='cinder', - rbd_secret_uuid=secret_uuid, - libvirt_inject_password='false', - libvirt_inject_key='false', - libvirt_inject_partition='-2', - )), - ] - - for update in updates: - file_name = update['name'] - options = update['options'] - config_str = misc.get_file(devstack_node, file_name, sudo=True) - config_stream = StringIO(config_str) - backup_config(devstack_node, file_name) - new_config_stream = update_config(file_name, config_stream, options) - 
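The update_config() helper above rewrites the ini-style OpenStack config files entirely through ConfigParser streams. A self-contained sketch of the same pattern (the section, keys, and values here are placeholders)::

    from configparser import ConfigParser
    from io import StringIO

    original = StringIO(u"[DEFAULT]\nrbd_user = nobody\n")
    parser = ConfigParser()
    parser.read_file(original)              # parse the existing config
    parser.set('DEFAULT', 'rbd_user', 'cinder')
    parser.set('DEFAULT', 'rbd_pool', 'volumes')

    out = StringIO()
    parser.write(out)                       # serialize the updated config
    print(out.getvalue())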
misc.sudo_write_file(devstack_node, file_name, new_config_stream) - - -def set_apache_servername(node): - # Apache complains: "Could not reliably determine the server's fully - # qualified domain name, using 127.0.0.1 for ServerName" - # So, let's make sure it knows its name. - log.info("Setting Apache ServerName...") - - hostname = node.hostname - config_file = '/etc/apache2/conf.d/servername' - misc.sudo_write_file(node, config_file, - "ServerName {name}".format(name=hostname)) - - -def start_devstack(devstack_node): - log.info("Patching devstack start script...") - # This causes screen to start headless - otherwise rejoin-stack.sh fails - # because there is no terminal attached. - cmd = "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh" - devstack_node.run(args=cmd) - - log.info("Starting devstack...") - cmd = "cd devstack && ./rejoin-stack.sh" - devstack_node.run(args=cmd) - - # This was added because I was getting timeouts on Cinder requests - which - # were trying to access Keystone on port 5000. A more robust way to handle - # this would be to introduce a wait-loop on devstack_node that checks to - # see if a service is listening on port 5000. - log.info("Waiting 30s for devstack to start...") - time.sleep(30) - - -def restart_apache(node): - node.run(args=['sudo', '/etc/init.d/apache2', 'restart'], wait=True) - - -@contextlib.contextmanager -def exercise(ctx, config): - log.info("Running devstack exercises...") - - if config is None: - config = {} - if not isinstance(config, dict): - raise TypeError("config must be a dict") - - devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0] - - # TODO: save the log *and* preserve failures - #devstack_archive_dir = create_devstack_archive(ctx, devstack_node) - - try: - #cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format( # noqa - # dir=devstack_archive_dir) - cmd = "cd devstack && ./exercise.sh" - devstack_node.run(args=cmd, wait=True) - yield - finally: - pass - - -def create_devstack_archive(ctx, devstack_node): - test_dir = misc.get_testdir(ctx) - devstack_archive_dir = "{test_dir}/archive/devstack".format( - test_dir=test_dir) - devstack_node.run(args="mkdir -p " + devstack_archive_dir) - return devstack_archive_dir - - -@contextlib.contextmanager -def smoke(ctx, config): - log.info("Running a basic smoketest...") - - devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0] - an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0] - - try: - create_volume(devstack_node, an_osd_node, 'smoke0', 1) - yield - finally: - pass - - -def create_volume(devstack_node, ceph_node, vol_name, size): - """ - :param size: The size of the volume, in GB - """ - size = str(size) - log.info("Creating a {size}GB volume named {name}...".format( - name=vol_name, - size=size)) - args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create', - '--display-name', vol_name, size] - out_stream = StringIO() - devstack_node.run(args=args, stdout=out_stream, wait=True) - vol_info = parse_os_table(out_stream.getvalue()) - log.debug("Volume info: %s", str(vol_info)) - - out_stream = StringIO() - try: - ceph_node.run(args="rbd --id cinder ls -l volumes", stdout=out_stream, - wait=True) - except run.CommandFailedError: - log.debug("Original rbd call failed; retrying without '--id cinder'") - ceph_node.run(args="rbd ls -l volumes", stdout=out_stream, - wait=True) - - assert vol_info['id'] in out_stream.getvalue(), \ - "Volume not found on Ceph cluster" - assert vol_info['size'] == size, \ - 
"Volume size on Ceph cluster is different than specified" - return vol_info['id'] - - -def parse_os_table(table_str): - out_dict = dict() - for line in table_str.split('\n'): - if line.startswith('|'): - items = line.split() - out_dict[items[1]] = items[3] - return out_dict diff --git a/tasks/die_on_err.py b/tasks/die_on_err.py deleted file mode 100644 index bf422ae547d..00000000000 --- a/tasks/die_on_err.py +++ /dev/null @@ -1,70 +0,0 @@ -""" -Raise exceptions on osd coredumps or test err directories -""" -import contextlib -import logging -import time -from teuthology.orchestra import run - -import ceph_manager -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Die if {testdir}/err exists or if an OSD dumps core - """ - if config is None: - config = {} - - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') - log.info('num_osds is %s' % num_osds) - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - while len(manager.get_osd_status()['up']) < num_osds: - time.sleep(10) - - testdir = teuthology.get_testdir(ctx) - - while True: - for i in range(num_osds): - (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys() - p = osd_remote.run( - args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ], - wait=True, - check_status=False, - ) - exit_status = p.exitstatus - - if exit_status == 0: - log.info("osd %d has an error" % i) - raise Exception("osd %d error" % i) - - log_path = '/var/log/ceph/osd.%d.log' % (i) - - p = osd_remote.run( - args = [ - 'tail', '-1', log_path, - run.Raw('|'), - 'grep', '-q', 'end dump' - ], - wait=True, - check_status=False, - ) - exit_status = p.exitstatus - - if exit_status == 0: - log.info("osd %d dumped core" % i) - raise Exception("osd %d dumped core" % i) - - time.sleep(5) diff --git a/tasks/divergent_priors.py b/tasks/divergent_priors.py deleted file mode 100644 index e10f67547ca..00000000000 --- a/tasks/divergent_priors.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Special case divergence test -""" -import logging -import time - -import ceph_manager -from teuthology import misc as teuthology -from util.rados import rados - - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Test handling of divergent entries with prior_version - prior to log_tail - - config: none - - Requires 3 osds. 
- """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'divergent_priors task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - ctx.manager = manager - - while len(manager.get_osd_status()['up']) < 3: - time.sleep(10) - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.raw_cluster_cmd('osd', 'set', 'noout') - manager.raw_cluster_cmd('osd', 'set', 'noin') - manager.raw_cluster_cmd('osd', 'set', 'nodown') - manager.wait_for_clean() - - # something that is always there - dummyfile = '/etc/fstab' - dummyfile2 = '/etc/resolv.conf' - - # create 1 pg pool - log.info('creating foo') - manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1') - - osds = [0, 1, 2] - for i in osds: - manager.set_config(i, osd_min_pg_log_entries=1) - - # determine primary - divergent = manager.get_pg_primary('foo', 0) - log.info("primary and soon to be divergent is %d", divergent) - non_divergent = [0,1,2] - non_divergent.remove(divergent) - - log.info('writing initial objects') - # write 1000 objects - for i in range(1000): - rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) - - manager.wait_for_clean() - - # blackhole non_divergent - log.info("blackholing osds %s", str(non_divergent)) - for i in non_divergent: - manager.set_config(i, filestore_blackhole='') - - # write 1 (divergent) object - log.info('writing divergent object existing_0') - rados( - ctx, mon, ['-p', 'foo', 'put', 'existing_0', dummyfile2], - wait=False) - time.sleep(10) - mon.run( - args=['killall', '-9', 'rados'], - wait=True, - check_status=False) - - # kill all the osds - log.info('killing all the osds') - for i in osds: - manager.kill_osd(i) - for i in osds: - manager.mark_down_osd(i) - for i in osds: - manager.mark_out_osd(i) - - # bring up non-divergent - log.info("bringing up non_divergent %s", str(non_divergent)) - for i in non_divergent: - manager.revive_osd(i) - for i in non_divergent: - manager.mark_in_osd(i) - - log.info('making log long to prevent backfill') - for i in non_divergent: - manager.set_config(i, osd_min_pg_log_entries=100000) - - # write 1 non-divergent object (ensure that old divergent one is divergent) - log.info('writing non-divergent object existing_1') - rados(ctx, mon, ['-p', 'foo', 'put', 'existing_1', dummyfile2]) - - manager.wait_for_recovery() - - # ensure no recovery - log.info('delay recovery') - for i in non_divergent: - manager.set_config(i, osd_recovery_delay_start=100000) - - # bring in our divergent friend - log.info("revive divergent %d", divergent) - manager.revive_osd(divergent) - - while len(manager.get_osd_status()['up']) < 3: - time.sleep(10) - - log.info('delay recovery divergent') - manager.set_config(divergent, osd_recovery_delay_start=100000) - log.info('mark divergent in') - manager.mark_in_osd(divergent) - - log.info('wait for peering') - rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile]) - - log.info("killing divergent %d", divergent) - manager.kill_osd(divergent) - log.info("reviving divergent %d", divergent) - manager.revive_osd(divergent) - - log.info('allowing recovery') - for i in non_divergent: - manager.set_config(i, osd_recovery_delay_start=0) - - log.info('reading existing_0') - exit_status = 
rados(ctx, mon, - ['-p', 'foo', 'get', 'existing_0', - '-o', '/tmp/existing']) - assert exit_status is 0 - log.info("success") diff --git a/tasks/dump_stuck.py b/tasks/dump_stuck.py deleted file mode 100644 index 9e1780f0156..00000000000 --- a/tasks/dump_stuck.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -Dump_stuck command -""" -import logging -import re -import time - -import ceph_manager -from teuthology import misc as teuthology - - -log = logging.getLogger(__name__) - -def check_stuck(manager, num_inactive, num_unclean, num_stale, timeout=10): - """ - Do checks. Make sure get_stuck_pgs return the right amout of information, then - extract health information from the raw_cluster_cmd and compare the results with - values passed in. This passes if all asserts pass. - - :param num_manager: Ceph manager - :param num_inactive: number of inaactive pages that are stuck - :param num_unclean: number of unclean pages that are stuck - :paran num_stale: number of stale pages that are stuck - :param timeout: timeout value for get_stuck_pgs calls - """ - inactive = manager.get_stuck_pgs('inactive', timeout) - assert len(inactive) == num_inactive - unclean = manager.get_stuck_pgs('unclean', timeout) - assert len(unclean) == num_unclean - stale = manager.get_stuck_pgs('stale', timeout) - assert len(stale) == num_stale - - # check health output as well - health = manager.raw_cluster_cmd('health') - log.debug('ceph health is: %s', health) - if num_inactive > 0: - m = re.search('(\d+) pgs stuck inactive', health) - assert int(m.group(1)) == num_inactive - if num_unclean > 0: - m = re.search('(\d+) pgs stuck unclean', health) - assert int(m.group(1)) == num_unclean - if num_stale > 0: - m = re.search('(\d+) pgs stuck stale', health) - assert int(m.group(1)) == num_stale - -def task(ctx, config): - """ - Test the dump_stuck command. 
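check_stuck() above pulls the stuck-PG counts out of the plain-text `ceph health` output with regular expressions. A standalone sketch of that parsing, using a made-up health string::

    import re

    health = 'HEALTH_WARN 12 pgs stuck inactive; 12 pgs stuck unclean'
    for state in ('inactive', 'unclean', 'stale'):
        m = re.search(r'(\d+) pgs stuck ' + state, health)
        count = int(m.group(1)) if m else 0
        print('%s: %d' % (state, count))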
- - :param ctx: Context - :param config: Configuration - """ - assert config is None, \ - 'dump_stuck requires no configuration' - assert teuthology.num_instances_of_type(ctx.cluster, 'osd') == 2, \ - 'dump_stuck requires exactly 2 osds' - - timeout = 60 - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.wait_for_clean(timeout) - - manager.raw_cluster_cmd('tell', 'mon.0', 'injectargs', '--', -# '--mon-osd-report-timeout 90', - '--mon-pg-stuck-threshold 10') - - check_stuck( - manager, - num_inactive=0, - num_unclean=0, - num_stale=0, - ) - num_pgs = manager.get_num_pgs() - - manager.mark_out_osd(0) - time.sleep(timeout) - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.wait_for_recovery(timeout) - - check_stuck( - manager, - num_inactive=0, - num_unclean=num_pgs, - num_stale=0, - ) - - manager.mark_in_osd(0) - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.wait_for_clean(timeout) - - check_stuck( - manager, - num_inactive=0, - num_unclean=0, - num_stale=0, - ) - - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'): - manager.kill_osd(id_) - manager.mark_down_osd(id_) - - starttime = time.time() - done = False - while not done: - try: - check_stuck( - manager, - num_inactive=0, - num_unclean=0, - num_stale=num_pgs, - ) - done = True - except AssertionError: - # wait up to 15 minutes to become stale - if time.time() - starttime > 900: - raise - - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'): - manager.revive_osd(id_) - manager.mark_in_osd(id_) - while True: - try: - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - break - except Exception: - log.exception('osds must not be started yet, waiting...') - time.sleep(1) - manager.wait_for_clean(timeout) - - check_stuck( - manager, - num_inactive=0, - num_unclean=0, - num_stale=0, - ) diff --git a/tasks/ec_lost_unfound.py b/tasks/ec_lost_unfound.py deleted file mode 100644 index f12ae74c12f..00000000000 --- a/tasks/ec_lost_unfound.py +++ /dev/null @@ -1,134 +0,0 @@ -""" -Lost_unfound -""" -import logging -import ceph_manager -from teuthology import misc as teuthology -from util.rados import rados - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Test handling of lost objects on an ec pool. 
- - A pretty rigid cluster is brought up andtested by this task - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'lost_unfound task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') - manager.wait_for_clean() - - - pool = manager.create_pool_with_unique_name( - ec_pool=True, - ec_m=2, - ec_k=2) - - # something that is always there - dummyfile = '/etc/fstab' - - # kludge to make sure they get a map - rados(ctx, mon, ['-p', pool, 'put', 'dummy', dummyfile]) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.wait_for_recovery() - - # create old objects - for f in range(1, 10): - rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', pool, 'rm', 'existed_%d' % f]) - - # delay recovery, and make the pg log very long (to prevent backfill) - manager.raw_cluster_cmd( - 'tell', 'osd.1', - 'injectargs', - '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000' - ) - - manager.kill_osd(0) - manager.mark_down_osd(0) - manager.kill_osd(3) - manager.mark_down_osd(3) - - for f in range(1, 10): - rados(ctx, mon, ['-p', pool, 'put', 'new_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile]) - - # take out osd.1 and a necessary shard of those objects. 
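To spell out the arithmetic behind that comment (this note is editorial, not from the original file): the pool above is erasure coded with k=2 data chunks and m=2 coding chunks, so each object is stored as k + m = 4 shards and survives the loss of at most m = 2 of them. With osd.0 and osd.3 already down, killing osd.1 below removes a third shard and the newly written objects become unfound::

    k, m = 2, 2                  # ec_k=2, ec_m=2 as in the pool created above
    shards_per_object = k + m    # 4 shards per object
    max_lost_shards = m          # at most 2 shards may be lost
    assert 3 > max_lost_shards   # three OSDs down -> objects become unfound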
- manager.kill_osd(1) - manager.mark_down_osd(1) - manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it') - manager.revive_osd(0) - manager.wait_till_osd_is_up(0) - manager.revive_osd(3) - manager.wait_till_osd_is_up(3) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') - manager.wait_till_active() - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') - - # verify that there are unfound objects - unfound = manager.get_num_unfound_objects() - log.info("there are %d unfound objects" % unfound) - assert unfound - - # mark stuff lost - pgs = manager.get_pg_stats() - for pg in pgs: - if pg['stat_sum']['num_objects_unfound'] > 0: - # verify that i can list them direct from the osd - log.info('listing missing/lost in %s state %s', pg['pgid'], - pg['state']); - m = manager.list_pg_missing(pg['pgid']) - log.info('%s' % m) - assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound'] - - log.info("reverting unfound in %s", pg['pgid']) - manager.raw_cluster_cmd('pg', pg['pgid'], - 'mark_unfound_lost', 'delete') - else: - log.info("no unfound in %s", pg['pgid']) - - manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5') - manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5') - manager.raw_cluster_cmd('tell', 'osd.3', 'debug', 'kick_recovery_wq', '5') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') - manager.wait_for_recovery() - - # verify result - for f in range(1, 10): - err = rados(ctx, mon, ['-p', pool, 'get', 'new_%d' % f, '-']) - assert err - err = rados(ctx, mon, ['-p', pool, 'get', 'existed_%d' % f, '-']) - assert err - err = rados(ctx, mon, ['-p', pool, 'get', 'existing_%d' % f, '-']) - assert err - - # see if osd.1 can cope - manager.revive_osd(1) - manager.wait_till_osd_is_up(1) - manager.wait_for_clean() diff --git a/tasks/filestore_idempotent.py b/tasks/filestore_idempotent.py deleted file mode 100644 index ac43fb0ffe2..00000000000 --- a/tasks/filestore_idempotent.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -Filestore/filejournal handler -""" -import logging -from teuthology.orchestra import run -import random - -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Test filestore/filejournal handling of non-idempotent events. - - Currently this is a kludge; we require the ceph task preceeds us just - so that we get the tarball installed to run the test binary. - - :param ctx: Context - :param config: Configuration - """ - assert config is None or isinstance(config, list) \ - or isinstance(config, dict), \ - "task only supports a list or dictionary for configuration" - all_clients = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - if config is None: - config = all_clients - if isinstance(config, list): - config = dict.fromkeys(config) - clients = config.keys() - - # just use the first client... 
- client = clients[0]; - (remote,) = ctx.cluster.only(client).remotes.iterkeys() - - testdir = teuthology.get_testdir(ctx) - - dir = '%s/data/test.%s' % (testdir, client) - - seed = str(int(random.uniform(1,100))) - - try: - log.info('creating a working dir') - remote.run(args=['mkdir', dir]) - remote.run( - args=[ - 'cd', dir, - run.Raw('&&'), - 'wget','-q', '-Orun_seed_to.sh', - 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to.sh;hb=HEAD', - run.Raw('&&'), - 'wget','-q', '-Orun_seed_to_range.sh', - 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to_range.sh;hb=HEAD', - run.Raw('&&'), - 'chmod', '+x', 'run_seed_to.sh', 'run_seed_to_range.sh', - ]); - - log.info('running a series of tests') - proc = remote.run( - args=[ - 'cd', dir, - run.Raw('&&'), - './run_seed_to_range.sh', seed, '50', '300', - ], - wait=False, - check_status=False) - result = proc.wait(); - - if result != 0: - remote.run( - args=[ - 'cp', '-a', dir, '{tdir}/archive/idempotent_failure'.format(tdir=testdir), - ]) - raise Exception("./run_seed_to_range.sh errored out") - - finally: - remote.run(args=[ - 'rm', '-rf', '--', dir - ]) - diff --git a/tasks/kclient.py b/tasks/kclient.py deleted file mode 100644 index e06f84561d8..00000000000 --- a/tasks/kclient.py +++ /dev/null @@ -1,112 +0,0 @@ -""" -Mount/unmount a ``kernel`` client. -""" -import contextlib -import logging -import os - -from teuthology import misc as teuthology -from util.kclient import write_secret_file - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Mount/unmount a ``kernel`` client. - - The config is optional and defaults to mounting on all clients. If - a config is given, it is expected to be a list of clients to do - this operation on. This lets you e.g. set up one client with - ``ceph-fuse`` and another with ``kclient``. 
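Further down, the kclient task joins the monitor addresses into the device string handed to /sbin/mount.ceph. A tiny sketch of that string assembly, with placeholder addresses::

    mons = ['10.0.0.1:6789', '10.0.0.2:6789', '10.0.0.3:6789']
    device = '{mons}:/'.format(mons=','.join(mons))
    print(device)   # 10.0.0.1:6789,10.0.0.2:6789,10.0.0.3:6789:/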
- - Example that mounts all clients:: - - tasks: - - ceph: - - kclient: - - interactive: - - Example that uses both ``kclient` and ``ceph-fuse``:: - - tasks: - - ceph: - - ceph-fuse: [client.0] - - kclient: [client.1] - - interactive: - - :param ctx: Context - :param config: Configuration - """ - log.info('Mounting kernel clients...') - assert config is None or isinstance(config, list), \ - "task kclient got invalid config" - - if config is None: - config = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - clients = list(teuthology.get_clients(ctx=ctx, roles=config)) - - testdir = teuthology.get_testdir(ctx) - - for id_, remote in clients: - mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format( - id=id_, remote=remote, mnt=mnt)) - - # figure mon ips - remotes_and_roles = ctx.cluster.remotes.items() - roles = [roles for (remote_, roles) in remotes_and_roles] - ips = [host for (host, port) in (remote_.ssh.get_transport().getpeername() for (remote_, roles) in remotes_and_roles)] - mons = teuthology.get_mons(roles, ips).values() - - keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) - secret = '{tdir}/data/client.{id}.secret'.format(tdir=testdir, id=id_) - write_secret_file(ctx, remote, 'client.{id}'.format(id=id_), - keyring, secret) - - remote.run( - args=[ - 'mkdir', - '--', - mnt, - ], - ) - - remote.run( - args=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - '/sbin/mount.ceph', - '{mons}:/'.format(mons=','.join(mons)), - mnt, - '-v', - '-o', - 'name={id},secretfile={secret}'.format(id=id_, - secret=secret), - ], - ) - - try: - yield - finally: - log.info('Unmounting kernel clients...') - for id_, remote in clients: - log.debug('Unmounting client client.{id}...'.format(id=id_)) - mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - remote.run( - args=[ - 'sudo', - 'umount', - mnt, - ], - ) - remote.run( - args=[ - 'rmdir', - '--', - mnt, - ], - ) diff --git a/tasks/locktest.py b/tasks/locktest.py deleted file mode 100755 index 59a7122223e..00000000000 --- a/tasks/locktest.py +++ /dev/null @@ -1,134 +0,0 @@ -""" -locktests -""" -import logging - -from teuthology.orchestra import run -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Run locktests, from the xfstests suite, on the given - clients. Whether the clients are ceph-fuse or kernel does not - matter, and the two clients can refer to the same mount. - - The config is a list of two clients to run the locktest on. The - first client will be the host. - - For example: - tasks: - - ceph: - - ceph-fuse: [client.0, client.1] - - locktest: - [client.0, client.1] - - This task does not yield; there would be little point. 
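The host and client halves of the lock test below are started with wait=False and only checked once both are running. A standalone analogue of that start-both-then-wait-both pattern (the commands here are placeholders for the locktest binary)::

    import subprocess

    host_proc = subprocess.Popen(['true'])    # stands in for the locktest server
    client_proc = subprocess.Popen(['true'])  # stands in for the locktest client

    host_rc = host_proc.wait()
    client_rc = client_proc.wait()
    if host_rc != 0 or client_rc != 0:
        raise RuntimeError('Did not pass locking test!')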
- - :param ctx: Context - :param config: Configuration - """ - - assert isinstance(config, list) - log.info('fetching and building locktests...') - (host,) = ctx.cluster.only(config[0]).remotes - (client,) = ctx.cluster.only(config[1]).remotes - ( _, _, host_id) = config[0].partition('.') - ( _, _, client_id) = config[1].partition('.') - testdir = teuthology.get_testdir(ctx) - hostmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=host_id) - clientmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=client_id) - - try: - for client_name in config: - log.info('building on {client_}'.format(client_=client_name)) - ctx.cluster.only(client_name).run( - args=[ - # explicitly does not support multiple autotest tasks - # in a single run; the result archival would conflict - 'mkdir', '{tdir}/archive/locktest'.format(tdir=testdir), - run.Raw('&&'), - 'mkdir', '{tdir}/locktest'.format(tdir=testdir), - run.Raw('&&'), - 'wget', - '-nv', - 'https://raw.github.com/gregsfortytwo/xfstests-ceph/master/src/locktest.c', - '-O', '{tdir}/locktest/locktest.c'.format(tdir=testdir), - run.Raw('&&'), - 'g++', '{tdir}/locktest/locktest.c'.format(tdir=testdir), - '-o', '{tdir}/locktest/locktest'.format(tdir=testdir) - ], - logger=log.getChild('locktest_client.{id}'.format(id=client_name)), - ) - - log.info('built locktest on each client') - - host.run(args=['sudo', 'touch', - '{mnt}/locktestfile'.format(mnt=hostmnt), - run.Raw('&&'), - 'sudo', 'chown', 'ubuntu.ubuntu', - '{mnt}/locktestfile'.format(mnt=hostmnt) - ] - ) - - log.info('starting on host') - hostproc = host.run( - args=[ - '{tdir}/locktest/locktest'.format(tdir=testdir), - '-p', '6788', - '-d', - '{mnt}/locktestfile'.format(mnt=hostmnt), - ], - wait=False, - logger=log.getChild('locktest.host'), - ) - log.info('starting on client') - (_,_,hostaddr) = host.name.partition('@') - clientproc = client.run( - args=[ - '{tdir}/locktest/locktest'.format(tdir=testdir), - '-p', '6788', - '-d', - '-h', hostaddr, - '{mnt}/locktestfile'.format(mnt=clientmnt), - ], - logger=log.getChild('locktest.client'), - wait=False - ) - - hostresult = hostproc.wait() - clientresult = clientproc.wait() - if (hostresult != 0) or (clientresult != 0): - raise Exception("Did not pass locking test!") - log.info('finished locktest executable with results {r} and {s}'. 
\ - format(r=hostresult, s=clientresult)) - - finally: - log.info('cleaning up host dir') - host.run( - args=[ - 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir), - run.Raw('&&'), - 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir), - run.Raw('&&'), - 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir), - run.Raw('&&'), - 'rmdir', '{tdir}/locktest' - ], - logger=log.getChild('.{id}'.format(id=config[0])), - ) - log.info('cleaning up client dir') - client.run( - args=[ - 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir), - run.Raw('&&'), - 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir), - run.Raw('&&'), - 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir), - run.Raw('&&'), - 'rmdir', '{tdir}/locktest'.format(tdir=testdir) - ], - logger=log.getChild('.{id}'.format(\ - id=config[1])), - ) diff --git a/tasks/lost_unfound.py b/tasks/lost_unfound.py deleted file mode 100644 index bf209a37090..00000000000 --- a/tasks/lost_unfound.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -Lost_unfound -""" -import logging -import time -import ceph_manager -from teuthology import misc as teuthology -from util.rados import rados - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Test handling of lost objects. - - A pretty rigid cluseter is brought up andtested by this task - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'lost_unfound task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - while len(manager.get_osd_status()['up']) < 3: - time.sleep(10) - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_clean() - - # something that is always there - dummyfile = '/etc/fstab' - - # take an osd out until the very end - manager.kill_osd(2) - manager.mark_down_osd(2) - manager.mark_out_osd(2) - - # kludge to make sure they get a map - rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile]) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.wait_for_recovery() - - # create old objects - for f in range(1, 10): - rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', 'data', 'rm', 'existed_%d' % f]) - - # delay recovery, and make the pg log very long (to prevent backfill) - manager.raw_cluster_cmd( - 'tell', 'osd.1', - 'injectargs', - '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000' - ) - - manager.kill_osd(0) - manager.mark_down_osd(0) - - for f in range(1, 10): - rados(ctx, mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) - - # bring osd.0 back up, let it peer, but don't replicate the new - # objects... 
- log.info('osd.0 command_args is %s' % 'foo') - log.info(ctx.daemons.get_daemon('osd', 0).command_args) - ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([ - '--osd-recovery-delay-start', '1000' - ]) - manager.revive_osd(0) - manager.mark_in_osd(0) - manager.wait_till_osd_is_up(0) - - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.wait_till_active() - - # take out osd.1 and the only copy of those objects. - manager.kill_osd(1) - manager.mark_down_osd(1) - manager.mark_out_osd(1) - manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it') - - # bring up osd.2 so that things would otherwise, in theory, recovery fully - manager.revive_osd(2) - manager.mark_in_osd(2) - manager.wait_till_osd_is_up(2) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_till_active() - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - - # verify that there are unfound objects - unfound = manager.get_num_unfound_objects() - log.info("there are %d unfound objects" % unfound) - assert unfound - - # mark stuff lost - pgs = manager.get_pg_stats() - for pg in pgs: - if pg['stat_sum']['num_objects_unfound'] > 0: - primary = 'osd.%d' % pg['acting'][0] - - # verify that i can list them direct from the osd - log.info('listing missing/lost in %s state %s', pg['pgid'], - pg['state']); - m = manager.list_pg_missing(pg['pgid']) - #log.info('%s' % m) - assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound'] - num_unfound=0 - for o in m['objects']: - if len(o['locations']) == 0: - num_unfound += 1 - assert m['num_unfound'] == num_unfound - - log.info("reverting unfound in %s on %s", pg['pgid'], primary) - manager.raw_cluster_cmd('pg', pg['pgid'], - 'mark_unfound_lost', 'revert') - else: - log.info("no unfound in %s", pg['pgid']) - - manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5') - manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_recovery() - - # verify result - for f in range(1, 10): - err = rados(ctx, mon, ['-p', 'data', 'get', 'new_%d' % f, '-']) - assert err - err = rados(ctx, mon, ['-p', 'data', 'get', 'existed_%d' % f, '-']) - assert err - err = rados(ctx, mon, ['-p', 'data', 'get', 'existing_%d' % f, '-']) - assert not err - - # see if osd.1 can cope - manager.revive_osd(1) - manager.mark_in_osd(1) - manager.wait_till_osd_is_up(1) - manager.wait_for_clean() diff --git a/tasks/manypools.py b/tasks/manypools.py deleted file mode 100644 index 1ddcba5c8a9..00000000000 --- a/tasks/manypools.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -Force pg creation on all osds -""" -from teuthology import misc as teuthology -from teuthology.orchestra import run -import logging - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Create the specified number of pools and write 16 objects to them (thereby forcing - the PG creation on each OSD). This task creates pools from all the clients, - in parallel. It is easy to add other daemon types which have the appropriate - permissions, but I don't think anything else does. - The config is just the number of pools to create. 
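The loop below hands out pool creations round-robin across the client remotes, counting down from the configured number. An illustrative sketch of the resulting assignment (not taken from the original file)::

    clients = ['client.0', 'client.1']
    npools = 5
    assignments = [(num, clients[i % len(clients)])
                   for i, num in enumerate(range(npools, 0, -1))]
    print(assignments)
    # [(5, 'client.0'), (4, 'client.1'), (3, 'client.0'), (2, 'client.1'), (1, 'client.0')]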
I recommend setting - "mon create pg interval" to a very low value in your ceph config to speed - this up. - - You probably want to do this to look at memory consumption, and - maybe to test how performance changes with the number of PGs. For example: - - tasks: - - ceph: - config: - mon: - mon create pg interval: 1 - - manypools: 3000 - - radosbench: - clients: [client.0] - time: 360 - """ - - log.info('creating {n} pools'.format(n=config)) - - poolnum = int(config) - creator_remotes = [] - client_roles = teuthology.all_roles_of_type(ctx.cluster, 'client') - log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles)) - for role in client_roles: - log.info('role={role_}'.format(role_=role)) - (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys() - creator_remotes.append((creator_remote, 'client.{id}'.format(id=role))) - - remaining_pools = poolnum - poolprocs=dict() - while (remaining_pools > 0): - log.info('{n} pools remaining to create'.format(n=remaining_pools)) - for remote, role_ in creator_remotes: - poolnum = remaining_pools - remaining_pools -= 1 - if remaining_pools < 0: - continue - log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_)) - proc = remote.run( - args=[ - 'rados', - '--name', role_, - 'mkpool', 'pool{num}'.format(num=poolnum), '-1', - run.Raw('&&'), - 'rados', - '--name', role_, - '--pool', 'pool{num}'.format(num=poolnum), - 'bench', '0', 'write', '-t', '16', '--block-size', '1' - ], - wait = False - ) - log.info('waiting for pool and object creates') - poolprocs[remote] = proc - - run.wait(poolprocs.itervalues()) - - log.info('created all {n} pools and wrote 16 objects to each'.format(n=poolnum)) diff --git a/tasks/mds_creation_failure.py b/tasks/mds_creation_failure.py deleted file mode 100644 index a3d052fb95c..00000000000 --- a/tasks/mds_creation_failure.py +++ /dev/null @@ -1,83 +0,0 @@ - -import logging -import contextlib -import time -import ceph_manager -from teuthology import misc -from teuthology.orchestra.run import CommandFailedError, Raw - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def task(ctx, config): - """ - Go through filesystem creation with a synthetic failure in an MDS - in its 'up:creating' state, to exercise the retry behaviour. 
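The task body below starts the MDS with --mds_kill_create_at=1 and expects it to die with exit status 1; any other status is treated as a real failure. A standalone analogue of that expected-crash check (the shell command is a placeholder for the dying daemon)::

    import subprocess

    proc = subprocess.Popen(['sh', '-c', 'exit 1'])   # stands in for the MDS
    status = proc.wait()
    if status == 1:
        print('process exited as expected')
    else:
        raise RuntimeError('unexpected status code %s' % status)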
- """ - # Grab handles to the teuthology objects of interest - mdslist = list(misc.all_roles_of_type(ctx.cluster, 'mds')) - if len(mdslist) != 1: - # Require exactly one MDS, the code path for creation failure when - # a standby is available is different - raise RuntimeError("This task requires exactly one MDS") - - mds_id = mdslist[0] - (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys() - manager = ceph_manager.CephManager( - mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'), - ) - - # Stop the MDS and reset the filesystem so that next start will go into CREATING - mds = ctx.daemons.get_daemon('mds', mds_id) - mds.stop() - data_pool_id = manager.get_pool_num("data") - md_pool_id = manager.get_pool_num("metadata") - manager.raw_cluster_cmd_result('mds', 'newfs', md_pool_id.__str__(), data_pool_id.__str__(), - '--yes-i-really-mean-it') - - # Start the MDS with mds_kill_create_at set, it will crash during creation - mds.restart_with_args(["--mds_kill_create_at=1"]) - try: - mds.wait_for_exit() - except CommandFailedError as e: - if e.exitstatus == 1: - log.info("MDS creation killed as expected") - else: - log.error("Unexpected status code %s" % e.exitstatus) - raise - - # Since I have intentionally caused a crash, I will clean up the resulting core - # file to avoid task.internal.coredump seeing it as a failure. - log.info("Removing core file from synthetic MDS failure") - mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))]) - - # It should have left the MDS map state still in CREATING - status = manager.get_mds_status(mds_id) - assert status['state'] == 'up:creating' - - # Start the MDS again without the kill flag set, it should proceed with creation successfully - mds.restart() - - # Wait for state ACTIVE - t = 0 - create_timeout = 120 - while True: - status = manager.get_mds_status(mds_id) - if status['state'] == 'up:active': - log.info("MDS creation completed successfully") - break - elif status['state'] == 'up:creating': - log.info("MDS still in creating state") - if t > create_timeout: - log.error("Creating did not complete within %ss" % create_timeout) - raise RuntimeError("Creating did not complete within %ss" % create_timeout) - t += 1 - time.sleep(1) - else: - log.error("Unexpected MDS state: %s" % status['state']) - assert(status['state'] in ['up:active', 'up:creating']) - - # The system should be back up in a happy healthy state, go ahead and run any further tasks - # inside this context. - yield diff --git a/tasks/mds_thrash.py b/tasks/mds_thrash.py deleted file mode 100644 index c60b741a49e..00000000000 --- a/tasks/mds_thrash.py +++ /dev/null @@ -1,352 +0,0 @@ -""" -Thrash mds by simulating failures -""" -import logging -import contextlib -import ceph_manager -import random -import time -from gevent.greenlet import Greenlet -from gevent.event import Event -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - - -class MDSThrasher(Greenlet): - """ - MDSThrasher:: - - The MDSThrasher thrashes MDSs during execution of other tasks (workunits, etc). - - The config is optional. Many of the config parameters are a a maximum value - to use when selecting a random value from a range. To always use the maximum - value, set no_random to true. 
The config is a dict containing some or all of: - - seed: [no default] seed the random number generator - - randomize: [default: true] enables randomization and use the max/min values - - max_thrash: [default: 1] the maximum number of MDSs that will be thrashed at - any given time. - - max_thrash_delay: [default: 30] maximum number of seconds to delay before - thrashing again. - - max_revive_delay: [default: 10] maximum number of seconds to delay before - bringing back a thrashed MDS - - thrash_in_replay: [default: 0.0] likelihood that the MDS will be thrashed - during replay. Value should be between 0.0 and 1.0 - - max_replay_thrash_delay: [default: 4] maximum number of seconds to delay while in - the replay state before thrashing - - thrash_weights: allows specific MDSs to be thrashed more/less frequently. This option - overrides anything specified by max_thrash. This option is a dict containing - mds.x: weight pairs. For example, [mds.a: 0.7, mds.b: 0.3, mds.c: 0.0]. Each weight - is a value from 0.0 to 1.0. Any MDSs not specified will be automatically - given a weight of 0.0. For a given MDS, by default the trasher delays for up - to max_thrash_delay, trashes, waits for the MDS to recover, and iterates. If a non-zero - weight is specified for an MDS, for each iteration the thrasher chooses whether to thrash - during that iteration based on a random value [0-1] not exceeding the weight of that MDS. - - Examples:: - - - The following example sets the likelihood that mds.a will be thrashed - to 80%, mds.b to 20%, and other MDSs will not be thrashed. It also sets the - likelihood that an MDS will be thrashed in replay to 40%. - Thrash weights do not have to sum to 1. - - tasks: - - ceph: - - mds_thrash: - thrash_weights: - - mds.a: 0.8 - - mds.b: 0.2 - thrash_in_replay: 0.4 - - ceph-fuse: - - workunit: - clients: - all: [suites/fsx.sh] - - The following example disables randomization, and uses the max delay values: - - tasks: - - ceph: - - mds_thrash: - max_thrash_delay: 10 - max_revive_delay: 1 - max_replay_thrash_delay: 4 - - """ - - def __init__(self, ctx, manager, config, logger, failure_group, weight): - super(MDSThrasher, self).__init__() - - self.ctx = ctx - self.manager = manager - assert self.manager.is_clean() - - self.stopping = Event() - self.logger = logger - self.config = config - - self.randomize = bool(self.config.get('randomize', True)) - self.max_thrash_delay = float(self.config.get('thrash_delay', 30.0)) - self.thrash_in_replay = float(self.config.get('thrash_in_replay', False)) - assert self.thrash_in_replay >= 0.0 and self.thrash_in_replay <= 1.0, 'thrash_in_replay ({v}) must be between [0.0, 1.0]'.format( - v=self.thrash_in_replay) - - self.max_replay_thrash_delay = float(self.config.get('max_replay_thrash_delay', 4.0)) - - self.max_revive_delay = float(self.config.get('max_revive_delay', 10.0)) - - self.failure_group = failure_group - self.weight = weight - - def _run(self): - try: - self.do_thrash() - except: - # Log exceptions here so we get the full backtrace (it's lost - # by the time someone does a .get() on this greenlet) - self.logger.exception("Exception in do_thrash:") - raise - - def log(self, x): - """Write data to logger assigned to this MDThrasher""" - self.logger.info(x) - - def stop(self): - self.stopping.set() - - def do_thrash(self): - """ - Perform the random thrashing action - """ - self.log('starting mds_do_thrash for failure group: ' + ', '.join( - ['mds.{_id}'.format(_id=_f) for _f in self.failure_group])) - while not self.stopping.is_set(): - 
delay = self.max_thrash_delay - if self.randomize: - delay = random.randrange(0.0, self.max_thrash_delay) - - if delay > 0.0: - self.log('waiting for {delay} secs before thrashing'.format(delay=delay)) - self.stopping.wait(delay) - if self.stopping.is_set(): - continue - - skip = random.randrange(0.0, 1.0) - if self.weight < 1.0 and skip > self.weight: - self.log('skipping thrash iteration with skip ({skip}) > weight ({weight})'.format(skip=skip, - weight=self.weight)) - continue - - # find the active mds in the failure group - statuses = [self.manager.get_mds_status(m) for m in self.failure_group] - actives = filter(lambda s: s and s['state'] == 'up:active', statuses) - assert len(actives) == 1, 'Can only have one active in a failure group' - - active_mds = actives[0]['name'] - active_rank = actives[0]['rank'] - - self.log('kill mds.{id} (rank={r})'.format(id=active_mds, r=active_rank)) - self.manager.kill_mds_by_rank(active_rank) - - # wait for mon to report killed mds as crashed - last_laggy_since = None - itercount = 0 - while True: - failed = self.manager.get_mds_status_all()['failed'] - status = self.manager.get_mds_status(active_mds) - if not status: - break - if 'laggy_since' in status: - last_laggy_since = status['laggy_since'] - break - if any([(f == active_mds) for f in failed]): - break - self.log( - 'waiting till mds map indicates mds.{_id} is laggy/crashed, in failed state, or mds.{_id} is removed from mdsmap'.format( - _id=active_mds)) - itercount = itercount + 1 - if itercount > 10: - self.log('mds map: {status}'.format(status=self.manager.get_mds_status_all())) - time.sleep(2) - if last_laggy_since: - self.log( - 'mds.{_id} reported laggy/crashed since: {since}'.format(_id=active_mds, since=last_laggy_since)) - else: - self.log('mds.{_id} down, removed from mdsmap'.format(_id=active_mds, since=last_laggy_since)) - - # wait for a standby mds to takeover and become active - takeover_mds = None - takeover_rank = None - itercount = 0 - while True: - statuses = [self.manager.get_mds_status(m) for m in self.failure_group] - actives = filter(lambda s: s and s['state'] == 'up:active', statuses) - if len(actives) > 0: - assert len(actives) == 1, 'Can only have one active in failure group' - takeover_mds = actives[0]['name'] - takeover_rank = actives[0]['rank'] - break - itercount = itercount + 1 - if itercount > 10: - self.log('mds map: {status}'.format(status=self.manager.get_mds_status_all())) - - self.log('New active mds is mds.{_id}'.format(_id=takeover_mds)) - - # wait for a while before restarting old active to become new - # standby - delay = self.max_revive_delay - if self.randomize: - delay = random.randrange(0.0, self.max_revive_delay) - - self.log('waiting for {delay} secs before reviving mds.{id}'.format( - delay=delay, id=active_mds)) - time.sleep(delay) - - self.log('reviving mds.{id}'.format(id=active_mds)) - self.manager.revive_mds(active_mds, standby_for_rank=takeover_rank) - - status = {} - while True: - status = self.manager.get_mds_status(active_mds) - if status and (status['state'] == 'up:standby' or status['state'] == 'up:standby-replay'): - break - self.log( - 'waiting till mds map indicates mds.{_id} is in standby or standby-replay'.format(_id=active_mds)) - time.sleep(2) - self.log('mds.{_id} reported in {state} state'.format(_id=active_mds, state=status['state'])) - - # don't do replay thrashing right now - continue - # this might race with replay -> active transition... 
- if status['state'] == 'up:replay' and random.randrange(0.0, 1.0) < self.thrash_in_replay: - - delay = self.max_replay_thrash_delay - if self.randomize: - delay = random.randrange(0.0, self.max_replay_thrash_delay) - time.sleep(delay) - self.log('kill replaying mds.{id}'.format(id=self.to_kill)) - self.manager.kill_mds(self.to_kill) - - delay = self.max_revive_delay - if self.randomize: - delay = random.randrange(0.0, self.max_revive_delay) - - self.log('waiting for {delay} secs before reviving mds.{id}'.format( - delay=delay, id=self.to_kill)) - time.sleep(delay) - - self.log('revive mds.{id}'.format(id=self.to_kill)) - self.manager.revive_mds(self.to_kill) - - -@contextlib.contextmanager -def task(ctx, config): - """ - Stress test the mds by thrashing while another task/workunit - is running. - - Please refer to MDSThrasher class for further information on the - available options. - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'mds_thrash task only accepts a dict for configuration' - mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds')) - assert len(mdslist) > 1, \ - 'mds_thrash task requires at least 2 metadata servers' - - # choose random seed - seed = None - if 'seed' in config: - seed = int(config['seed']) - else: - seed = int(time.time()) - log.info('mds thrasher using random seed: {seed}'.format(seed=seed)) - random.seed(seed) - - max_thrashers = config.get('max_thrash', 1) - thrashers = {} - - (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys() - manager = ceph_manager.CephManager( - first, ctx=ctx, logger=log.getChild('ceph_manager'), - ) - - # make sure everyone is in active, standby, or standby-replay - log.info('Wait for all MDSs to reach steady state...') - statuses = None - statuses_by_rank = None - while True: - statuses = {m: manager.get_mds_status(m) for m in mdslist} - statuses_by_rank = {} - for _, s in statuses.iteritems(): - if isinstance(s, dict): - statuses_by_rank[s['rank']] = s - - ready = filter(lambda (_, s): s is not None and (s['state'] == 'up:active' - or s['state'] == 'up:standby' - or s['state'] == 'up:standby-replay'), - statuses.items()) - if len(ready) == len(statuses): - break - time.sleep(2) - log.info('Ready to start thrashing') - - # setup failure groups - failure_groups = {} - actives = {s['name']: s for (_, s) in statuses.iteritems() if s['state'] == 'up:active'} - log.info('Actives is: {d}'.format(d=actives)) - log.info('Statuses is: {d}'.format(d=statuses_by_rank)) - for active in actives: - for (r, s) in statuses.iteritems(): - if s['standby_for_name'] == active: - if not active in failure_groups: - failure_groups[active] = [] - log.info('Assigning mds rank {r} to failure group {g}'.format(r=r, g=active)) - failure_groups[active].append(r) - - manager.wait_for_clean() - for (active, standbys) in failure_groups.iteritems(): - weight = 1.0 - if 'thrash_weights' in config: - weight = int(config['thrash_weights'].get('mds.{_id}'.format(_id=active), '0.0')) - - failure_group = [active] - failure_group.extend(standbys) - - thrasher = MDSThrasher( - ctx, manager, config, - logger=log.getChild('mds_thrasher.failure_group.[{a}, {sbs}]'.format( - a=active, - sbs=', '.join(standbys) - ) - ), - failure_group=failure_group, - weight=weight) - thrasher.start() - thrashers[active] = thrasher - - # if thrash_weights isn't specified and we've reached max_thrash, - # we're done - if not 'thrash_weights' in config and len(thrashers) == max_thrashers: - break - - try: - 
log.debug('Yielding') - yield - finally: - log.info('joining mds_thrashers') - for t in thrashers: - log.info('join thrasher for failure group [{fg}]'.format(fg=', '.join(failure_group))) - thrashers[t].stop() - thrashers[t].join() - log.info('done joining') diff --git a/tasks/metadata.yaml b/tasks/metadata.yaml deleted file mode 100644 index ccdc3b077cb..00000000000 --- a/tasks/metadata.yaml +++ /dev/null @@ -1,2 +0,0 @@ -instance-id: test -local-hostname: test diff --git a/tasks/mon_clock_skew_check.py b/tasks/mon_clock_skew_check.py deleted file mode 100644 index 891e6ec484e..00000000000 --- a/tasks/mon_clock_skew_check.py +++ /dev/null @@ -1,261 +0,0 @@ -""" -Handle clock skews in monitors. -""" -import logging -import contextlib -import ceph_manager -import time -import gevent -from StringIO import StringIO -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -class ClockSkewCheck: - """ - Periodically check if there are any clock skews among the monitors in the - quorum. By default, assume no skews are supposed to exist; that can be - changed using the 'expect-skew' option. If 'fail-on-skew' is set to false, - then we will always succeed and only report skews if any are found. - - This class does not spawn a thread. It assumes that, if that is indeed - wanted, it should be done by a third party (for instance, the task using - this class). We intend it as such in order to reuse this class if need be. - - This task accepts the following options: - - interval amount of seconds to wait in-between checks. (default: 30.0) - max-skew maximum skew, in seconds, that is considered tolerable before - issuing a warning. (default: 0.05) - expect-skew 'true' or 'false', to indicate whether to expect a skew during - the run or not. If 'true', the test will fail if no skew is - found, and succeed if a skew is indeed found; if 'false', it's - the other way around. (default: false) - never-fail Don't fail the run if a skew is detected and we weren't - expecting it, or if no skew is detected and we were expecting - it. (default: False) - - at-least-once Runs at least once, even if we are told to stop. - (default: True) - at-least-once-timeout If we were told to stop but we are attempting to - run at least once, timeout after this many seconds. - (default: 600) - - Example: - Expect a skew higher than 0.05 seconds, but only report it without - failing the teuthology run. 
- - - mon_clock_skew_check: - interval: 30 - max-skew: 0.05 - expect_skew: true - never-fail: true - """ - - def __init__(self, ctx, manager, config, logger): - self.ctx = ctx - self.manager = manager - - self.stopping = False - self.logger = logger - self.config = config - - if self.config is None: - self.config = dict() - - self.check_interval = float(self.config.get('interval', 30.0)) - - first_mon = teuthology.get_first_mon(ctx, config) - remote = ctx.cluster.only(first_mon).remotes.keys()[0] - proc = remote.run( - args=[ - 'sudo', - 'ceph-mon', - '-i', first_mon[4:], - '--show-config-value', 'mon_clock_drift_allowed' - ], stdout=StringIO(), wait=True - ) - self.max_skew = self.config.get('max-skew', float(proc.stdout.getvalue())) - - self.expect_skew = self.config.get('expect-skew', False) - self.never_fail = self.config.get('never-fail', False) - self.at_least_once = self.config.get('at-least-once', True) - self.at_least_once_timeout = self.config.get('at-least-once-timeout', 600.0) - - def info(self, x): - """ - locally define logger for info messages - """ - self.logger.info(x) - - def warn(self, x): - """ - locally define logger for warnings - """ - self.logger.warn(x) - - def debug(self, x): - """ - locally define logger for debug messages - """ - self.logger.info(x) - self.logger.debug(x) - - def finish(self): - """ - Break out of the do_check loop. - """ - self.stopping = True - - def sleep_interval(self): - """ - If a sleep interval is set, sleep for that amount of time. - """ - if self.check_interval > 0.0: - self.debug('sleeping for {s} seconds'.format( - s=self.check_interval)) - time.sleep(self.check_interval) - - def print_skews(self, skews): - """ - Display skew values. - """ - total = len(skews) - if total > 0: - self.info('---------- found {n} skews ----------'.format(n=total)) - for mon_id, values in skews.iteritems(): - self.info('mon.{id}: {v}'.format(id=mon_id, v=values)) - self.info('-------------------------------------') - else: - self.info('---------- no skews were found ----------') - - def do_check(self): - """ - Clock skew checker. Loops until finish() is called. 
- """ - self.info('start checking for clock skews') - skews = dict() - ran_once = False - - started_on = None - - while not self.stopping or (self.at_least_once and not ran_once): - - if self.at_least_once and not ran_once and self.stopping: - if started_on is None: - self.info('kicking-off timeout (if any)') - started_on = time.time() - elif self.at_least_once_timeout > 0.0: - assert time.time() - started_on < self.at_least_once_timeout, \ - 'failed to obtain a timecheck before timeout expired' - - quorum_size = len(teuthology.get_mon_names(self.ctx)) - self.manager.wait_for_mon_quorum_size(quorum_size) - - health = self.manager.get_mon_health(True) - timechecks = health['timechecks'] - - clean_check = False - - if timechecks['round_status'] == 'finished': - assert (timechecks['round'] % 2) == 0, \ - 'timecheck marked as finished but round ' \ - 'disagrees (r {r})'.format( - r=timechecks['round']) - clean_check = True - else: - assert timechecks['round_status'] == 'on-going', \ - 'timecheck status expected \'on-going\' ' \ - 'but found \'{s}\' instead'.format( - s=timechecks['round_status']) - if 'mons' in timechecks.keys() and len(timechecks['mons']) > 1: - self.info('round still on-going, but there are available reports') - else: - self.info('no timechecks available just yet') - self.sleep_interval() - continue - - assert len(timechecks['mons']) > 1, \ - 'there are not enough reported timechecks; ' \ - 'expected > 1 found {n}'.format(n=len(timechecks['mons'])) - - for check in timechecks['mons']: - mon_skew = float(check['skew']) - mon_health = check['health'] - mon_id = check['name'] - if abs(mon_skew) > self.max_skew: - assert mon_health == 'HEALTH_WARN', \ - 'mon.{id} health is \'{health}\' but skew {s} > max {ms}'.format( - id=mon_id,health=mon_health,s=abs(mon_skew),ms=self.max_skew) - - log_str = 'mon.{id} with skew {s} > max {ms}'.format( - id=mon_id,s=abs(mon_skew),ms=self.max_skew) - - """ add to skew list """ - details = check['details'] - skews[mon_id] = {'skew': mon_skew, 'details': details} - - if self.expect_skew: - self.info('expected skew: {str}'.format(str=log_str)) - else: - self.warn('unexpected skew: {str}'.format(str=log_str)) - - if clean_check or (self.expect_skew and len(skews) > 0): - ran_once = True - self.print_skews(skews) - self.sleep_interval() - - total = len(skews) - self.print_skews(skews) - - error_str = '' - found_error = False - - if self.expect_skew: - if total == 0: - error_str = 'We were expecting a skew, but none was found!' - found_error = True - else: - if total > 0: - error_str = 'We were not expecting a skew, but we did find it!' - found_error = True - - if found_error: - self.info(error_str) - if not self.never_fail: - assert False, error_str - -@contextlib.contextmanager -def task(ctx, config): - """ - Use clas ClockSkewCheck to check for clock skews on the monitors. - This task will spawn a thread running ClockSkewCheck's do_check(). - - All the configuration will be directly handled by ClockSkewCheck, - so please refer to the class documentation for further information. 
- """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'mon_clock_skew_check task only accepts a dict for configuration' - log.info('Beginning mon_clock_skew_check...') - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - skew_check = ClockSkewCheck(ctx, - manager, config, - logger=log.getChild('mon_clock_skew_check')) - skew_check_thread = gevent.spawn(skew_check.do_check) - try: - yield - finally: - log.info('joining mon_clock_skew_check') - skew_check.finish() - skew_check_thread.get() - - diff --git a/tasks/mon_recovery.py b/tasks/mon_recovery.py deleted file mode 100644 index bfa2cdf78f1..00000000000 --- a/tasks/mon_recovery.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Monitor recovery -""" -import logging -import ceph_manager -from teuthology import misc as teuthology - - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Test monitor recovery. - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - mons = [f.split('.')[1] for f in teuthology.get_mon_names(ctx)] - log.info("mon ids = %s" % mons) - - manager.wait_for_mon_quorum_size(len(mons)) - - log.info('verifying all monitors are in the quorum') - for m in mons: - s = manager.get_mon_status(m) - assert s['state'] == 'leader' or s['state'] == 'peon' - assert len(s['quorum']) == len(mons) - - log.info('restarting each monitor in turn') - for m in mons: - # stop a monitor - manager.kill_mon(m) - manager.wait_for_mon_quorum_size(len(mons) - 1) - - # restart - manager.revive_mon(m) - manager.wait_for_mon_quorum_size(len(mons)) - - # in forward and reverse order, - rmons = mons - rmons.reverse() - for mons in mons, rmons: - log.info('stopping all monitors') - for m in mons: - manager.kill_mon(m) - - log.info('forming a minimal quorum for %s, then adding monitors' % mons) - qnum = (len(mons) / 2) + 1 - num = 0 - for m in mons: - manager.revive_mon(m) - num += 1 - if num >= qnum: - manager.wait_for_mon_quorum_size(num) - - # on both leader and non-leader ranks... - for rank in [0, 1]: - # take one out - log.info('removing mon %s' % mons[rank]) - manager.kill_mon(mons[rank]) - manager.wait_for_mon_quorum_size(len(mons) - 1) - - log.info('causing some monitor log activity') - m = 30 - for n in range(1, m): - manager.raw_cluster_cmd('log', '%d of %d' % (n, m)) - - log.info('adding mon %s back in' % mons[rank]) - manager.revive_mon(mons[rank]) - manager.wait_for_mon_quorum_size(len(mons)) diff --git a/tasks/mon_thrash.py b/tasks/mon_thrash.py deleted file mode 100644 index b45aaa99978..00000000000 --- a/tasks/mon_thrash.py +++ /dev/null @@ -1,343 +0,0 @@ -""" -Monitor thrash -""" -import logging -import contextlib -import ceph_manager -import random -import time -import gevent -import json -import math -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -def _get_mons(ctx): - """ - Get monitor names from the context value. 
- """ - mons = [f[len('mon.'):] for f in teuthology.get_mon_names(ctx)] - return mons - -class MonitorThrasher: - """ - How it works:: - - - pick a monitor - - kill it - - wait for quorum to be formed - - sleep for 'revive_delay' seconds - - revive monitor - - wait for quorum to be formed - - sleep for 'thrash_delay' seconds - - Options:: - - seed Seed to use on the RNG to reproduce a previous - behaviour (default: None; i.e., not set) - revive_delay Number of seconds to wait before reviving - the monitor (default: 10) - thrash_delay Number of seconds to wait in-between - test iterations (default: 0) - thrash_store Thrash monitor store before killing the monitor being thrashed (default: False) - thrash_store_probability Probability of thrashing a monitor's store - (default: 50) - thrash_many Thrash multiple monitors instead of just one. If - 'maintain-quorum' is set to False, then we will - thrash up to as many monitors as there are - available. (default: False) - maintain_quorum Always maintain quorum, taking care on how many - monitors we kill during the thrashing. If we - happen to only have one or two monitors configured, - if this option is set to True, then we won't run - this task as we cannot guarantee maintenance of - quorum. Setting it to false however would allow the - task to run with as many as just one single monitor. - (default: True) - freeze_mon_probability: how often to freeze the mon instead of killing it, - in % (default: 0) - freeze_mon_duration: how many seconds to freeze the mon (default: 15) - scrub Scrub after each iteration (default: True) - - Note: if 'store-thrash' is set to True, then 'maintain-quorum' must also - be set to True. - - For example:: - - tasks: - - ceph: - - mon_thrash: - revive_delay: 20 - thrash_delay: 1 - thrash_store: true - thrash_store_probability: 40 - seed: 31337 - maintain_quorum: true - thrash_many: true - - ceph-fuse: - - workunit: - clients: - all: - - mon/workloadgen.sh - """ - def __init__(self, ctx, manager, config, logger): - self.ctx = ctx - self.manager = manager - self.manager.wait_for_clean() - - self.stopping = False - self.logger = logger - self.config = config - - if self.config is None: - self.config = dict() - - """ Test reproducibility """ - self.random_seed = self.config.get('seed', None) - - if self.random_seed is None: - self.random_seed = int(time.time()) - - self.rng = random.Random() - self.rng.seed(int(self.random_seed)) - - """ Monitor thrashing """ - self.revive_delay = float(self.config.get('revive_delay', 10.0)) - self.thrash_delay = float(self.config.get('thrash_delay', 0.0)) - - self.thrash_many = self.config.get('thrash_many', False) - self.maintain_quorum = self.config.get('maintain_quorum', True) - - self.scrub = self.config.get('scrub', True) - - self.freeze_mon_probability = float(self.config.get('freeze_mon_probability', 10)) - self.freeze_mon_duration = float(self.config.get('freeze_mon_duration', 15.0)) - - assert self.max_killable() > 0, \ - 'Unable to kill at least one monitor with the current config.' 
- - """ Store thrashing """ - self.store_thrash = self.config.get('store_thrash', False) - self.store_thrash_probability = int( - self.config.get('store_thrash_probability', 50)) - if self.store_thrash: - assert self.store_thrash_probability > 0, \ - 'store_thrash is set, probability must be > 0' - assert self.maintain_quorum, \ - 'store_thrash = true must imply maintain_quorum = true' - - self.thread = gevent.spawn(self.do_thrash) - - def log(self, x): - """ - locally log info messages - """ - self.logger.info(x) - - def do_join(self): - """ - Break out of this processes thrashing loop. - """ - self.stopping = True - self.thread.get() - - def should_thrash_store(self): - """ - If allowed, indicate that we should thrash a certain percentage of - the time as determined by the store_thrash_probability value. - """ - if not self.store_thrash: - return False - return self.rng.randrange(0, 101) < self.store_thrash_probability - - def thrash_store(self, mon): - """ - Thrash the monitor specified. - :param mon: monitor to thrash - """ - addr = self.ctx.ceph.conf['mon.%s' % mon]['mon addr'] - self.log('thrashing mon.{id}@{addr} store'.format(id=mon, addr=addr)) - out = self.manager.raw_cluster_cmd('-m', addr, 'sync', 'force') - j = json.loads(out) - assert j['ret'] == 0, \ - 'error forcing store sync on mon.{id}:\n{ret}'.format( - id=mon,ret=out) - - def should_freeze_mon(self): - """ - Indicate that we should freeze a certain percentago of the time - as determined by the freeze_mon_probability value. - """ - return self.rng.randrange(0, 101) < self.freeze_mon_probability - - def freeze_mon(self, mon): - """ - Send STOP signal to freeze the monitor. - """ - log.info('Sending STOP to mon %s', mon) - self.manager.signal_mon(mon, 19) # STOP - - def unfreeze_mon(self, mon): - """ - Send CONT signal to unfreeze the monitor. - """ - log.info('Sending CONT to mon %s', mon) - self.manager.signal_mon(mon, 18) # CONT - - def kill_mon(self, mon): - """ - Kill the monitor specified - """ - self.log('killing mon.{id}'.format(id=mon)) - self.manager.kill_mon(mon) - - def revive_mon(self, mon): - """ - Revive the monitor specified - """ - self.log('killing mon.{id}'.format(id=mon)) - self.log('reviving mon.{id}'.format(id=mon)) - self.manager.revive_mon(mon) - - def max_killable(self): - """ - Return the maximum number of monitors we can kill. - """ - m = len(_get_mons(self.ctx)) - if self.maintain_quorum: - return max(math.ceil(m/2.0)-1, 0) - else: - return m - - def do_thrash(self): - """ - Cotinuously loop and thrash the monitors. 
- """ - self.log('start thrashing') - self.log('seed: {s}, revive delay: {r}, thrash delay: {t} '\ - 'thrash many: {tm}, maintain quorum: {mq} '\ - 'store thrash: {st}, probability: {stp} '\ - 'freeze mon: prob {fp} duration {fd}'.format( - s=self.random_seed,r=self.revive_delay,t=self.thrash_delay, - tm=self.thrash_many, mq=self.maintain_quorum, - st=self.store_thrash,stp=self.store_thrash_probability, - fp=self.freeze_mon_probability,fd=self.freeze_mon_duration, - )) - - while not self.stopping: - mons = _get_mons(self.ctx) - self.manager.wait_for_mon_quorum_size(len(mons)) - self.log('making sure all monitors are in the quorum') - for m in mons: - s = self.manager.get_mon_status(m) - assert s['state'] == 'leader' or s['state'] == 'peon' - assert len(s['quorum']) == len(mons) - - kill_up_to = self.rng.randrange(1, self.max_killable()+1) - mons_to_kill = self.rng.sample(mons, kill_up_to) - self.log('monitors to thrash: {m}'.format(m=mons_to_kill)) - - mons_to_freeze = [] - for mon in mons: - if mon in mons_to_kill: - continue - if self.should_freeze_mon(): - mons_to_freeze.append(mon) - self.log('monitors to freeze: {m}'.format(m=mons_to_freeze)) - - for mon in mons_to_kill: - self.log('thrashing mon.{m}'.format(m=mon)) - - """ we only thrash stores if we are maintaining quorum """ - if self.should_thrash_store() and self.maintain_quorum: - self.thrash_store(mon) - - self.kill_mon(mon) - - if mons_to_freeze: - for mon in mons_to_freeze: - self.freeze_mon(mon) - self.log('waiting for {delay} secs to unfreeze mons'.format( - delay=self.freeze_mon_duration)) - time.sleep(self.freeze_mon_duration) - for mon in mons_to_freeze: - self.unfreeze_mon(mon) - - if self.maintain_quorum: - self.manager.wait_for_mon_quorum_size(len(mons)-len(mons_to_kill)) - for m in mons: - if m in mons_to_kill: - continue - s = self.manager.get_mon_status(m) - assert s['state'] == 'leader' or s['state'] == 'peon' - assert len(s['quorum']) == len(mons)-len(mons_to_kill) - - self.log('waiting for {delay} secs before reviving monitors'.format( - delay=self.revive_delay)) - time.sleep(self.revive_delay) - - for mon in mons_to_kill: - self.revive_mon(mon) - # do more freezes - if mons_to_freeze: - for mon in mons_to_freeze: - self.freeze_mon(mon) - self.log('waiting for {delay} secs to unfreeze mons'.format( - delay=self.freeze_mon_duration)) - time.sleep(self.freeze_mon_duration) - for mon in mons_to_freeze: - self.unfreeze_mon(mon) - - self.manager.wait_for_mon_quorum_size(len(mons)) - for m in mons: - s = self.manager.get_mon_status(m) - assert s['state'] == 'leader' or s['state'] == 'peon' - assert len(s['quorum']) == len(mons) - - if self.scrub: - self.log('triggering scrub') - try: - self.manager.raw_cluster_cmd('scrub') - except Exception: - log.exception("Saw exception while triggering scrub") - - if self.thrash_delay > 0.0: - self.log('waiting for {delay} secs before continuing thrashing'.format( - delay=self.thrash_delay)) - time.sleep(self.thrash_delay) - -@contextlib.contextmanager -def task(ctx, config): - """ - Stress test the monitor by thrashing them while another task/workunit - is running. - - Please refer to MonitorThrasher class for further information on the - available options. 
- """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'mon_thrash task only accepts a dict for configuration' - assert len(_get_mons(ctx)) > 2, \ - 'mon_thrash task requires at least 3 monitors' - log.info('Beginning mon_thrash...') - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - thrash_proc = MonitorThrasher(ctx, - manager, config, - logger=log.getChild('mon_thrasher')) - try: - log.debug('Yielding') - yield - finally: - log.info('joining mon_thrasher') - thrash_proc.do_join() - mons = _get_mons(ctx) - manager.wait_for_mon_quorum_size(len(mons)) diff --git a/tasks/multibench.py b/tasks/multibench.py deleted file mode 100644 index bc22b470593..00000000000 --- a/tasks/multibench.py +++ /dev/null @@ -1,57 +0,0 @@ -""" -Multibench testing -""" -import contextlib -import logging -import radosbench -import time -import copy -import gevent - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Run multibench - - The config should be as follows: - - multibench: - time: - segments: - radosbench: - - example: - - tasks: - - ceph: - - multibench: - clients: [client.0] - time: 360 - - interactive: - """ - log.info('Beginning multibench...') - assert isinstance(config, dict), \ - "please list clients to run on" - - def run_one(num): - """Run test spawn from gevent""" - start = time.time() - benchcontext = copy.copy(config.get('radosbench')) - iterations = 0 - while time.time() - start < int(config.get('time', 600)): - log.info("Starting iteration %s of segment %s"%(iterations, num)) - benchcontext['pool'] = str(num) + "-" + str(iterations) - with radosbench.task(ctx, benchcontext): - time.sleep() - iterations += 1 - log.info("Starting %s threads"%(str(config.get('segments', 3)),)) - segments = [ - gevent.spawn(run_one, i) - for i in range(0, int(config.get('segments', 3)))] - - try: - yield - finally: - [i.get() for i in segments] diff --git a/tasks/object_source_down.py b/tasks/object_source_down.py deleted file mode 100644 index 17b94490668..00000000000 --- a/tasks/object_source_down.py +++ /dev/null @@ -1,103 +0,0 @@ -""" -Test Object locations going down -""" -import logging -import ceph_manager -from teuthology import misc as teuthology -from util.rados import rados - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Test handling of object location going down - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'lost_unfound task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - while len(manager.get_osd_status()['up']) < 3: - manager.sleep(10) - manager.wait_for_clean() - - # something that is always there - dummyfile = '/etc/fstab' - - # take 0, 1 out - manager.mark_out_osd(0) - manager.mark_out_osd(1) - manager.wait_for_clean() - - # delay recovery, and make the pg log very long (to prevent backfill) - manager.raw_cluster_cmd( - 'tell', 'osd.0', - 'injectargs', - '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000' - ) - # delay recovery, and make the pg log very long (to prevent backfill) - manager.raw_cluster_cmd( - 'tell', 'osd.1', - 'injectargs', - '--osd-recovery-delay-start 10000 
--osd-min-pg-log-entries 100000000' - ) - # delay recovery, and make the pg log very long (to prevent backfill) - manager.raw_cluster_cmd( - 'tell', 'osd.2', - 'injectargs', - '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000' - ) - # delay recovery, and make the pg log very long (to prevent backfill) - manager.raw_cluster_cmd( - 'tell', 'osd.3', - 'injectargs', - '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000' - ) - - # kludge to make sure they get a map - rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile]) - - # create old objects - for f in range(1, 10): - rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) - - manager.mark_out_osd(3) - manager.wait_till_active() - - manager.mark_in_osd(0) - manager.wait_till_active() - - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - - manager.mark_out_osd(2) - manager.wait_till_active() - - # bring up 1 - manager.mark_in_osd(1) - manager.wait_till_active() - - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - log.info("Getting unfound objects") - unfound = manager.get_num_unfound_objects() - assert not unfound - - manager.kill_osd(2) - manager.mark_down_osd(2) - manager.kill_osd(3) - manager.mark_down_osd(3) - - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - log.info("Getting unfound objects") - unfound = manager.get_num_unfound_objects() - assert unfound diff --git a/tasks/omapbench.py b/tasks/omapbench.py deleted file mode 100644 index e026c74dbc0..00000000000 --- a/tasks/omapbench.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -Run omapbench executable within teuthology -""" -import contextlib -import logging - -from teuthology.orchestra import run -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Run omapbench - - The config should be as follows:: - - omapbench: - clients: [client list] - threads: - objects: - entries: - keysize: - valsize: - increment: - omaptype: - - example:: - - tasks: - - ceph: - - omapbench: - clients: [client.0] - threads: 30 - objects: 1000 - entries: 10 - keysize: 10 - valsize: 100 - increment: 100 - omaptype: uniform - - interactive: - """ - log.info('Beginning omapbench...') - assert isinstance(config, dict), \ - "please list clients to run on" - omapbench = {} - testdir = teuthology.get_testdir(ctx) - print(str(config.get('increment',-1))) - for role in config.get('clients', ['client.0']): - assert isinstance(role, basestring) - PREFIX = 'client.' 
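An illustrative mapping from the omapbench config keys documented above to a command line; the option names follow the deleted task, while the defaults shown here mirror the code that follows and should be treated as a sketch rather than authoritative.

    def omapbench_args(config, role='client.0'):
        id_ = role[len('client.'):]
        return ['omapbench',
                '--name', id_,
                '-t', str(config.get('threads', 30)),
                '-o', str(config.get('objects', 1000)),
                '--entries', str(config.get('entries', 10)),
                '--keysize', str(config.get('keysize', 10)),
                '--valsize', str(config.get('valsize', 1000)),
                '--inc', str(config.get('increment', 10)),
                '--omaptype', str(config.get('omaptype', 'uniform'))]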
- assert role.startswith(PREFIX) - id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - proc = remote.run( - args=[ - "/bin/sh", "-c", - " ".join(['adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage', - 'omapbench', - '--name', role[len(PREFIX):], - '-t', str(config.get('threads', 30)), - '-o', str(config.get('objects', 1000)), - '--entries', str(config.get('entries',10)), - '--keysize', str(config.get('keysize',10)), - '--valsize', str(config.get('valsize',1000)), - '--inc', str(config.get('increment',10)), - '--omaptype', str(config.get('omaptype','uniform')) - ]).format(tdir=testdir), - ], - logger=log.getChild('omapbench.{id}'.format(id=id_)), - stdin=run.PIPE, - wait=False - ) - omapbench[id_] = proc - - try: - yield - finally: - log.info('joining omapbench') - run.wait(omapbench.itervalues()) diff --git a/tasks/osd_backfill.py b/tasks/osd_backfill.py deleted file mode 100644 index d034d791a5e..00000000000 --- a/tasks/osd_backfill.py +++ /dev/null @@ -1,105 +0,0 @@ -""" -Osd backfill test -""" -import logging -import ceph_manager -import time -from teuthology import misc as teuthology - - -log = logging.getLogger(__name__) - - -def rados_start(ctx, remote, cmd): - """ - Run a remote rados command (currently used to only write data) - """ - log.info("rados %s" % ' '.join(cmd)) - testdir = teuthology.get_testdir(ctx) - pre = [ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'rados', - ]; - pre.extend(cmd) - proc = remote.run( - args=pre, - wait=False, - ) - return proc - -def task(ctx, config): - """ - Test backfill - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'thrashosds task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') - log.info('num_osds is %s' % num_osds) - assert num_osds == 3 - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - while len(manager.get_osd_status()['up']) < 3: - manager.sleep(10) - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_clean() - - # write some data - p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096', - '--no-cleanup']) - err = p.wait(); - log.info('err is %d' % err) - - # mark osd.0 out to trigger a rebalance/backfill - manager.mark_out_osd(0) - - # also mark it down to it won't be included in pg_temps - manager.kill_osd(0) - manager.mark_down_osd(0) - - # wait for everything to peer and be happy... 
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_recovery() - - # write some new data - p = rados_start(ctx, mon, ['-p', 'data', 'bench', '30', 'write', '-b', '4096', - '--no-cleanup']) - - time.sleep(15) - - # blackhole + restart osd.1 - # this triggers a divergent backfill target - manager.blackhole_kill_osd(1) - time.sleep(2) - manager.revive_osd(1) - - # wait for our writes to complete + succeed - err = p.wait() - log.info('err is %d' % err) - - # cluster must recover - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_recovery() - - # re-add osd.0 - manager.revive_osd(0) - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_clean() - - diff --git a/tasks/osd_failsafe_enospc.py b/tasks/osd_failsafe_enospc.py deleted file mode 100644 index bf089988022..00000000000 --- a/tasks/osd_failsafe_enospc.py +++ /dev/null @@ -1,218 +0,0 @@ -""" -Handle osdfailsafe configuration settings (nearfull ratio and full ratio) -""" -from cStringIO import StringIO -import logging -import time - -import ceph_manager -from teuthology.orchestra import run -from util.rados import rados -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Test handling of osd_failsafe_nearfull_ratio and osd_failsafe_full_ratio - configuration settings - - In order for test to pass must use log-whitelist as follows - - tasks: - - chef: - - install: - - ceph: - log-whitelist: ['OSD near full', 'OSD full dropping all updates'] - - osd_failsafe_enospc: - - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'osd_failsafe_enospc task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - ctx.manager = manager - - # Give 2 seconds for injectargs + osd_op_complaint_time (30) + 2 * osd_heartbeat_interval (6) + 6 padding - sleep_time = 50 - - # something that is always there - dummyfile = '/etc/fstab' - dummyfile2 = '/etc/resolv.conf' - - # create 1 pg pool with 1 rep which can only be on osd.0 - osds = manager.get_osd_dump() - for osd in osds: - if osd['osd'] != 0: - manager.mark_out_osd(osd['osd']) - - log.info('creating pool foo') - manager.create_pool("foo") - manager.raw_cluster_cmd('osd', 'pool', 'set', 'foo', 'size', '1') - - # State NONE -> NEAR - log.info('1. Verify warning messages when exceeding nearfull_ratio') - - proc = mon.run( - args=[ - 'daemon-helper', - 'kill', - 'ceph', '-w' - ], - stdin=run.PIPE, - stdout=StringIO(), - wait=False, - ) - - manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .00001') - - time.sleep(sleep_time) - proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w - proc.wait() - - lines = proc.stdout.getvalue().split('\n') - - count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) - assert count == 2, 'Incorrect number of warning messages expected 2 got %d' % count - count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) - assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count - - # State NEAR -> FULL - log.info('2. 
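A sketch of the log accounting used throughout this failsafe test, assuming output already holds the text captured from the daemon-helper 'ceph -w' run; the needle strings are the same ones matched above.

    def count_lines(output, needle):
        return sum(1 for line in output.split('\n') if needle in line)

    output = '... captured ceph -w text ...'  # placeholder, not real output
    warnings = count_lines(output, '[WRN] OSD near full')
    errors = count_lines(output, '[ERR] OSD full dropping all updates')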
Verify error messages when exceeding full_ratio') - - proc = mon.run( - args=[ - 'daemon-helper', - 'kill', - 'ceph', '-w' - ], - stdin=run.PIPE, - stdout=StringIO(), - wait=False, - ) - - manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001') - - time.sleep(sleep_time) - proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w - proc.wait() - - lines = proc.stdout.getvalue().split('\n') - - count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) - assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count - - log.info('3. Verify write failure when exceeding full_ratio') - - # Write data should fail - ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile1', dummyfile]) - assert ret != 0, 'Expected write failure but it succeeded with exit status 0' - - # Put back default - manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97') - time.sleep(10) - - # State FULL -> NEAR - log.info('4. Verify write success when NOT exceeding full_ratio') - - # Write should succeed - ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile2', dummyfile2]) - assert ret == 0, 'Expected write to succeed, but got exit status %d' % ret - - log.info('5. Verify warning messages again when exceeding nearfull_ratio') - - proc = mon.run( - args=[ - 'daemon-helper', - 'kill', - 'ceph', '-w' - ], - stdin=run.PIPE, - stdout=StringIO(), - wait=False, - ) - - time.sleep(sleep_time) - proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w - proc.wait() - - lines = proc.stdout.getvalue().split('\n') - - count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) - assert count == 1 or count == 2, 'Incorrect number of warning messages expected 1 or 2 got %d' % count - count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) - assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count - - manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .90') - time.sleep(10) - - # State NONE -> FULL - log.info('6. Verify error messages again when exceeding full_ratio') - - proc = mon.run( - args=[ - 'daemon-helper', - 'kill', - 'ceph', '-w' - ], - stdin=run.PIPE, - stdout=StringIO(), - wait=False, - ) - - manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001') - - time.sleep(sleep_time) - proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w - proc.wait() - - lines = proc.stdout.getvalue().split('\n') - - count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) - assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count - count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) - assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count - - # State FULL -> NONE - log.info('7. 
Verify no messages settings back to default') - - manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97') - time.sleep(10) - - proc = mon.run( - args=[ - 'daemon-helper', - 'kill', - 'ceph', '-w' - ], - stdin=run.PIPE, - stdout=StringIO(), - wait=False, - ) - - time.sleep(sleep_time) - proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w - proc.wait() - - lines = proc.stdout.getvalue().split('\n') - - count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) - assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count - count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) - assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count - - log.info('Test Passed') - - # Bring all OSDs back in - manager.remove_pool("foo") - for osd in osds: - if osd['osd'] != 0: - manager.mark_in_osd(osd['osd']) diff --git a/tasks/osd_recovery.py b/tasks/osd_recovery.py deleted file mode 100644 index ff88fb47d74..00000000000 --- a/tasks/osd_recovery.py +++ /dev/null @@ -1,206 +0,0 @@ -""" -osd recovery -""" -import logging -import ceph_manager -import time -from teuthology import misc as teuthology - - -log = logging.getLogger(__name__) - - -def rados_start(testdir, remote, cmd): - """ - Run a remote rados command (currently used to only write data) - """ - log.info("rados %s" % ' '.join(cmd)) - pre = [ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'rados', - ]; - pre.extend(cmd) - proc = remote.run( - args=pre, - wait=False, - ) - return proc - -def task(ctx, config): - """ - Test (non-backfill) recovery - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'task only accepts a dict for configuration' - testdir = teuthology.get_testdir(ctx) - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') - log.info('num_osds is %s' % num_osds) - assert num_osds == 3 - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - while len(manager.get_osd_status()['up']) < 3: - manager.sleep(10) - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_clean() - - # test some osdmap flags - manager.raw_cluster_cmd('osd', 'set', 'noin') - manager.raw_cluster_cmd('osd', 'set', 'noout') - manager.raw_cluster_cmd('osd', 'set', 'noup') - manager.raw_cluster_cmd('osd', 'set', 'nodown') - manager.raw_cluster_cmd('osd', 'unset', 'noin') - manager.raw_cluster_cmd('osd', 'unset', 'noout') - manager.raw_cluster_cmd('osd', 'unset', 'noup') - manager.raw_cluster_cmd('osd', 'unset', 'nodown') - - # write some new data - p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '60', 'write', '-b', '4096', - '--no-cleanup']) - - time.sleep(15) - - # trigger a divergent target: - # blackhole + restart osd.1 (shorter log) - manager.blackhole_kill_osd(1) - # kill osd.2 (longer log... 
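Grounded in the commands above: the osdmap flags are set and then unset in pairs so the map ends up unchanged. cluster_cmd stands in for manager.raw_cluster_cmd.

    def exercise_osdmap_flags(cluster_cmd):
        for flag in ('noin', 'noout', 'noup', 'nodown'):
            cluster_cmd('osd', 'set', flag)
        for flag in ('noin', 'noout', 'noup', 'nodown'):
            cluster_cmd('osd', 'unset', flag)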
we'll make it divergent below) - manager.kill_osd(2) - time.sleep(2) - manager.revive_osd(1) - - # wait for our writes to complete + succeed - err = p.wait() - log.info('err is %d' % err) - - # cluster must repeer - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.wait_for_active_or_down() - - # write some more (make sure osd.2 really is divergent) - p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096']) - p.wait(); - - # revive divergent osd - manager.revive_osd(2) - - while len(manager.get_osd_status()['up']) < 3: - log.info('waiting a bit...') - time.sleep(2) - log.info('3 are up!') - - # cluster must recover - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_clean() - - -def test_incomplete_pgs(ctx, config): - """ - Test handling of incomplete pgs. Requires 4 osds. - """ - testdir = teuthology.get_testdir(ctx) - if config is None: - config = {} - assert isinstance(config, dict), \ - 'task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') - log.info('num_osds is %s' % num_osds) - assert num_osds == 4 - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - while len(manager.get_osd_status()['up']) < 4: - time.sleep(10) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') - manager.wait_for_clean() - - log.info('Testing incomplete pgs...') - - for i in range(4): - manager.set_config( - i, - osd_recovery_delay_start=1000) - - # move data off of osd.0, osd.1 - manager.raw_cluster_cmd('osd', 'out', '0', '1') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') - manager.wait_for_clean() - - # lots of objects in rbd (no pg log, will backfill) - p = rados_start(testdir, mon, - ['-p', 'rbd', 'bench', '60', 'write', '-b', '1', - '--no-cleanup']) - p.wait() - - # few objects in metadata pool (with pg log, normal recovery) - for f in range(1, 20): - p = rados_start(testdir, mon, ['-p', 'metadata', 'put', - 'foo.%d' % f, '/etc/passwd']) - p.wait() - - # move it back - manager.raw_cluster_cmd('osd', 'in', '0', '1') - manager.raw_cluster_cmd('osd', 'out', '2', '3') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats') - manager.wait_for_active() - - assert not manager.is_clean() - assert not manager.is_recovered() - - # kill 2 + 3 - log.info('stopping 2,3') - manager.kill_osd(2) - manager.kill_osd(3) - log.info('...') - manager.raw_cluster_cmd('osd', 'down', '2', '3') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.wait_for_active_or_down() - - assert manager.get_num_down() > 0 - - # 
revive 2 + 3 - manager.revive_osd(2) - manager.revive_osd(3) - while len(manager.get_osd_status()['up']) < 4: - log.info('waiting a bit...') - time.sleep(2) - log.info('all are up!') - - for i in range(4): - manager.kick_recovery_wq(i) - - # cluster must recover - manager.wait_for_clean() diff --git a/tasks/peer.py b/tasks/peer.py deleted file mode 100644 index f1789cf12d6..00000000000 --- a/tasks/peer.py +++ /dev/null @@ -1,96 +0,0 @@ -""" -Peer test (Single test, not much configurable here) -""" -import logging -import json - -import ceph_manager -from teuthology import misc as teuthology -from util.rados import rados - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Test peering. - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'peer task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - while len(manager.get_osd_status()['up']) < 3: - manager.sleep(10) - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_clean() - - for i in range(3): - manager.set_config( - i, - osd_recovery_delay_start=120) - - # take on osd down - manager.kill_osd(2) - manager.mark_down_osd(2) - - # kludge to make sure they get a map - rados(ctx, mon, ['-p', 'data', 'get', 'dummy', '-']) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.wait_for_recovery() - - # kill another and revive 2, so that some pgs can't peer. - manager.kill_osd(1) - manager.mark_down_osd(1) - manager.revive_osd(2) - manager.wait_till_osd_is_up(2) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - - manager.wait_for_active_or_down() - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - - # look for down pgs - num_down_pgs = 0 - pgs = manager.get_pg_stats() - for pg in pgs: - out = manager.raw_cluster_cmd('pg', pg['pgid'], 'query') - log.debug("out string %s",out) - j = json.loads(out) - log.info("pg is %s, query json is %s", pg, j) - - if pg['state'].count('down'): - num_down_pgs += 1 - # verify that it is blocked on osd.1 - rs = j['recovery_state'] - assert len(rs) > 0 - assert rs[0]['name'] == 'Started/Primary/Peering/GetInfo' - assert rs[1]['name'] == 'Started/Primary/Peering' - assert rs[1]['blocked'] - assert rs[1]['down_osds_we_would_probe'] == [1] - assert len(rs[1]['peering_blocked_by']) == 1 - assert rs[1]['peering_blocked_by'][0]['osd'] == 1 - - assert num_down_pgs > 0 - - # bring it all back - manager.revive_osd(1) - manager.wait_till_osd_is_up(1) - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_clean() diff --git a/tasks/peering_speed_test.py b/tasks/peering_speed_test.py deleted file mode 100644 index 6c885f1c961..00000000000 --- a/tasks/peering_speed_test.py +++ /dev/null @@ -1,93 +0,0 @@ -""" -Remotely run peering tests. 
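A sketch of the pg-query assertions made in the peer test above, assuming out holds the JSON text returned by 'ceph pg <pgid> query' and osd.1 is the expected blocker.

    import json

    def assert_blocked_on_osd(out, blocker=1):
        rs = json.loads(out)['recovery_state']
        assert rs[0]['name'] == 'Started/Primary/Peering/GetInfo'
        assert rs[1]['name'] == 'Started/Primary/Peering'
        assert rs[1]['blocked']
        assert rs[1]['down_osds_we_would_probe'] == [blocker]
        assert rs[1]['peering_blocked_by'][0]['osd'] == blocker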
-""" -import logging -import time -from teuthology import misc as teuthology -import ceph_manager - -log = logging.getLogger(__name__) - -from args import argify - -POOLNAME = "POOLNAME" -ARGS = [ - ('num_pgs', 'number of pgs to create', 256, int), - ('max_time', 'seconds to complete peering', 0, int), - ('runs', 'trials to run', 10, int), - ('num_objects', 'objects to create', 256 * 1024, int), - ('object_size', 'size in bytes for objects', 64, int), - ('creation_time_limit', 'time limit for pool population', 60*60, int), - ('create_threads', 'concurrent writes for create', 256, int) - ] - -def setup(ctx, config): - """ - Setup peering test on remotes. - """ - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - ctx.manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - ctx.manager.clear_pools() - ctx.manager.create_pool(POOLNAME, config.num_pgs) - log.info("populating pool") - ctx.manager.rados_write_objects( - POOLNAME, - config.num_objects, - config.object_size, - config.creation_time_limit, - config.create_threads) - log.info("done populating pool") - -def do_run(ctx, config): - """ - Perform the test. - """ - start = time.time() - # mark in osd - ctx.manager.mark_in_osd(0) - log.info("writing out objects") - ctx.manager.rados_write_objects( - POOLNAME, - config.num_pgs, # write 1 object per pg or so - 1, - config.creation_time_limit, - config.num_pgs, # lots of concurrency - cleanup = True) - peering_end = time.time() - - log.info("peering done, waiting on recovery") - ctx.manager.wait_for_clean() - - log.info("recovery done") - recovery_end = time.time() - if config.max_time: - assert(peering_end - start < config.max_time) - ctx.manager.mark_out_osd(0) - ctx.manager.wait_for_clean() - return { - 'time_to_active': peering_end - start, - 'time_to_clean': recovery_end - start - } - -@argify("peering_speed_test", ARGS) -def task(ctx, config): - """ - Peering speed test - """ - setup(ctx, config) - ctx.manager.mark_out_osd(0) - ctx.manager.wait_for_clean() - ret = [] - for i in range(config.runs): - log.info("Run {i}".format(i = i)) - ret.append(do_run(ctx, config)) - - ctx.manager.mark_in_osd(0) - ctx.summary['recovery_times'] = { - 'runs': ret - } diff --git a/tasks/qemu.py b/tasks/qemu.py deleted file mode 100644 index bcd79caa64f..00000000000 --- a/tasks/qemu.py +++ /dev/null @@ -1,412 +0,0 @@ -""" -Qemu task -""" -from cStringIO import StringIO - -import contextlib -import logging -import os - -from teuthology import misc as teuthology -from teuthology import contextutil -from tasks import rbd -from teuthology.orchestra import run - -log = logging.getLogger(__name__) - -DEFAULT_NUM_RBD = 1 -DEFAULT_IMAGE_URL = 'http://ceph.com/qa/ubuntu-12.04.qcow2' -DEFAULT_MEM = 4096 # in megabytes - -@contextlib.contextmanager -def create_dirs(ctx, config): - """ - Handle directory creation and cleanup - """ - testdir = teuthology.get_testdir(ctx) - for client, client_config in config.iteritems(): - assert 'test' in client_config, 'You must specify a test to run' - (remote,) = ctx.cluster.only(client).remotes.keys() - remote.run( - args=[ - 'install', '-d', '-m0755', '--', - '{tdir}/qemu'.format(tdir=testdir), - '{tdir}/archive/qemu'.format(tdir=testdir), - ] - ) - try: - yield - finally: - for client, client_config in config.iteritems(): - assert 'test' in client_config, 'You must specify a test to run' - (remote,) = ctx.cluster.only(client).remotes.keys() - remote.run( - args=[ - 'rmdir', 
'{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true', - ] - ) - -@contextlib.contextmanager -def generate_iso(ctx, config): - """Execute system commands to generate iso""" - log.info('generating iso...') - testdir = teuthology.get_testdir(ctx) - for client, client_config in config.iteritems(): - assert 'test' in client_config, 'You must specify a test to run' - (remote,) = ctx.cluster.only(client).remotes.keys() - src_dir = os.path.dirname(__file__) - userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client) - metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client) - - with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f: - test_setup = ''.join(f.readlines()) - # configuring the commands to setup the nfs mount - mnt_dir = "/export/{client}".format(client=client) - test_setup = test_setup.format( - mnt_dir=mnt_dir - ) - - with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f: - test_teardown = ''.join(f.readlines()) - - user_data = test_setup - if client_config.get('type', 'filesystem') == 'filesystem': - for i in xrange(0, client_config.get('num_rbd', DEFAULT_NUM_RBD)): - dev_letter = chr(ord('b') + i) - user_data += """ -- | - #!/bin/bash - mkdir /mnt/test_{dev_letter} - mkfs -t xfs /dev/vd{dev_letter} - mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter} -""".format(dev_letter=dev_letter) - - # this may change later to pass the directories as args to the - # script or something. xfstests needs that. - user_data += """ -- | - #!/bin/bash - test -d /mnt/test_b && cd /mnt/test_b - /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success -""" + test_teardown - - teuthology.write_file(remote, userdata_path, StringIO(user_data)) - - with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f: - teuthology.write_file(remote, metadata_path, f) - - test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client) - remote.run( - args=[ - 'wget', '-nv', '-O', test_file, - client_config['test'], - run.Raw('&&'), - 'chmod', '755', test_file, - ], - ) - remote.run( - args=[ - 'genisoimage', '-quiet', '-input-charset', 'utf-8', - '-volid', 'cidata', '-joliet', '-rock', - '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), - '-graft-points', - 'user-data={userdata}'.format(userdata=userdata_path), - 'meta-data={metadata}'.format(metadata=metadata_path), - 'test.sh={file}'.format(file=test_file), - ], - ) - try: - yield - finally: - for client in config.iterkeys(): - (remote,) = ctx.cluster.only(client).remotes.keys() - remote.run( - args=[ - 'rm', '-f', - '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), - os.path.join(testdir, 'qemu', 'userdata.' + client), - os.path.join(testdir, 'qemu', 'metadata.' 
+ client), - '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client), - ], - ) - -@contextlib.contextmanager -def download_image(ctx, config): - """Downland base image, remove image file when done""" - log.info('downloading base image') - testdir = teuthology.get_testdir(ctx) - for client, client_config in config.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() - base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client) - remote.run( - args=[ - 'wget', '-nv', '-O', base_file, DEFAULT_IMAGE_URL, - ] - ) - try: - yield - finally: - log.debug('cleaning up base image files') - for client in config.iterkeys(): - base_file = '{tdir}/qemu/base.{client}.qcow2'.format( - tdir=testdir, - client=client, - ) - (remote,) = ctx.cluster.only(client).remotes.keys() - remote.run( - args=[ - 'rm', '-f', base_file, - ], - ) - - -def _setup_nfs_mount(remote, client, mount_dir): - """ - Sets up an nfs mount on the remote that the guest can use to - store logs. This nfs mount is also used to touch a file - at the end of the test to indiciate if the test was successful - or not. - """ - export_dir = "/export/{client}".format(client=client) - log.info("Creating the nfs export directory...") - remote.run(args=[ - 'sudo', 'mkdir', '-p', export_dir, - ]) - log.info("Mounting the test directory...") - remote.run(args=[ - 'sudo', 'mount', '--bind', mount_dir, export_dir, - ]) - log.info("Adding mount to /etc/exports...") - export = "{dir} *(rw,no_root_squash,no_subtree_check,insecure)".format( - dir=export_dir - ) - remote.run(args=[ - 'echo', export, run.Raw("|"), - 'sudo', 'tee', '-a', "/etc/exports", - ]) - log.info("Restarting NFS...") - if remote.os.package_type == "deb": - remote.run(args=['sudo', 'service', 'nfs-kernel-server', 'restart']) - else: - remote.run(args=['sudo', 'systemctl', 'restart', 'nfs']) - - -def _teardown_nfs_mount(remote, client): - """ - Tears down the nfs mount on the remote used for logging and reporting the - status of the tests being ran in the guest. 
- """ - log.info("Tearing down the nfs mount for {remote}".format(remote=remote)) - export_dir = "/export/{client}".format(client=client) - log.info("Stopping NFS...") - if remote.os.package_type == "deb": - remote.run(args=[ - 'sudo', 'service', 'nfs-kernel-server', 'stop' - ]) - else: - remote.run(args=[ - 'sudo', 'systemctl', 'stop', 'nfs' - ]) - log.info("Unmounting exported directory...") - remote.run(args=[ - 'sudo', 'umount', export_dir - ]) - log.info("Deleting exported directory...") - remote.run(args=[ - 'sudo', 'rm', '-r', '/export' - ]) - log.info("Deleting export from /etc/exports...") - remote.run(args=[ - 'sudo', 'sed', '-i', '$ d', '/etc/exports' - ]) - log.info("Starting NFS...") - if remote.os.package_type == "deb": - remote.run(args=[ - 'sudo', 'service', 'nfs-kernel-server', 'start' - ]) - else: - remote.run(args=[ - 'sudo', 'systemctl', 'start', 'nfs' - ]) - - -@contextlib.contextmanager -def run_qemu(ctx, config): - """Setup kvm environment and start qemu""" - procs = [] - testdir = teuthology.get_testdir(ctx) - for client, client_config in config.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() - log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client) - remote.run( - args=[ - 'mkdir', log_dir, run.Raw('&&'), - 'sudo', 'modprobe', 'kvm', - ] - ) - - # make an nfs mount to use for logging and to - # allow to test to tell teuthology the tests outcome - _setup_nfs_mount(remote, client, log_dir) - - base_file = '{tdir}/qemu/base.{client}.qcow2'.format( - tdir=testdir, - client=client - ) - qemu_cmd = 'qemu-system-x86_64' - if remote.os.package_type == "rpm": - qemu_cmd = "/usr/libexec/qemu-kvm" - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'daemon-helper', - 'term', - qemu_cmd, '-enable-kvm', '-nographic', - '-m', str(client_config.get('memory', DEFAULT_MEM)), - # base OS device - '-drive', - 'file={base},format=qcow2,if=virtio'.format(base=base_file), - # cd holding metadata for cloud-init - '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), - ] - - cachemode = 'none' - ceph_config = ctx.ceph.conf.get('global', {}) - ceph_config.update(ctx.ceph.conf.get('client', {})) - ceph_config.update(ctx.ceph.conf.get(client, {})) - if ceph_config.get('rbd cache'): - if ceph_config.get('rbd cache max dirty', 1) > 0: - cachemode = 'writeback' - else: - cachemode = 'writethrough' - - for i in xrange(client_config.get('num_rbd', DEFAULT_NUM_RBD)): - args.extend([ - '-drive', - 'file=rbd:rbd/{img}:id={id},format=raw,if=virtio,cache={cachemode}'.format( - img='{client}.{num}'.format(client=client, num=i), - id=client[len('client.'):], - cachemode=cachemode, - ), - ]) - - log.info('starting qemu...') - procs.append( - remote.run( - args=args, - logger=log.getChild(client), - stdin=run.PIPE, - wait=False, - ) - ) - - try: - yield - finally: - log.info('waiting for qemu tests to finish...') - run.wait(procs) - - log.debug('checking that qemu tests succeeded...') - for client in config.iterkeys(): - (remote,) = ctx.cluster.only(client).remotes.keys() - # teardown nfs mount - _teardown_nfs_mount(remote, client) - # check for test status - remote.run( - args=[ - 'test', '-f', - '{tdir}/archive/qemu/{client}/success'.format( - tdir=testdir, - client=client - ), - ], - ) - - -@contextlib.contextmanager -def task(ctx, config): - """ - Run a test inside of QEMU on top of rbd. Only one test - is supported per client. 
- - For example, you can specify which clients to run on:: - - tasks: - - ceph: - - qemu: - client.0: - test: http://ceph.com/qa/test.sh - client.1: - test: http://ceph.com/qa/test2.sh - - Or use the same settings on all clients: - - tasks: - - ceph: - - qemu: - all: - test: http://ceph.com/qa/test.sh - - For tests that don't need a filesystem, set type to block:: - - tasks: - - ceph: - - qemu: - client.0: - test: http://ceph.com/qa/test.sh - type: block - - The test should be configured to run on /dev/vdb and later - devices. - - If you want to run a test that uses more than one rbd image, - specify how many images to use:: - - tasks: - - ceph: - - qemu: - client.0: - test: http://ceph.com/qa/test.sh - type: block - num_rbd: 2 - - You can set the amount of memory the VM has (default is 1024 MB):: - - tasks: - - ceph: - - qemu: - client.0: - test: http://ceph.com/qa/test.sh - memory: 512 # megabytes - """ - assert isinstance(config, dict), \ - "task qemu only supports a dictionary for configuration" - - config = teuthology.replace_all_with_clients(ctx.cluster, config) - - managers = [] - for client, client_config in config.iteritems(): - num_rbd = client_config.get('num_rbd', 1) - assert num_rbd > 0, 'at least one rbd device must be used' - for i in xrange(num_rbd): - create_config = { - client: { - 'image_name': - '{client}.{num}'.format(client=client, num=i), - } - } - managers.append( - lambda create_config=create_config: - rbd.create_image(ctx=ctx, config=create_config) - ) - - managers.extend([ - lambda: create_dirs(ctx=ctx, config=config), - lambda: generate_iso(ctx=ctx, config=config), - lambda: download_image(ctx=ctx, config=config), - lambda: run_qemu(ctx=ctx, config=config), - ]) - - with contextutil.nested(*managers): - yield diff --git a/tasks/rados.py b/tasks/rados.py deleted file mode 100644 index 3d44fdff1c3..00000000000 --- a/tasks/rados.py +++ /dev/null @@ -1,160 +0,0 @@ -""" -Rados modle-based integration tests -""" -import contextlib -import logging -import gevent -from teuthology import misc as teuthology - -from teuthology.orchestra import run - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Run RadosModel-based integration tests. - - The config should be as follows:: - - rados: - clients: [client list] - ops: - objects: - max_in_flight: - object_size: - min_stride_size: - max_stride_size: - op_weights: - runs: - the pool is remade between runs - ec_pool: use an ec pool - - For example:: - - tasks: - - ceph: - - rados: - clients: [client.0] - ops: 1000 - max_seconds: 0 # 0 for no limit - objects: 25 - max_in_flight: 16 - object_size: 4000000 - min_stride_size: 1024 - max_stride_size: 4096 - op_weights: - read: 20 - write: 10 - delete: 2 - snap_create: 3 - rollback: 2 - snap_remove: 0 - ec_pool: true - runs: 10 - - interactive: - - Optionally, you can provide the pool name to run against: - - tasks: - - ceph: - - exec: - client.0: - - ceph osd pool create foo - - rados: - clients: [client.0] - pools: [foo] - ... - - Alternatively, you can provide a pool prefix: - - tasks: - - ceph: - - exec: - client.0: - - ceph osd pool create foo.client.0 - - rados: - clients: [client.0] - pool_prefix: foo - ... 
- - """ - log.info('Beginning rados...') - assert isinstance(config, dict), \ - "please list clients to run on" - - object_size = int(config.get('object_size', 4000000)) - op_weights = config.get('op_weights', {}) - testdir = teuthology.get_testdir(ctx) - args = [ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'ceph_test_rados'] - if config.get('ec_pool', False): - args.extend(['--ec-pool']) - args.extend([ - '--op', 'read', str(op_weights.get('read', 100)), - '--op', 'write', str(op_weights.get('write', 100)), - '--op', 'delete', str(op_weights.get('delete', 10)), - '--max-ops', str(config.get('ops', 10000)), - '--objects', str(config.get('objects', 500)), - '--max-in-flight', str(config.get('max_in_flight', 16)), - '--size', str(object_size), - '--min-stride-size', str(config.get('min_stride_size', object_size / 10)), - '--max-stride-size', str(config.get('max_stride_size', object_size / 5)), - '--max-seconds', str(config.get('max_seconds', 0)) - ]) - for field in [ - 'copy_from', 'is_dirty', 'undirty', 'cache_flush', - 'cache_try_flush', 'cache_evict', - 'snap_create', 'snap_remove', 'rollback', 'setattr', 'rmattr', - 'watch', 'append', - ]: - if field in op_weights: - args.extend([ - '--op', field, str(op_weights[field]), - ]) - - def thread(): - """Thread spawned by gevent""" - clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - log.info('clients are %s' % clients) - for i in range(int(config.get('runs', '1'))): - log.info("starting run %s out of %s", str(i), config.get('runs', '1')) - tests = {} - existing_pools = config.get('pools', []) - created_pools = [] - for role in config.get('clients', clients): - assert isinstance(role, basestring) - PREFIX = 'client.' 
- assert role.startswith(PREFIX) - id_ = role[len(PREFIX):] - - pool = config.get('pool', None) - if not pool and existing_pools: - pool = existing_pools.pop() - else: - pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False)) - created_pools.append(pool) - - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - proc = remote.run( - args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args + - ["--pool", pool], - logger=log.getChild("rados.{id}".format(id=id_)), - stdin=run.PIPE, - wait=False - ) - tests[id_] = proc - run.wait(tests.itervalues()) - - for pool in created_pools: - ctx.manager.remove_pool(pool) - - running = gevent.spawn(thread) - - try: - yield - finally: - log.info('joining rados') - running.get() diff --git a/tasks/radosbench.py b/tasks/radosbench.py deleted file mode 100644 index 1c5bd5486ab..00000000000 --- a/tasks/radosbench.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Rados benchmarking -""" -import contextlib -import logging - -from teuthology.orchestra import run -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Run radosbench - - The config should be as follows: - - radosbench: - clients: [client list] - time: - pool: - size: write size to use - unique_pool: use a unique pool, defaults to False - ec_pool: create ec pool, defaults to False - create_pool: create pool, defaults to False - - example: - - tasks: - - ceph: - - radosbench: - clients: [client.0] - time: 360 - - interactive: - """ - log.info('Beginning radosbench...') - assert isinstance(config, dict), \ - "please list clients to run on" - radosbench = {} - - testdir = teuthology.get_testdir(ctx) - - for role in config.get('clients', ['client.0']): - assert isinstance(role, basestring) - PREFIX = 'client.' - assert role.startswith(PREFIX) - id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - - pool = 'data' - if config.get('create_pool', True): - if config.get('pool'): - pool = config.get('pool') - if pool != 'data': - ctx.manager.create_pool(pool, ec_pool=config.get('ec_pool', False)) - else: - pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False)) - - proc = remote.run( - args=[ - "/bin/sh", "-c", - " ".join(['adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage', - 'rados', - '--name', role, - '-b', str(config.get('size', 4<<20)), - '-p' , pool, - 'bench', str(config.get('time', 360)), 'write', - ]).format(tdir=testdir), - ], - logger=log.getChild('radosbench.{id}'.format(id=id_)), - stdin=run.PIPE, - wait=False - ) - radosbench[id_] = proc - - try: - yield - finally: - timeout = config.get('time', 360) * 5 - log.info('joining radosbench (timing out after %ss)', timeout) - run.wait(radosbench.itervalues(), timeout=timeout) - - if pool is not 'data': - ctx.manager.remove_pool(pool) diff --git a/tasks/radosgw_admin.py b/tasks/radosgw_admin.py deleted file mode 100644 index 453f4f561de..00000000000 --- a/tasks/radosgw_admin.py +++ /dev/null @@ -1,991 +0,0 @@ -""" -Rgw admin testing against a running instance -""" -# The test cases in this file have been annotated for inventory. 
-# To extract the inventory (in csv format) use the command: -# -# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //' -# - -import copy -import json -import logging -import time - -from cStringIO import StringIO - -import boto.exception -import boto.s3.connection -import boto.s3.acl - -import util.rgw as rgw_utils - -from teuthology import misc as teuthology -from util.rgw import rgwadmin, get_user_summary, get_user_successful_ops - -log = logging.getLogger(__name__) - -def get_acl(key): - """ - Helper function to get the xml acl from a key, ensuring that the xml - version tag is removed from the acl response - """ - raw_acl = key.get_xml_acl() - - def remove_version(string): - return string.split( - '' - )[-1] - - def remove_newlines(string): - return string.strip('\n') - - return remove_version( - remove_newlines(raw_acl) - ) - - -def task(ctx, config): - """ - Test radosgw-admin functionality against a running rgw instance. - """ - global log - assert config is None or isinstance(config, list) \ - or isinstance(config, dict), \ - "task s3tests only supports a list or dictionary for configuration" - all_clients = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - if config is None: - config = all_clients - if isinstance(config, list): - config = dict.fromkeys(config) - clients = config.keys() - - multi_region_run = rgw_utils.multi_region_enabled(ctx) - - client = clients[0]; # default choice, multi-region code may overwrite this - if multi_region_run: - client = rgw_utils.get_master_client(ctx, clients) - - # once the client is chosen, pull the host name and assigned port out of - # the role_endpoints that were assigned by the rgw task - (remote_host, remote_port) = ctx.rgw.role_endpoints[client] - - ## - user1='foo' - user2='fud' - subuser1='foo:foo1' - subuser2='foo:foo2' - display_name1='Foo' - display_name2='Fud' - email='foo@foo.com' - email2='bar@bar.com' - access_key='9te6NH5mcdcq0Tc5i8i1' - secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu' - access_key2='p5YnriCv1nAtykxBrupQ' - secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh' - swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL' - swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy' - - bucket_name='myfoo' - bucket_name2='mybar' - - # connect to rgw - connection = boto.s3.connection.S3Connection( - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - is_secure=False, - port=remote_port, - host=remote_host, - calling_format=boto.s3.connection.OrdinaryCallingFormat(), - ) - connection2 = boto.s3.connection.S3Connection( - aws_access_key_id=access_key2, - aws_secret_access_key=secret_key2, - is_secure=False, - port=remote_port, - host=remote_host, - calling_format=boto.s3.connection.OrdinaryCallingFormat(), - ) - - # legend (test cases can be easily grep-ed out) - # TESTCASE 'testname','object','method','operation','assertion' - # TESTCASE 'info-nosuch','user','info','non-existent user','fails' - (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1]) - assert err - - # TESTCASE 'create-ok','user','create','w/all valid info','succeeds' - (err, out) = rgwadmin(ctx, client, [ - 'user', 'create', - '--uid', user1, - '--display-name', display_name1, - '--email', email, - '--access-key', access_key, - '--secret', secret_key, - '--max-buckets', '4' - ], - check_status=True) - - # TESTCASE 'duplicate email','user','create','existing user email','fails' - (err, out) = rgwadmin(ctx, client, [ - 'user', 'create', - '--uid', user2, - '--display-name', 
display_name2, - '--email', email, - ]) - assert err - - # TESTCASE 'info-existing','user','info','existing user','returns correct info' - (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) - assert out['user_id'] == user1 - assert out['email'] == email - assert out['display_name'] == display_name1 - assert len(out['keys']) == 1 - assert out['keys'][0]['access_key'] == access_key - assert out['keys'][0]['secret_key'] == secret_key - assert not out['suspended'] - - # this whole block should only be run if regions have been configured - if multi_region_run: - rgw_utils.radosgw_agent_sync_all(ctx) - # post-sync, validate that user1 exists on the sync destination host - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - dest_client = c_config['dest'] - (err, out) = rgwadmin(ctx, dest_client, ['metadata', 'list', 'user']) - (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True) - assert out['user_id'] == user1 - assert out['email'] == email - assert out['display_name'] == display_name1 - assert len(out['keys']) == 1 - assert out['keys'][0]['access_key'] == access_key - assert out['keys'][0]['secret_key'] == secret_key - assert not out['suspended'] - - # compare the metadata between different regions, make sure it matches - log.debug('compare the metadata between different regions, make sure it matches') - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - source_client = c_config['src'] - dest_client = c_config['dest'] - (err1, out1) = rgwadmin(ctx, source_client, - ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True) - (err2, out2) = rgwadmin(ctx, dest_client, - ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True) - assert out1 == out2 - - # suspend a user on the master, then check the status on the destination - log.debug('suspend a user on the master, then check the status on the destination') - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - source_client = c_config['src'] - dest_client = c_config['dest'] - (err, out) = rgwadmin(ctx, source_client, ['user', 'suspend', '--uid', user1]) - rgw_utils.radosgw_agent_sync_all(ctx) - (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True) - assert out['suspended'] - - # delete a user on the master, then check that it's gone on the destination - log.debug('delete a user on the master, then check that it\'s gone on the destination') - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - source_client = c_config['src'] - dest_client = c_config['dest'] - (err, out) = rgwadmin(ctx, source_client, ['user', 'rm', '--uid', user1], check_status=True) - rgw_utils.radosgw_agent_sync_all(ctx) - (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user1]) - assert out is None - (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1]) - assert out is None - - # then recreate it so later tests pass - (err, out) = rgwadmin(ctx, client, [ - 'user', 'create', - '--uid', user1, - '--display-name', display_name1, - '--email', email, - '--access-key', access_key, - '--secret', secret_key, - '--max-buckets', '4' - ], - check_status=True) - - # now do the multi-region bucket tests - log.debug('now do the multi-region bucket tests') - - # Create a second user for the following tests - log.debug('Create a second user for the following tests') - (err, out) = rgwadmin(ctx, client, [ - 'user', 'create', - '--uid', user2, 
- '--display-name', display_name2, - '--email', email2, - '--access-key', access_key2, - '--secret', secret_key2, - '--max-buckets', '4' - ], - check_status=True) - (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user2], check_status=True) - assert out is not None - - # create a bucket and do a sync - log.debug('create a bucket and do a sync') - bucket = connection.create_bucket(bucket_name2) - rgw_utils.radosgw_agent_sync_all(ctx) - - # compare the metadata for the bucket between different regions, make sure it matches - log.debug('compare the metadata for the bucket between different regions, make sure it matches') - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - source_client = c_config['src'] - dest_client = c_config['dest'] - (err1, out1) = rgwadmin(ctx, source_client, - ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - check_status=True) - (err2, out2) = rgwadmin(ctx, dest_client, - ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - check_status=True) - assert out1 == out2 - - # get the bucket.instance info and compare that - src_bucket_id = out1['data']['bucket']['bucket_id'] - dest_bucket_id = out2['data']['bucket']['bucket_id'] - (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get', - 'bucket.instance:{bucket_name}:{bucket_instance}'.format( - bucket_name=bucket_name2,bucket_instance=src_bucket_id)], - check_status=True) - (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get', - 'bucket.instance:{bucket_name}:{bucket_instance}'.format( - bucket_name=bucket_name2,bucket_instance=dest_bucket_id)], - check_status=True) - del out1['data']['bucket_info']['bucket']['pool'] - del out1['data']['bucket_info']['bucket']['index_pool'] - del out2['data']['bucket_info']['bucket']['pool'] - del out2['data']['bucket_info']['bucket']['index_pool'] - assert out1 == out2 - - same_region = 0 - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - source_client = c_config['src'] - dest_client = c_config['dest'] - - source_region = rgw_utils.region_for_client(ctx, source_client) - dest_region = rgw_utils.region_for_client(ctx, dest_client) - - # 301 is only returned for requests to something in a different region - if source_region == dest_region: - log.debug('301 is only returned for requests to something in a different region') - same_region += 1 - continue - - # Attempt to create a new connection with user1 to the destination RGW - log.debug('Attempt to create a new connection with user1 to the destination RGW') - # and use that to attempt a delete (that should fail) - exception_encountered = False - try: - (dest_remote_host, dest_remote_port) = ctx.rgw.role_endpoints[dest_client] - connection_dest = boto.s3.connection.S3Connection( - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - is_secure=False, - port=dest_remote_port, - host=dest_remote_host, - calling_format=boto.s3.connection.OrdinaryCallingFormat(), - ) - - # this should fail - connection_dest.delete_bucket(bucket_name2) - except boto.exception.S3ResponseError as e: - assert e.status == 301 - exception_encountered = True - - # confirm that the expected exception was seen - assert exception_encountered - - # now delete the bucket on the source RGW and do another sync - log.debug('now delete the bucket on the source RGW and do another sync') - bucket.delete() - rgw_utils.radosgw_agent_sync_all(ctx) - - if same_region == len(ctx.radosgw_agent.config): - bucket.delete() - 
rgw_utils.radosgw_agent_sync_all(ctx) - - # make sure that the bucket no longer exists in either region - log.debug('make sure that the bucket no longer exists in either region') - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - source_client = c_config['src'] - dest_client = c_config['dest'] - (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get', - 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)]) - (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get', - 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)]) - # Both of the previous calls should have errors due to requesting - # metadata for non-existent buckets - assert err1 - assert err2 - - # create a bucket and then sync it - log.debug('create a bucket and then sync it') - bucket = connection.create_bucket(bucket_name2) - rgw_utils.radosgw_agent_sync_all(ctx) - - # compare the metadata for the bucket between different regions, make sure it matches - log.debug('compare the metadata for the bucket between different regions, make sure it matches') - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - source_client = c_config['src'] - dest_client = c_config['dest'] - (err1, out1) = rgwadmin(ctx, source_client, - ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - check_status=True) - (err2, out2) = rgwadmin(ctx, dest_client, - ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - check_status=True) - assert out1 == out2 - - # Now delete the bucket and recreate it with a different user - log.debug('Now delete the bucket and recreate it with a different user') - # within the same window of time and then sync. - bucket.delete() - bucket = connection2.create_bucket(bucket_name2) - rgw_utils.radosgw_agent_sync_all(ctx) - - # compare the metadata for the bucket between different regions, make sure it matches - log.debug('compare the metadata for the bucket between different regions, make sure it matches') - # user2 should own the bucket in both regions - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - source_client = c_config['src'] - dest_client = c_config['dest'] - (err1, out1) = rgwadmin(ctx, source_client, - ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - check_status=True) - (err2, out2) = rgwadmin(ctx, dest_client, - ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - check_status=True) - assert out1 == out2 - assert out1['data']['owner'] == user2 - assert out1['data']['owner'] != user1 - - # now we're going to use this bucket to test meta-data update propagation - log.debug('now we\'re going to use this bucket to test meta-data update propagation') - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - source_client = c_config['src'] - dest_client = c_config['dest'] - - # get the metadata so we can tweak it - log.debug('get the metadata so we can tweak it') - (err, orig_data) = rgwadmin(ctx, source_client, - ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - check_status=True) - - # manually edit mtime for this bucket to be 300 seconds in the past - log.debug('manually edit mtime for this bucket to be 300 seconds in the past') - new_data = copy.deepcopy(orig_data) - new_data['mtime'] = orig_data['mtime'] - 300 - assert new_data != orig_data - (err, out) = rgwadmin(ctx, source_client, - ['metadata', 'put', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - 
stdin=StringIO(json.dumps(new_data)), - check_status=True) - - # get the metadata and make sure that the 'put' worked - log.debug('get the metadata and make sure that the \'put\' worked') - (err, out) = rgwadmin(ctx, source_client, - ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - check_status=True) - assert out == new_data - - # sync to propagate the new metadata - log.debug('sync to propagate the new metadata') - rgw_utils.radosgw_agent_sync_all(ctx) - - # get the metadata from the dest and compare it to what we just set - log.debug('get the metadata from the dest and compare it to what we just set') - # and what the source region has. - (err1, out1) = rgwadmin(ctx, source_client, - ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - check_status=True) - (err2, out2) = rgwadmin(ctx, dest_client, - ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)], - check_status=True) - # yeah for the transitive property - assert out1 == out2 - assert out1 == new_data - - # now we delete the bucket - log.debug('now we delete the bucket') - bucket.delete() - - log.debug('sync to propagate the deleted bucket') - rgw_utils.radosgw_agent_sync_all(ctx) - - # Delete user2 as later tests do not expect it to exist. - # Verify that it is gone on both regions - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - source_client = c_config['src'] - dest_client = c_config['dest'] - (err, out) = rgwadmin(ctx, source_client, - ['user', 'rm', '--uid', user2], check_status=True) - rgw_utils.radosgw_agent_sync_all(ctx) - # The two 'user info' calls should fail and not return any data - # since we just deleted this user. - (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user2]) - assert out is None - (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user2]) - assert out is None - - # Test data sync - - # First create a bucket for data sync test purpose - bucket = connection.create_bucket(bucket_name + 'data') - - # Create a tiny file and check if in sync - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - if c_config.get('metadata-only'): - continue - - source_client = c_config['src'] - dest_client = c_config['dest'] - k = boto.s3.key.Key(bucket) - k.key = 'tiny_file' - k.set_contents_from_string("123456789") - time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client)) - rgw_utils.radosgw_agent_sync_all(ctx, data=True) - (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client] - dest_connection = boto.s3.connection.S3Connection( - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - is_secure=False, - port=dest_port, - host=dest_host, - calling_format=boto.s3.connection.OrdinaryCallingFormat(), - ) - dest_k = dest_connection.get_bucket(bucket_name + 'data').get_key('tiny_file') - assert k.get_contents_as_string() == dest_k.get_contents_as_string() - - # check that deleting it removes it from the dest zone - k.delete() - time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client)) - rgw_utils.radosgw_agent_sync_all(ctx, data=True) - - dest_bucket = dest_connection.get_bucket(bucket_name + 'data') - dest_k = dest_bucket.get_key('tiny_file') - assert dest_k == None, 'object not deleted from destination zone' - - # finally we delete the bucket - bucket.delete() - - bucket = connection.create_bucket(bucket_name + 'data2') - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - if c_config.get('metadata-only'): - continue - - 
source_client = c_config['src'] - dest_client = c_config['dest'] - (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client] - dest_connection = boto.s3.connection.S3Connection( - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - is_secure=False, - port=dest_port, - host=dest_host, - calling_format=boto.s3.connection.OrdinaryCallingFormat(), - ) - for i in range(20): - k = boto.s3.key.Key(bucket) - k.key = 'tiny_file_' + str(i) - k.set_contents_from_string(str(i) * 100) - - time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client)) - rgw_utils.radosgw_agent_sync_all(ctx, data=True) - - for i in range(20): - dest_k = dest_connection.get_bucket(bucket_name + 'data2').get_key('tiny_file_' + str(i)) - assert (str(i) * 100) == dest_k.get_contents_as_string() - k = boto.s3.key.Key(bucket) - k.key = 'tiny_file_' + str(i) - k.delete() - - # check that deleting removes the objects from the dest zone - time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client)) - rgw_utils.radosgw_agent_sync_all(ctx, data=True) - - for i in range(20): - dest_bucket = dest_connection.get_bucket(bucket_name + 'data2') - dest_k = dest_bucket.get_key('tiny_file_' + str(i)) - assert dest_k == None, 'object %d not deleted from destination zone' % i - bucket.delete() - - # end of 'if multi_region_run:' - - # TESTCASE 'suspend-ok','user','suspend','active user','succeeds' - (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1], - check_status=True) - - # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory' - (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) - assert out['suspended'] - - # TESTCASE 're-enable','user','enable','suspended user','succeeds' - (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], check_status=True) - - # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended' - (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) - assert not out['suspended'] - - # TESTCASE 'add-keys','key','create','w/valid info','succeeds' - (err, out) = rgwadmin(ctx, client, [ - 'key', 'create', '--uid', user1, - '--access-key', access_key2, '--secret', secret_key2, - ], check_status=True) - - # TESTCASE 'info-new-key','user','info','after key addition','returns all keys' - (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], - check_status=True) - assert len(out['keys']) == 2 - assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2 - assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2 - - # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed' - (err, out) = rgwadmin(ctx, client, [ - 'key', 'rm', '--uid', user1, - '--access-key', access_key2, - ], check_status=True) - assert len(out['keys']) == 1 - assert out['keys'][0]['access_key'] == access_key - assert out['keys'][0]['secret_key'] == secret_key - - # TESTCASE 'add-swift-key','key','create','swift key','succeeds' - subuser_access = 'full' - subuser_perm = 'full-control' - - (err, out) = rgwadmin(ctx, client, [ - 'subuser', 'create', '--subuser', subuser1, - '--access', subuser_access - ], check_status=True) - - # TESTCASE 'add-swift-key','key','create','swift key','succeeds' - (err, out) = rgwadmin(ctx, client, [ - 'subuser', 'modify', '--subuser', subuser1, - '--secret', swift_secret1, - '--key-type', 'swift', - ], check_status=True) - - # TESTCASE 
'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds' - (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1]) - - assert out['subusers'][0]['permissions'] == subuser_perm - - # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys' - (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) - assert len(out['swift_keys']) == 1 - assert out['swift_keys'][0]['user'] == subuser1 - assert out['swift_keys'][0]['secret_key'] == swift_secret1 - - # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds' - (err, out) = rgwadmin(ctx, client, [ - 'subuser', 'create', '--subuser', subuser2, - '--secret', swift_secret2, - '--key-type', 'swift', - ], check_status=True) - - # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys' - (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) - assert len(out['swift_keys']) == 2 - assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2 - assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2 - - # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed' - (err, out) = rgwadmin(ctx, client, [ - 'key', 'rm', '--subuser', subuser1, - '--key-type', 'swift', - ], check_status=True) - assert len(out['swift_keys']) == 1 - - # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed' - (err, out) = rgwadmin(ctx, client, [ - 'subuser', 'rm', '--subuser', subuser1, - ], check_status=True) - assert len(out['subusers']) == 1 - - # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subser and key is removed' - (err, out) = rgwadmin(ctx, client, [ - 'subuser', 'rm', '--subuser', subuser2, - '--key-type', 'swift', '--purge-keys', - ], check_status=True) - assert len(out['swift_keys']) == 0 - assert len(out['subusers']) == 0 - - # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list' - (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], - check_status=True) - assert len(out) == 0 - - if multi_region_run: - rgw_utils.radosgw_agent_sync_all(ctx) - - # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list' - (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True) - assert len(out) == 0 - - # create a first bucket - bucket = connection.create_bucket(bucket_name) - - # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list' - (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True) - assert len(out) == 1 - assert out[0] == bucket_name - - # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list' - (err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True) - assert len(out) >= 1 - assert bucket_name in out; - - # TESTCASE 'max-bucket-limit,'bucket','create','4 buckets','5th bucket fails due to max buckets == 4' - bucket2 = connection.create_bucket(bucket_name + '2') - bucket3 = connection.create_bucket(bucket_name + '3') - bucket4 = connection.create_bucket(bucket_name + '4') - # the 5th should fail. 
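# The '--max-buckets', '4' passed to 'user create' earlier is what makes this fifth
# create_bucket() call fail. A minimal sketch of the same check that catches the
# specific boto exception instead of a bare Exception (assuming the gateway rejects
# the over-quota request with an S3 error response; the original code below keeps
# the broader catch):
#
#     try:
#         connection.create_bucket(bucket_name + '5')
#         raise AssertionError('fifth bucket should exceed max-buckets=4')
#     except boto.exception.S3ResponseError:
#         pass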
- failed = False - try: - connection.create_bucket(bucket_name + '5') - except Exception: - failed = True - assert failed - - # delete the buckets - bucket2.delete() - bucket3.delete() - bucket4.delete() - - # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list' - (err, out) = rgwadmin(ctx, client, [ - 'bucket', 'stats', '--bucket', bucket_name], check_status=True) - assert out['owner'] == user1 - bucket_id = out['id'] - - # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID' - (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], check_status=True) - assert len(out) == 1 - assert out[0]['id'] == bucket_id # does it return the same ID twice in a row? - - # use some space - key = boto.s3.key.Key(bucket) - key.set_contents_from_string('one') - - # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object' - (err, out) = rgwadmin(ctx, client, [ - 'bucket', 'stats', '--bucket', bucket_name], check_status=True) - assert out['id'] == bucket_id - assert out['usage']['rgw.main']['num_objects'] == 1 - assert out['usage']['rgw.main']['size_kb'] > 0 - - # reclaim it - key.delete() - - # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error' - (err, out) = rgwadmin(ctx, client, - ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name], - check_status=True) - - # create a second user to link the bucket to - (err, out) = rgwadmin(ctx, client, [ - 'user', 'create', - '--uid', user2, - '--display-name', display_name2, - '--access-key', access_key2, - '--secret', secret_key2, - '--max-buckets', '1', - ], - check_status=True) - - # try creating an object with the first user before the bucket is relinked - denied = False - key = boto.s3.key.Key(bucket) - - try: - key.set_contents_from_string('two') - except boto.exception.S3ResponseError: - denied = True - - assert not denied - - # delete the object - key.delete() - - # link the bucket to another user - (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name], - check_status=True) - - # try to remove user, should fail (has a linked bucket) - (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2]) - assert err - - # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked' - (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name], - check_status=True) - - # relink the bucket to the first user and delete the second user - (err, out) = rgwadmin(ctx, client, - ['bucket', 'link', '--uid', user1, '--bucket', bucket_name], - check_status=True) - - (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2], - check_status=True) - - # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed' - - # upload an object - object_name = 'four' - key = boto.s3.key.Key(bucket, object_name) - key.set_contents_from_string(object_name) - - # now delete it - (err, out) = rgwadmin(ctx, client, - ['object', 'rm', '--bucket', bucket_name, '--object', object_name], - check_status=True) - - # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists one no objects' - (err, out) = rgwadmin(ctx, client, [ - 'bucket', 'stats', '--bucket', bucket_name], - check_status=True) - assert out['id'] == bucket_id - assert out['usage']['rgw.main']['num_objects'] == 0 - - # list log objects - # TESTCASE 
'log-list','log','list','after activity','succeeds, lists one no objects' - (err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True) - assert len(out) > 0 - - for obj in out: - # TESTCASE 'log-show','log','show','after activity','returns expected info' - if obj[:4] == 'meta' or obj[:4] == 'data': - continue - - (err, rgwlog) = rgwadmin(ctx, client, ['log', 'show', '--object', obj], - check_status=True) - assert len(rgwlog) > 0 - - # exempt bucket_name2 from checking as it was only used for multi-region tests - assert rgwlog['bucket'].find(bucket_name) == 0 or rgwlog['bucket'].find(bucket_name2) == 0 - assert rgwlog['bucket'] != bucket_name or rgwlog['bucket_id'] == bucket_id - assert rgwlog['bucket_owner'] == user1 or rgwlog['bucket'] == bucket_name + '5' or rgwlog['bucket'] == bucket_name2 - for entry in rgwlog['log_entries']: - log.debug('checking log entry: ', entry) - assert entry['bucket'] == rgwlog['bucket'] - possible_buckets = [bucket_name + '5', bucket_name2] - user = entry['user'] - assert user == user1 or user.endswith('system-user') or \ - rgwlog['bucket'] in possible_buckets - - # TESTCASE 'log-rm','log','rm','delete log objects','succeeds' - (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj], - check_status=True) - - # TODO: show log by bucket+date - - # need to wait for all usage data to get flushed, should take up to 30 seconds - timestamp = time.time() - while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes - (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj']) # last operation we did is delete obj, wait for it to flush - if get_user_successful_ops(out, user1) > 0: - break - time.sleep(1) - - assert time.time() - timestamp <= (20 * 60) - - # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds' - (err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True) - assert len(out['entries']) > 0 - assert len(out['summary']) > 0 - - user_summary = get_user_summary(out, user1) - - total = user_summary['total'] - assert total['successful_ops'] > 0 - - # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds' - (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1], - check_status=True) - assert len(out['entries']) > 0 - assert len(out['summary']) > 0 - user_summary = out['summary'][0] - for entry in user_summary['categories']: - assert entry['successful_ops'] > 0 - assert user_summary['user'] == user1 - - # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds' - test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket'] - for cat in test_categories: - (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat], - check_status=True) - assert len(out['summary']) > 0 - user_summary = out['summary'][0] - assert user_summary['user'] == user1 - assert len(user_summary['categories']) == 1 - entry = user_summary['categories'][0] - assert entry['category'] == cat - assert entry['successful_ops'] > 0 - - # the usage flush interval is 30 seconds, wait that much an then some - # to make sure everything has been flushed - time.sleep(35) - - # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed' - (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1], - check_status=True) - (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1], - check_status=True) - assert len(out['entries']) == 0 - assert len(out['summary']) == 0 - - # TESTCASE 
'user-suspend2','user','suspend','existing user','succeeds' - (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1], - check_status=True) - - # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects' - try: - key = boto.s3.key.Key(bucket) - key.set_contents_from_string('five') - except boto.exception.S3ResponseError as e: - assert e.status == 403 - - # TESTCASE 'user-renable2','user','enable','suspended user','succeeds' - (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], - check_status=True) - - # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects' - key = boto.s3.key.Key(bucket) - key.set_contents_from_string('six') - - # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection' - - # create an object large enough to be split into multiple parts - test_string = 'foo'*10000000 - - big_key = boto.s3.key.Key(bucket) - big_key.set_contents_from_string(test_string) - - # now delete the head - big_key.delete() - - # wait a bit to give the garbage collector time to cycle - time.sleep(15) - - (err, out) = rgwadmin(ctx, client, ['gc', 'list']) - - assert len(out) > 0 - - # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage' - (err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True) - - #confirm - (err, out) = rgwadmin(ctx, client, ['gc', 'list']) - - assert len(out) == 0 - - # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets' - (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1]) - assert err - - # delete should fail because ``key`` still exists - try: - bucket.delete() - except boto.exception.S3ResponseError as e: - assert e.status == 409 - - key.delete() - bucket.delete() - - # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy' - bucket = connection.create_bucket(bucket_name) - - # create an object - key = boto.s3.key.Key(bucket) - key.set_contents_from_string('seven') - - # should be private already but guarantee it - key.set_acl('private') - - (err, out) = rgwadmin(ctx, client, - ['policy', '--bucket', bucket.name, '--object', key.key], - check_status=True) - - acl = get_acl(key) - - assert acl == out.strip('\n') - - # add another grantee by making the object public read - key.set_acl('public-read') - - (err, out) = rgwadmin(ctx, client, - ['policy', '--bucket', bucket.name, '--object', key.key], - check_status=True) - - acl = get_acl(key) - - assert acl == out.strip('\n') - - # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds' - bucket = connection.create_bucket(bucket_name) - key_name = ['eight', 'nine', 'ten', 'eleven'] - for i in range(4): - key = boto.s3.key.Key(bucket) - key.set_contents_from_string(key_name[i]) - - (err, out) = rgwadmin(ctx, client, - ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'], - check_status=True) - - # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds' - caps='user=read' - (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps]) - - assert out['caps'][0]['perm'] == 'read' - - # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds' - (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps]) - - assert not out['caps'] - - # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets' - bucket = connection.create_bucket(bucket_name) - key = boto.s3.key.Key(bucket) - - (err, out) = rgwadmin(ctx, client, ['user', 
'rm', '--uid', user1]) - assert err - - # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds' - bucket = connection.create_bucket(bucket_name) - key = boto.s3.key.Key(bucket) - key.set_contents_from_string('twelve') - - (err, out) = rgwadmin(ctx, client, - ['user', 'rm', '--uid', user1, '--purge-data' ], - check_status=True) - - # TESTCASE 'rm-user3','user','rm','deleted user','fails' - (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1]) - assert err - - # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule' - # - - (err, out) = rgwadmin(ctx, client, ['zone', 'get']) - orig_placement_pools = len(out['placement_pools']) - - # removed this test, it is not correct to assume that zone has default placement, it really - # depends on how we set it up before - # - # assert len(out) > 0 - # assert len(out['placement_pools']) == 1 - - # default_rule = out['placement_pools'][0] - # assert default_rule['key'] == 'default-placement' - - rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}} - - out['placement_pools'].append(rule) - - (err, out) = rgwadmin(ctx, client, ['zone', 'set'], - stdin=StringIO(json.dumps(out)), - check_status=True) - - (err, out) = rgwadmin(ctx, client, ['zone', 'get']) - assert len(out) > 0 - assert len(out['placement_pools']) == orig_placement_pools + 1 diff --git a/tasks/radosgw_admin_rest.py b/tasks/radosgw_admin_rest.py deleted file mode 100644 index 7bd72d19536..00000000000 --- a/tasks/radosgw_admin_rest.py +++ /dev/null @@ -1,668 +0,0 @@ -""" -Run a series of rgw admin commands through the rest interface. - -The test cases in this file have been annotated for inventory. -To extract the inventory (in csv format) use the command: - - grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //' - -""" -from cStringIO import StringIO -import logging -import json - -import boto.exception -import boto.s3.connection -import boto.s3.acl - -import requests -import time - -from boto.connection import AWSAuthConnection -from teuthology import misc as teuthology -from util.rgw import get_user_summary, get_user_successful_ops - -log = logging.getLogger(__name__) - -def rgwadmin(ctx, client, cmd): - """ - Perform rgw admin command - - :param client: client - :param cmd: command to execute. - :return: command exit status, json result. 
- """ - log.info('radosgw-admin: %s' % cmd) - testdir = teuthology.get_testdir(ctx) - pre = [ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'radosgw-admin', - '--log-to-stderr', - '--format', 'json', - ] - pre.extend(cmd) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() - proc = remote.run( - args=pre, - check_status=False, - stdout=StringIO(), - stderr=StringIO(), - ) - r = proc.exitstatus - out = proc.stdout.getvalue() - j = None - if not r and out != '': - try: - j = json.loads(out) - log.info(' json result: %s' % j) - except ValueError: - j = out - log.info(' raw result: %s' % j) - return (r, j) - - -def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False): - """ - perform a rest command - """ - log.info('radosgw-admin-rest: %s %s' % (cmd, params)) - put_cmds = ['create', 'link', 'add'] - post_cmds = ['unlink', 'modify'] - delete_cmds = ['trim', 'rm', 'process'] - get_cmds = ['check', 'info', 'show', 'list'] - - bucket_sub_resources = ['object', 'policy', 'index'] - user_sub_resources = ['subuser', 'key', 'caps'] - zone_sub_resources = ['pool', 'log', 'garbage'] - - def get_cmd_method_and_handler(cmd): - """ - Get the rest command and handler from information in cmd and - from the imported requests object. - """ - if cmd[1] in put_cmds: - return 'PUT', requests.put - elif cmd[1] in delete_cmds: - return 'DELETE', requests.delete - elif cmd[1] in post_cmds: - return 'POST', requests.post - elif cmd[1] in get_cmds: - return 'GET', requests.get - - def get_resource(cmd): - """ - Get the name of the resource from information in cmd. - """ - if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources: - if cmd[0] == 'bucket': - return 'bucket', '' - else: - return 'bucket', cmd[0] - elif cmd[0] == 'user' or cmd[0] in user_sub_resources: - if cmd[0] == 'user': - return 'user', '' - else: - return 'user', cmd[0] - elif cmd[0] == 'usage': - return 'usage', '' - elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources: - if cmd[0] == 'zone': - return 'zone', '' - else: - return 'zone', cmd[0] - - def build_admin_request(conn, method, resource = '', headers=None, data='', - query_args=None, params=None): - """ - Build an administative request adapted from the build_request() - method of boto.connection - """ - - path = conn.calling_format.build_path_base('admin', resource) - auth_path = conn.calling_format.build_auth_path('admin', resource) - host = conn.calling_format.build_host(conn.server_name(), 'admin') - if query_args: - path += '?' + query_args - boto.log.debug('path=%s' % path) - auth_path += '?' 
+ query_args - boto.log.debug('auth_path=%s' % auth_path) - return AWSAuthConnection.build_base_http_request(conn, method, path, - auth_path, params, headers, data, host) - - method, handler = get_cmd_method_and_handler(cmd) - resource, query_args = get_resource(cmd) - request = build_admin_request(connection, method, resource, - query_args=query_args, headers=headers) - - url = '{protocol}://{host}{path}'.format(protocol=request.protocol, - host=request.host, path=request.path) - - request.authorize(connection=connection) - result = handler(url, params=params, headers=request.headers) - - if raw: - log.info(' text result: %s' % result.txt) - return result.status_code, result.txt - else: - log.info(' json result: %s' % result.json()) - return result.status_code, result.json() - - -def task(ctx, config): - """ - Test radosgw-admin functionality through the RESTful interface - """ - assert config is None or isinstance(config, list) \ - or isinstance(config, dict), \ - "task s3tests only supports a list or dictionary for configuration" - all_clients = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - if config is None: - config = all_clients - if isinstance(config, list): - config = dict.fromkeys(config) - clients = config.keys() - - # just use the first client... - client = clients[0] - - ## - admin_user = 'ada' - admin_display_name = 'Ms. Admin User' - admin_access_key = 'MH1WC2XQ1S8UISFDZC8W' - admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG' - admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write' - - user1 = 'foo' - user2 = 'fud' - subuser1 = 'foo:foo1' - subuser2 = 'foo:foo2' - display_name1 = 'Foo' - display_name2 = 'Fud' - email = 'foo@foo.com' - access_key = '9te6NH5mcdcq0Tc5i8i1' - secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu' - access_key2 = 'p5YnriCv1nAtykxBrupQ' - secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh' - swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL' - swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy' - - bucket_name = 'myfoo' - - # legend (test cases can be easily grep-ed out) - # TESTCASE 'testname','object','method','operation','assertion' - # TESTCASE 'create-admin-user','user','create','administrative user','succeeds' - (err, out) = rgwadmin(ctx, client, [ - 'user', 'create', - '--uid', admin_user, - '--display-name', admin_display_name, - '--access-key', admin_access_key, - '--secret', admin_secret_key, - '--max-buckets', '0', - '--caps', admin_caps - ]) - logging.error(out) - logging.error(err) - assert not err - - (remote,) = ctx.cluster.only(client).remotes.iterkeys() - remote_host = remote.name.split('@')[1] - admin_conn = boto.s3.connection.S3Connection( - aws_access_key_id=admin_access_key, - aws_secret_access_key=admin_secret_key, - is_secure=False, - port=7280, - host=remote_host, - calling_format=boto.s3.connection.OrdinaryCallingFormat(), - ) - - # TESTCASE 'info-nosuch','user','info','non-existent user','fails' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1}) - assert ret == 404 - - # TESTCASE 'create-ok','user','create','w/all valid info','succeeds' - (ret, out) = rgwadmin_rest(admin_conn, - ['user', 'create'], - {'uid' : user1, - 'display-name' : display_name1, - 'email' : email, - 'access-key' : access_key, - 'secret-key' : secret_key, - 'max-buckets' : '4' - }) - - assert ret == 200 - - # TESTCASE 'info-existing','user','info','existing user','returns correct info' - (ret, out) = 
rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - - assert out['user_id'] == user1 - assert out['email'] == email - assert out['display_name'] == display_name1 - assert len(out['keys']) == 1 - assert out['keys'][0]['access_key'] == access_key - assert out['keys'][0]['secret_key'] == secret_key - assert not out['suspended'] - - # TESTCASE 'suspend-ok','user','suspend','active user','succeeds' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True}) - assert ret == 200 - - # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - assert ret == 200 - assert out['suspended'] - - # TESTCASE 're-enable','user','enable','suspended user','succeeds' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'}) - assert not err - - # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - assert ret == 200 - assert not out['suspended'] - - # TESTCASE 'add-keys','key','create','w/valid info','succeeds' - (ret, out) = rgwadmin_rest(admin_conn, - ['key', 'create'], - {'uid' : user1, - 'access-key' : access_key2, - 'secret-key' : secret_key2 - }) - - - assert ret == 200 - - # TESTCASE 'info-new-key','user','info','after key addition','returns all keys' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - assert ret == 200 - assert len(out['keys']) == 2 - assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2 - assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2 - - # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed' - (ret, out) = rgwadmin_rest(admin_conn, - ['key', 'rm'], - {'uid' : user1, - 'access-key' : access_key2 - }) - - assert ret == 200 - - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - - assert len(out['keys']) == 1 - assert out['keys'][0]['access_key'] == access_key - assert out['keys'][0]['secret_key'] == secret_key - - # TESTCASE 'add-swift-key','key','create','swift key','succeeds' - (ret, out) = rgwadmin_rest(admin_conn, - ['subuser', 'create'], - {'subuser' : subuser1, - 'secret-key' : swift_secret1, - 'key-type' : 'swift' - }) - - assert ret == 200 - - # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - assert ret == 200 - assert len(out['swift_keys']) == 1 - assert out['swift_keys'][0]['user'] == subuser1 - assert out['swift_keys'][0]['secret_key'] == swift_secret1 - - # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds' - (ret, out) = rgwadmin_rest(admin_conn, - ['subuser', 'create'], - {'subuser' : subuser2, - 'secret-key' : swift_secret2, - 'key-type' : 'swift' - }) - - assert ret == 200 - - # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - assert ret == 200 - assert len(out['swift_keys']) == 2 - assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2 - assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2 - - # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key 
is removed' - (ret, out) = rgwadmin_rest(admin_conn, - ['key', 'rm'], - {'subuser' : subuser1, - 'key-type' :'swift' - }) - - assert ret == 200 - - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - assert len(out['swift_keys']) == 1 - - # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed' - (ret, out) = rgwadmin_rest(admin_conn, - ['subuser', 'rm'], - {'subuser' : subuser1 - }) - - assert ret == 200 - - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - assert len(out['subusers']) == 1 - - # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subser and key is removed' - (ret, out) = rgwadmin_rest(admin_conn, - ['subuser', 'rm'], - {'subuser' : subuser2, - 'key-type' : 'swift', - '{purge-keys' :True - }) - - assert ret == 200 - - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - assert len(out['swift_keys']) == 0 - assert len(out['subusers']) == 0 - - # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list' - (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1}) - assert ret == 200 - assert len(out) == 0 - - # connect to rgw - connection = boto.s3.connection.S3Connection( - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - is_secure=False, - port=7280, - host=remote_host, - calling_format=boto.s3.connection.OrdinaryCallingFormat(), - ) - - # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list' - (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True}) - assert ret == 200 - assert len(out) == 0 - - # create a first bucket - bucket = connection.create_bucket(bucket_name) - - # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list' - (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1}) - assert ret == 200 - assert len(out) == 1 - assert out[0] == bucket_name - - # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list' - (ret, out) = rgwadmin_rest(admin_conn, - ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True}) - - assert ret == 200 - assert out['owner'] == user1 - bucket_id = out['id'] - - # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID' - (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True}) - assert ret == 200 - assert len(out) == 1 - assert out[0]['id'] == bucket_id # does it return the same ID twice in a row? 
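# For reference, the 'bucket', 'info' calls above go through the rgwadmin_rest()
# helper defined earlier in this file: 'info' is mapped to an HTTP GET by
# get_cmd_method_and_handler(), 'bucket' is mapped to the /admin/bucket resource by
# get_resource(), and the request is signed with the admin user's keys via
# request.authorize() before being sent. A rough sketch of what the stats call turns
# into on the wire (host and port are whatever admin_conn was built with above):
#
#     GET http://{remote_host}:7280/admin/bucket?uid=foo&stats=True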
- - # use some space - key = boto.s3.key.Key(bucket) - key.set_contents_from_string('one') - - # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object' - (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True}) - assert ret == 200 - assert out['id'] == bucket_id - assert out['usage']['rgw.main']['num_objects'] == 1 - assert out['usage']['rgw.main']['size_kb'] > 0 - - # reclaim it - key.delete() - - # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error' - (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name}) - - assert ret == 200 - - # create a second user to link the bucket to - (ret, out) = rgwadmin_rest(admin_conn, - ['user', 'create'], - {'uid' : user2, - 'display-name' : display_name2, - 'access-key' : access_key2, - 'secret-key' : secret_key2, - 'max-buckets' : '1', - }) - - assert ret == 200 - - # try creating an object with the first user before the bucket is relinked - denied = False - key = boto.s3.key.Key(bucket) - - try: - key.set_contents_from_string('two') - except boto.exception.S3ResponseError: - denied = True - - assert not denied - - # delete the object - key.delete() - - # link the bucket to another user - (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user2, 'bucket' : bucket_name}) - - assert ret == 200 - - # try creating an object with the first user which should cause an error - key = boto.s3.key.Key(bucket) - - try: - key.set_contents_from_string('three') - except boto.exception.S3ResponseError: - denied = True - - assert denied - - # relink the bucket to the first user and delete the second user - (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user1, 'bucket' : bucket_name}) - assert ret == 200 - - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2}) - assert ret == 200 - - # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed' - - # upload an object - object_name = 'four' - key = boto.s3.key.Key(bucket, object_name) - key.set_contents_from_string(object_name) - - # now delete it - (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name}) - assert ret == 200 - - # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists one no objects' - (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True}) - assert ret == 200 - assert out['id'] == bucket_id - assert out['usage']['rgw.main']['num_objects'] == 0 - - # create a bucket for deletion stats - useless_bucket = connection.create_bucket('useless_bucket') - useless_key = useless_bucket.new_key('useless_key') - useless_key.set_contents_from_string('useless string') - - # delete it - useless_key.delete() - useless_bucket.delete() - - # wait for the statistics to flush - time.sleep(60) - - # need to wait for all usage data to get flushed, should take up to 30 seconds - timestamp = time.time() - while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes - (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'}) # last operation we did is delete obj, wait for it to flush - - if get_user_successful_ops(out, user1) > 0: - break - time.sleep(1) - - assert time.time() - timestamp <= (20 * 60) - - # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds' - (ret, out) = rgwadmin_rest(admin_conn, 
['usage', 'show']) - assert ret == 200 - assert len(out['entries']) > 0 - assert len(out['summary']) > 0 - user_summary = get_user_summary(out, user1) - total = user_summary['total'] - assert total['successful_ops'] > 0 - - # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds' - (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1}) - assert ret == 200 - assert len(out['entries']) > 0 - assert len(out['summary']) > 0 - user_summary = out['summary'][0] - for entry in user_summary['categories']: - assert entry['successful_ops'] > 0 - assert user_summary['user'] == user1 - - # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds' - test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket'] - for cat in test_categories: - (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat}) - assert ret == 200 - assert len(out['summary']) > 0 - user_summary = out['summary'][0] - assert user_summary['user'] == user1 - assert len(user_summary['categories']) == 1 - entry = user_summary['categories'][0] - assert entry['category'] == cat - assert entry['successful_ops'] > 0 - - # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed' - (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1}) - assert ret == 200 - (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1}) - assert ret == 200 - assert len(out['entries']) == 0 - assert len(out['summary']) == 0 - - # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True}) - assert ret == 200 - - # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects' - try: - key = boto.s3.key.Key(bucket) - key.set_contents_from_string('five') - except boto.exception.S3ResponseError as e: - assert e.status == 403 - - # TESTCASE 'user-renable2','user','enable','suspended user','succeeds' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'}) - assert ret == 200 - - # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects' - key = boto.s3.key.Key(bucket) - key.set_contents_from_string('six') - - # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection' - - # create an object large enough to be split into multiple parts - test_string = 'foo'*10000000 - - big_key = boto.s3.key.Key(bucket) - big_key.set_contents_from_string(test_string) - - # now delete the head - big_key.delete() - - # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1}) - assert ret == 409 - - # delete should fail because ``key`` still exists - try: - bucket.delete() - except boto.exception.S3ResponseError as e: - assert e.status == 409 - - key.delete() - bucket.delete() - - # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy' - bucket = connection.create_bucket(bucket_name) - - # create an object - key = boto.s3.key.Key(bucket) - key.set_contents_from_string('seven') - - # should be private already but guarantee it - key.set_acl('private') - - (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key}) - assert ret == 200 - - acl = key.get_xml_acl() - assert acl == out.strip('\n') - - # add another grantee by making the object public read - 
key.set_acl('public-read') - - (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key}) - assert ret == 200 - - acl = key.get_xml_acl() - assert acl == out.strip('\n') - - # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds' - bucket = connection.create_bucket(bucket_name) - key_name = ['eight', 'nine', 'ten', 'eleven'] - for i in range(4): - key = boto.s3.key.Key(bucket) - key.set_contents_from_string(key_name[i]) - - (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True}) - assert ret == 200 - - # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds' - caps = 'usage=read' - (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' : user1, 'user-caps' : caps}) - assert ret == 200 - assert out[0]['perm'] == 'read' - - # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds' - (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' : user1, 'user-caps' : caps}) - assert ret == 200 - assert not out - - # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets' - bucket = connection.create_bucket(bucket_name) - key = boto.s3.key.Key(bucket) - - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1}) - assert ret == 409 - - # TESTCASE 'rm-user2', 'user', 'rm', user with data', 'succeeds' - bucket = connection.create_bucket(bucket_name) - key = boto.s3.key.Key(bucket) - key.set_contents_from_string('twelve') - - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True}) - assert ret == 200 - - # TESTCASE 'rm-user3','user','info','deleted user','fails' - (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) - assert ret == 404 - diff --git a/tasks/radosgw_agent.py b/tasks/radosgw_agent.py deleted file mode 100644 index 0254805d2af..00000000000 --- a/tasks/radosgw_agent.py +++ /dev/null @@ -1,211 +0,0 @@ -""" -Run rados gateway agent in test mode -""" -import contextlib -import logging -import argparse - -from teuthology.orchestra import run -from teuthology import misc as teuthology -import util.rgw as rgw_utils - -log = logging.getLogger(__name__) - -def run_radosgw_agent(ctx, config): - """ - Run a single radosgw-agent. See task() for config format. 
- """ - return_list = list() - for (client, cconf) in config.items(): - # don't process entries that are not clients - if not client.startswith('client.'): - log.debug('key {data} does not start with \'client.\', moving on'.format( - data=client)) - continue - - src_client = cconf['src'] - dest_client = cconf['dest'] - - src_zone = rgw_utils.zone_for_client(ctx, src_client) - dest_zone = rgw_utils.zone_for_client(ctx, dest_client) - - log.info("source is %s", src_zone) - log.info("dest is %s", dest_zone) - - testdir = teuthology.get_testdir(ctx) - (remote,) = ctx.cluster.only(client).remotes.keys() - # figure out which branch to pull from - branch = cconf.get('force-branch', None) - if not branch: - branch = cconf.get('branch', 'master') - sha1 = cconf.get('sha1') - remote.run( - args=[ - 'cd', testdir, run.Raw('&&'), - 'git', 'clone', - '-b', branch, -# 'https://github.com/ceph/radosgw-agent.git', - 'git://git.ceph.com/radosgw-agent.git', - 'radosgw-agent.{client}'.format(client=client), - ] - ) - if sha1 is not None: - remote.run( - args=[ - 'cd', testdir, run.Raw('&&'), - run.Raw('&&'), - 'git', 'reset', '--hard', sha1, - ] - ) - remote.run( - args=[ - 'cd', testdir, run.Raw('&&'), - 'cd', 'radosgw-agent.{client}'.format(client=client), - run.Raw('&&'), - './bootstrap', - ] - ) - - src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client, - src_zone) - dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client, - dest_zone) - src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client, - src_zone) - dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client, - dest_zone) - sync_scope = cconf.get('sync-scope', None) - port = cconf.get('port', 8000) - daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port) - in_args=[ - 'daemon-helper', - 'kill', - '{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir, - client=client), - '-v', - '--src-access-key', src_access, - '--src-secret-key', src_secret, - '--source', "http://{addr}:{port}".format(addr=src_host, port=src_port), - '--dest-access-key', dest_access, - '--dest-secret-key', dest_secret, - '--max-entries', str(cconf.get('max-entries', 1000)), - '--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format( - tdir=testdir, - client=client), - '--object-sync-timeout', '30', - ] - - if cconf.get('metadata-only', False): - in_args.append('--metadata-only') - - # the test server and full/incremental flags are mutually exclusive - if sync_scope is None: - in_args.append('--test-server-host') - in_args.append('0.0.0.0') - in_args.append('--test-server-port') - in_args.append(str(port)) - log.debug('Starting a sync test server on {client}'.format(client=client)) - # Stash the radosgw-agent server / port # for use by subsequent tasks - ctx.radosgw_agent.endpoint = (client, str(port)) - else: - in_args.append('--sync-scope') - in_args.append(sync_scope) - log.debug('Starting a {scope} sync on {client}'.format(scope=sync_scope,client=client)) - - # positional arg for destination must come last - in_args.append("http://{addr}:{port}".format(addr=dest_host, - port=dest_port)) - - return_list.append((client, remote.run( - args=in_args, - wait=False, - stdin=run.PIPE, - logger=log.getChild(daemon_name), - ))) - return return_list - - -@contextlib.contextmanager -def task(ctx, config): - """ - Run radosgw-agents in test mode. - - Configuration is clients to run the agents on, with settings for - source client, destination client, and port to listen on. 
Binds - to 0.0.0.0. Port defaults to 8000. This must be run on clients - that have the correct zone root pools and rgw zone set in - ceph.conf, or the task cannot read the region information from the - cluster. - - By default, this task will start an HTTP server that will trigger full - or incremental syncs based on requests made to it. - Alternatively, a single full sync can be triggered by - specifying 'sync-scope: full' or a loop of incremental syncs can be triggered - by specifying 'sync-scope: incremental' (the loop will sleep - '--incremental-sync-delay' seconds between each sync, default is 30 seconds). - - By default, both data and metadata are synced. To only sync - metadata, for example because you want to sync between regions, - set metadata-only: true. - - An example:: - - tasks: - - ceph: - conf: - client.0: - rgw zone = foo - rgw zone root pool = .root.pool - client.1: - rgw zone = bar - rgw zone root pool = .root.pool2 - - rgw: # region configuration omitted for brevity - - radosgw-agent: - client.0: - branch: wip-next-feature-branch - src: client.0 - dest: client.1 - sync-scope: full - metadata-only: true - # port: 8000 (default) - client.1: - src: client.1 - dest: client.0 - port: 8001 - """ - assert isinstance(config, dict), 'rgw_sync_agent requires a dictionary config' - log.debug("config is %s", config) - - overrides = ctx.config.get('overrides', {}) - # merge each client section, but only if it exists in config since there isn't - # a sensible default action for this task - for client in config.iterkeys(): - if config[client]: - log.debug('config[{client}]: {data}'.format(client=client, data=config[client])) - teuthology.deep_merge(config[client], overrides.get('radosgw-agent', {})) - - ctx.radosgw_agent = argparse.Namespace() - ctx.radosgw_agent.config = config - - procs = run_radosgw_agent(ctx, config) - - ctx.radosgw_agent.procs = procs - - try: - yield - finally: - testdir = teuthology.get_testdir(ctx) - try: - for client, proc in procs: - log.info("shutting down sync agent on %s", client) - proc.stdin.close() - proc.wait() - finally: - for client, proc in procs: - ctx.cluster.only(client).run( - args=[ - 'rm', '-rf', - '{tdir}/radosgw-agent.{client}'.format(tdir=testdir, - client=client) - ] - ) diff --git a/tasks/rbd.py b/tasks/rbd.py deleted file mode 100644 index 92db23278ab..00000000000 --- a/tasks/rbd.py +++ /dev/null @@ -1,506 +0,0 @@ -""" -Rbd testing task -""" -import contextlib -import logging -import os - -from cStringIO import StringIO -from teuthology.orchestra import run -from teuthology import misc as teuthology -from teuthology import contextutil -from teuthology.parallel import parallel -from teuthology.task.common_fs_utils import generic_mkfs -from teuthology.task.common_fs_utils import generic_mount -from teuthology.task.common_fs_utils import default_image_name - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def create_image(ctx, config): - """ - Create an rbd image. - - For example:: - - tasks: - - ceph: - - rbd.create_image: - client.0: - image_name: testimage - image_size: 100 - image_format: 1 - client.1: - - Image size is expressed as a number of megabytes; default value - is 10240. - - Image format value must be either 1 or 2; default value is 1. 
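    Each entry ultimately shells out to the rbd CLI on the corresponding
    client (wrapped in adjust-ulimits/ceph-coverage); the client.0 entry
    above would run roughly::

        rbd -p rbd create --size 100 testimage

    The --format option is only appended when a non-default format is
    requested.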
- - """ - assert isinstance(config, dict) or isinstance(config, list), \ - "task create_image only supports a list or dictionary for configuration" - - if isinstance(config, dict): - images = config.items() - else: - images = [(role, None) for role in config] - - testdir = teuthology.get_testdir(ctx) - for role, properties in images: - if properties is None: - properties = {} - name = properties.get('image_name', default_image_name(role)) - size = properties.get('image_size', 10240) - fmt = properties.get('image_format', 1) - (remote,) = ctx.cluster.only(role).remotes.keys() - log.info('Creating image {name} with size {size}'.format(name=name, - size=size)) - args = [ - 'adjust-ulimits', - 'ceph-coverage'.format(tdir=testdir), - '{tdir}/archive/coverage'.format(tdir=testdir), - 'rbd', - '-p', 'rbd', - 'create', - '--size', str(size), - name, - ] - # omit format option if using the default (format 1) - # since old versions of don't support it - if int(fmt) != 1: - args += ['--format', str(fmt)] - remote.run(args=args) - try: - yield - finally: - log.info('Deleting rbd images...') - for role, properties in images: - if properties is None: - properties = {} - name = properties.get('image_name', default_image_name(role)) - (remote,) = ctx.cluster.only(role).remotes.keys() - remote.run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'rbd', - '-p', 'rbd', - 'rm', - name, - ], - ) - -@contextlib.contextmanager -def modprobe(ctx, config): - """ - Load the rbd kernel module.. - - For example:: - - tasks: - - ceph: - - rbd.create_image: [client.0] - - rbd.modprobe: [client.0] - """ - log.info('Loading rbd kernel module...') - for role in config: - (remote,) = ctx.cluster.only(role).remotes.keys() - remote.run( - args=[ - 'sudo', - 'modprobe', - 'rbd', - ], - ) - try: - yield - finally: - log.info('Unloading rbd kernel module...') - for role in config: - (remote,) = ctx.cluster.only(role).remotes.keys() - remote.run( - args=[ - 'sudo', - 'modprobe', - '-r', - 'rbd', - # force errors to be ignored; necessary if more - # than one device was created, which may mean - # the module isn't quite ready to go the first - # time through. - run.Raw('||'), - 'true', - ], - ) - -@contextlib.contextmanager -def dev_create(ctx, config): - """ - Map block devices to rbd images. 
- - For example:: - - tasks: - - ceph: - - rbd.create_image: [client.0] - - rbd.modprobe: [client.0] - - rbd.dev_create: - client.0: testimage.client.0 - """ - assert isinstance(config, dict) or isinstance(config, list), \ - "task dev_create only supports a list or dictionary for configuration" - - if isinstance(config, dict): - role_images = config.items() - else: - role_images = [(role, None) for role in config] - - log.info('Creating rbd block devices...') - - testdir = teuthology.get_testdir(ctx) - - for role, image in role_images: - if image is None: - image = default_image_name(role) - (remote,) = ctx.cluster.only(role).remotes.keys() - - remote.run( - args=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'rbd', - '--user', role.rsplit('.')[-1], - '-p', 'rbd', - 'map', - image, - run.Raw('&&'), - # wait for the symlink to be created by udev - 'while', 'test', '!', '-e', '/dev/rbd/rbd/{image}'.format(image=image), run.Raw(';'), 'do', - 'sleep', '1', run.Raw(';'), - 'done', - ], - ) - try: - yield - finally: - log.info('Unmapping rbd devices...') - for role, image in role_images: - if image is None: - image = default_image_name(role) - (remote,) = ctx.cluster.only(role).remotes.keys() - remote.run( - args=[ - 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'rbd', - '-p', 'rbd', - 'unmap', - '/dev/rbd/rbd/{imgname}'.format(imgname=image), - run.Raw('&&'), - # wait for the symlink to be deleted by udev - 'while', 'test', '-e', '/dev/rbd/rbd/{image}'.format(image=image), - run.Raw(';'), - 'do', - 'sleep', '1', run.Raw(';'), - 'done', - ], - ) - - -def rbd_devname_rtn(ctx, image): - return '/dev/rbd/rbd/{image}'.format(image=image) - -def canonical_path(ctx, role, path): - """ - Determine the canonical path for a given path on the host - representing the given role. A canonical path contains no - . or .. components, and includes no symbolic links. - """ - version_fp = StringIO() - ctx.cluster.only(role).run( - args=[ 'readlink', '-f', path ], - stdout=version_fp, - ) - canonical_path = version_fp.getvalue().rstrip('\n') - version_fp.close() - return canonical_path - -@contextlib.contextmanager -def run_xfstests(ctx, config): - """ - Run xfstests over specified devices. - - Warning: both the test and scratch devices specified will be - overwritten. Normally xfstests modifies (but does not destroy) - the test device, but for now the run script used here re-makes - both filesystems. - - Note: Only one instance of xfstests can run on a single host at - a time, although this is not enforced. - - This task in its current form needs some improvement. For - example, it assumes all roles provided in the config are - clients, and that the config provided is a list of key/value - pairs. For now please use the xfstests() interface, below. 
- - For example:: - - tasks: - - ceph: - - rbd.run_xfstests: - client.0: - count: 2 - test_dev: 'test_dev' - scratch_dev: 'scratch_dev' - fs_type: 'xfs' - tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48' - """ - with parallel() as p: - for role, properties in config.items(): - p.spawn(run_xfstests_one_client, ctx, role, properties) - yield - -def run_xfstests_one_client(ctx, role, properties): - """ - Spawned routine to handle xfs tests for a single client - """ - testdir = teuthology.get_testdir(ctx) - try: - count = properties.get('count') - test_dev = properties.get('test_dev') - assert test_dev is not None, \ - "task run_xfstests requires test_dev to be defined" - test_dev = canonical_path(ctx, role, test_dev) - - scratch_dev = properties.get('scratch_dev') - assert scratch_dev is not None, \ - "task run_xfstests requires scratch_dev to be defined" - scratch_dev = canonical_path(ctx, role, scratch_dev) - - fs_type = properties.get('fs_type') - tests = properties.get('tests') - - (remote,) = ctx.cluster.only(role).remotes.keys() - - # Fetch the test script - test_root = teuthology.get_testdir(ctx) - test_script = 'run_xfstests.sh' - test_path = os.path.join(test_root, test_script) - - git_branch = 'master' - test_url = 'https://raw.github.com/ceph/ceph/{branch}/qa/{script}'.format(branch=git_branch, script=test_script) - # test_url = 'http://ceph.newdream.net/git/?p=ceph.git;a=blob_plain;hb=refs/heads/{branch};f=qa/{script}'.format(branch=git_branch, script=test_script) - - log.info('Fetching {script} for {role} from {url}'.format(script=test_script, - role=role, - url=test_url)) - args = [ 'wget', '-O', test_path, '--', test_url ] - remote.run(args=args) - - log.info('Running xfstests on {role}:'.format(role=role)) - log.info(' iteration count: {count}:'.format(count=count)) - log.info(' test device: {dev}'.format(dev=test_dev)) - log.info(' scratch device: {dev}'.format(dev=scratch_dev)) - log.info(' using fs_type: {fs_type}'.format(fs_type=fs_type)) - log.info(' tests to run: {tests}'.format(tests=tests)) - - # Note that the device paths are interpreted using - # readlink -f in order to get their canonical - # pathname (so it matches what the kernel remembers). - args = [ - '/usr/bin/sudo', - 'TESTDIR={tdir}'.format(tdir=testdir), - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - '/bin/bash', - test_path, - '-c', str(count), - '-f', fs_type, - '-t', test_dev, - '-s', scratch_dev, - ] - if tests: - args.append(tests) - remote.run(args=args, logger=log.getChild(role)) - finally: - log.info('Removing {script} on {role}'.format(script=test_script, - role=role)) - remote.run(args=['rm', '-f', test_path]) - -@contextlib.contextmanager -def xfstests(ctx, config): - """ - Run xfstests over rbd devices. This interface sets up all - required configuration automatically if not otherwise specified. - Note that only one instance of xfstests can run on a single host - at a time. By default, the set of tests specified is run once. - If a (non-zero) count value is supplied, the complete set of - tests will be run that number of times. 
- - For example:: - - tasks: - - ceph: - # Image sizes are in MB - - rbd.xfstests: - client.0: - count: 3 - test_image: 'test_image' - test_size: 250 - test_format: 2 - scratch_image: 'scratch_image' - scratch_size: 250 - scratch_format: 1 - fs_type: 'xfs' - tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48' - """ - if config is None: - config = { 'all': None } - assert isinstance(config, dict) or isinstance(config, list), \ - "task xfstests only supports a list or dictionary for configuration" - if isinstance(config, dict): - config = teuthology.replace_all_with_clients(ctx.cluster, config) - runs = config.items() - else: - runs = [(role, None) for role in config] - - running_xfstests = {} - for role, properties in runs: - assert role.startswith('client.'), \ - "task xfstests can only run on client nodes" - for host, roles_for_host in ctx.cluster.remotes.items(): - if role in roles_for_host: - assert host not in running_xfstests, \ - "task xfstests allows only one instance at a time per host" - running_xfstests[host] = True - - images_config = {} - scratch_config = {} - modprobe_config = {} - image_map_config = {} - scratch_map_config = {} - xfstests_config = {} - for role, properties in runs: - if properties is None: - properties = {} - - test_image = properties.get('test_image', 'test_image.{role}'.format(role=role)) - test_size = properties.get('test_size', 2000) # 2G - test_fmt = properties.get('test_format', 1) - scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role)) - scratch_size = properties.get('scratch_size', 10000) # 10G - scratch_fmt = properties.get('scratch_format', 1) - - images_config[role] = dict( - image_name=test_image, - image_size=test_size, - image_format=test_fmt, - ) - - scratch_config[role] = dict( - image_name=scratch_image, - image_size=scratch_size, - image_format=scratch_fmt, - ) - - xfstests_config[role] = dict( - count=properties.get('count', 1), - test_dev='/dev/rbd/rbd/{image}'.format(image=test_image), - scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image), - fs_type=properties.get('fs_type', 'xfs'), - tests=properties.get('tests'), - ) - - log.info('Setting up xfstests using RBD images:') - log.info(' test ({size} MB): {image}'.format(size=test_size, - image=test_image)) - log.info(' scratch ({size} MB): {image}'.format(size=scratch_size, - image=scratch_image)) - modprobe_config[role] = None - image_map_config[role] = test_image - scratch_map_config[role] = scratch_image - - with contextutil.nested( - lambda: create_image(ctx=ctx, config=images_config), - lambda: create_image(ctx=ctx, config=scratch_config), - lambda: modprobe(ctx=ctx, config=modprobe_config), - lambda: dev_create(ctx=ctx, config=image_map_config), - lambda: dev_create(ctx=ctx, config=scratch_map_config), - lambda: run_xfstests(ctx=ctx, config=xfstests_config), - ): - yield - - -@contextlib.contextmanager -def task(ctx, config): - """ - Create and mount an rbd image. 
- - For example, you can specify which clients to run on:: - - tasks: - - ceph: - - rbd: [client.0, client.1] - - There are a few image options:: - - tasks: - - ceph: - - rbd: - client.0: # uses defaults - client.1: - image_name: foo - image_size: 2048 - image_format: 2 - fs_type: xfs - - To use default options on all clients:: - - tasks: - - ceph: - - rbd: - all: - - To create 20GiB images and format them with xfs on all clients:: - - tasks: - - ceph: - - rbd: - all: - image_size: 20480 - fs_type: xfs - """ - if config is None: - config = { 'all': None } - norm_config = config - if isinstance(config, dict): - norm_config = teuthology.replace_all_with_clients(ctx.cluster, config) - if isinstance(norm_config, dict): - role_images = {} - for role, properties in norm_config.iteritems(): - if properties is None: - properties = {} - role_images[role] = properties.get('image_name') - else: - role_images = norm_config - - log.debug('rbd config is: %s', norm_config) - - with contextutil.nested( - lambda: create_image(ctx=ctx, config=norm_config), - lambda: modprobe(ctx=ctx, config=norm_config), - lambda: dev_create(ctx=ctx, config=role_images), - lambda: generic_mkfs(ctx=ctx, config=norm_config, - devname_rtn=rbd_devname_rtn), - lambda: generic_mount(ctx=ctx, config=role_images, - devname_rtn=rbd_devname_rtn), - ): - yield diff --git a/tasks/rbd_fsx.py b/tasks/rbd_fsx.py deleted file mode 100644 index 6d55b5cf457..00000000000 --- a/tasks/rbd_fsx.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Run fsx on an rbd image -""" -import contextlib -import logging - -from teuthology.parallel import parallel -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Run fsx on an rbd image. - - Currently this requires running as client.admin - to create a pool. - - Specify which clients to run on as a list:: - - tasks: - ceph: - rbd_fsx: - clients: [client.0, client.1] - - You can optionally change some properties of fsx: - - tasks: - ceph: - rbd_fsx: - clients: - seed: - ops: - size: - """ - log.info('starting rbd_fsx...') - with parallel() as p: - for role in config['clients']: - p.spawn(_run_one_client, ctx, config, role) - yield - -def _run_one_client(ctx, config, role): - """Spawned task that runs the client""" - testdir = teuthology.get_testdir(ctx) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - remote.run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'ceph_test_librbd_fsx', - '-d', - '-W', '-R', # mmap doesn't work with rbd - '-p', str(config.get('progress_interval', 100)), # show progress - '-P', '{tdir}/archive'.format(tdir=testdir), - '-t', str(config.get('truncbdy',1)), - '-l', str(config.get('size', 250000000)), - '-S', str(config.get('seed', 0)), - '-N', str(config.get('ops', 1000)), - 'pool_{pool}'.format(pool=role), - 'image_{image}'.format(image=role), - ], - ) diff --git a/tasks/recovery_bench.py b/tasks/recovery_bench.py deleted file mode 100644 index 1984b97d31e..00000000000 --- a/tasks/recovery_bench.py +++ /dev/null @@ -1,208 +0,0 @@ -""" -Recovery system benchmarking -""" -from cStringIO import StringIO - -import contextlib -import gevent -import json -import logging -import random -import time - -import ceph_manager -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Benchmark the recovery system. 
- - Generates objects with smalliobench, runs it normally to get a - baseline performance measurement, then marks an OSD out and reruns - to measure performance during recovery. - - The config should be as follows: - - recovery_bench: - duration: - num_objects: - io_size: - - example: - - tasks: - - ceph: - - recovery_bench: - duration: 60 - num_objects: 500 - io_size: 4096 - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'recovery_bench task only accepts a dict for configuration' - - log.info('Beginning recovery bench...') - - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') - while len(manager.get_osd_status()['up']) < num_osds: - manager.sleep(10) - - bench_proc = RecoveryBencher( - manager, - config, - ) - try: - yield - finally: - log.info('joining recovery bencher') - bench_proc.do_join() - -class RecoveryBencher: - """ - RecoveryBencher - """ - def __init__(self, manager, config): - self.ceph_manager = manager - self.ceph_manager.wait_for_clean() - - osd_status = self.ceph_manager.get_osd_status() - self.osds = osd_status['up'] - - self.config = config - if self.config is None: - self.config = dict() - - else: - def tmp(x): - """ - Local wrapper to print value. - """ - print x - self.log = tmp - - log.info("spawning thread") - - self.thread = gevent.spawn(self.do_bench) - - def do_join(self): - """ - Join the recovery bencher. This is called after the main - task exits. - """ - self.thread.get() - - def do_bench(self): - """ - Do the benchmarking. - """ - duration = self.config.get("duration", 60) - num_objects = self.config.get("num_objects", 500) - io_size = self.config.get("io_size", 4096) - - osd = str(random.choice(self.osds)) - (osd_remote,) = self.ceph_manager.ctx.cluster.only('osd.%s' % osd).remotes.iterkeys() - - testdir = teuthology.get_testdir(self.ceph_manager.ctx) - - # create the objects - osd_remote.run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'smalliobench'.format(tdir=testdir), - '--use-prefix', 'recovery_bench', - '--init-only', '1', - '--num-objects', str(num_objects), - '--io-size', str(io_size), - ], - wait=True, - ) - - # baseline bench - log.info('non-recovery (baseline)') - p = osd_remote.run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'smalliobench', - '--use-prefix', 'recovery_bench', - '--do-not-init', '1', - '--duration', str(duration), - '--io-size', str(io_size), - ], - stdout=StringIO(), - stderr=StringIO(), - wait=True, - ) - self.process_samples(p.stderr.getvalue()) - - self.ceph_manager.raw_cluster_cmd('osd', 'out', osd) - time.sleep(5) - - # recovery bench - log.info('recovery active') - p = osd_remote.run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'smalliobench', - '--use-prefix', 'recovery_bench', - '--do-not-init', '1', - '--duration', str(duration), - '--io-size', str(io_size), - ], - stdout=StringIO(), - stderr=StringIO(), - wait=True, - ) - self.process_samples(p.stderr.getvalue()) - - self.ceph_manager.raw_cluster_cmd('osd', 'in', osd) - - def process_samples(self, input): - """ - Extract samples from the input and process the results - - :param input: input lines in JSON format - """ - lat = {} - for line in 
input.split('\n'): - try: - sample = json.loads(line) - samples = lat.setdefault(sample['type'], []) - samples.append(float(sample['latency'])) - except Exception: - pass - - for type in lat: - samples = lat[type] - samples.sort() - - num = len(samples) - - # median - if num & 1 == 1: # odd number of samples - median = samples[num / 2] - else: - median = (samples[num / 2] + samples[num / 2 - 1]) / 2 - - # 99% - ninety_nine = samples[int(num * 0.99)] - - log.info("%s: median %f, 99%% %f" % (type, median, ninety_nine)) diff --git a/tasks/rep_lost_unfound_delete.py b/tasks/rep_lost_unfound_delete.py deleted file mode 100644 index ae5a48d898a..00000000000 --- a/tasks/rep_lost_unfound_delete.py +++ /dev/null @@ -1,153 +0,0 @@ -""" -Lost_unfound -""" -import logging -import ceph_manager -from teuthology import misc as teuthology -from util.rados import rados - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Test handling of lost objects. - - A pretty rigid cluseter is brought up andtested by this task - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'lost_unfound task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - while len(manager.get_osd_status()['up']) < 3: - manager.sleep(10) - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_clean() - - # something that is always there - dummyfile = '/etc/fstab' - - # take an osd out until the very end - manager.kill_osd(2) - manager.mark_down_osd(2) - manager.mark_out_osd(2) - - # kludge to make sure they get a map - rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile]) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.wait_for_recovery() - - # create old objects - for f in range(1, 10): - rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', 'data', 'rm', 'existed_%d' % f]) - - # delay recovery, and make the pg log very long (to prevent backfill) - manager.raw_cluster_cmd( - 'tell', 'osd.1', - 'injectargs', - '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000' - ) - - manager.kill_osd(0) - manager.mark_down_osd(0) - - for f in range(1, 10): - rados(ctx, mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) - rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) - - # bring osd.0 back up, let it peer, but don't replicate the new - # objects... - log.info('osd.0 command_args is %s' % 'foo') - log.info(ctx.daemons.get_daemon('osd', 0).command_args) - ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([ - '--osd-recovery-delay-start', '1000' - ]) - manager.revive_osd(0) - manager.mark_in_osd(0) - manager.wait_till_osd_is_up(0) - - manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.wait_till_active() - - # take out osd.1 and the only copy of those objects. 
- manager.kill_osd(1) - manager.mark_down_osd(1) - manager.mark_out_osd(1) - manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it') - - # bring up osd.2 so that things would otherwise, in theory, recovery fully - manager.revive_osd(2) - manager.mark_in_osd(2) - manager.wait_till_osd_is_up(2) - - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_till_active() - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - - # verify that there are unfound objects - unfound = manager.get_num_unfound_objects() - log.info("there are %d unfound objects" % unfound) - assert unfound - - # mark stuff lost - pgs = manager.get_pg_stats() - for pg in pgs: - if pg['stat_sum']['num_objects_unfound'] > 0: - primary = 'osd.%d' % pg['acting'][0] - - # verify that i can list them direct from the osd - log.info('listing missing/lost in %s state %s', pg['pgid'], - pg['state']); - m = manager.list_pg_missing(pg['pgid']) - #log.info('%s' % m) - assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound'] - num_unfound=0 - for o in m['objects']: - if len(o['locations']) == 0: - num_unfound += 1 - assert m['num_unfound'] == num_unfound - - log.info("reverting unfound in %s on %s", pg['pgid'], primary) - manager.raw_cluster_cmd('pg', pg['pgid'], - 'mark_unfound_lost', 'delete') - else: - log.info("no unfound in %s", pg['pgid']) - - manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5') - manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5') - manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') - manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats') - manager.wait_for_recovery() - - # verify result - for f in range(1, 10): - err = rados(ctx, mon, ['-p', 'data', 'get', 'new_%d' % f, '-']) - assert err - err = rados(ctx, mon, ['-p', 'data', 'get', 'existed_%d' % f, '-']) - assert err - err = rados(ctx, mon, ['-p', 'data', 'get', 'existing_%d' % f, '-']) - assert err - - # see if osd.1 can cope - manager.revive_osd(1) - manager.mark_in_osd(1) - manager.wait_till_osd_is_up(1) - manager.wait_for_clean() diff --git a/tasks/repair_test.py b/tasks/repair_test.py deleted file mode 100644 index 1dd8f2fdefa..00000000000 --- a/tasks/repair_test.py +++ /dev/null @@ -1,213 +0,0 @@ -import logging -import time - -import ceph_manager -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -def setup(ctx, config): - ctx.manager.wait_for_clean() - ctx.manager.create_pool("repair_test_pool", 1) - return "repair_test_pool" - -def teardown(ctx, config, pool): - ctx.manager.remove_pool(pool) - ctx.manager.wait_for_clean() - -def run_test(ctx, config, test): - s = setup(ctx, config) - test(ctx, config, s) - teardown(ctx, config, s) - -def choose_primary(ctx): - def ret(pool, num): - log.info("Choosing primary") - return ctx.manager.get_pg_primary(pool, num) - return ret - -def choose_replica(ctx): - def ret(pool, num): - log.info("Choosing replica") - return ctx.manager.get_pg_replica(pool, num) - return ret - -def trunc(ctx): - def ret(osd, pool, obj): - log.info("truncating object") - return ctx.manager.osd_admin_socket( - osd, - ['truncobj', pool, obj, '1']) - return ret - -def dataerr(ctx): - def ret(osd, pool, obj): - log.info("injecting data err on object") - return ctx.manager.osd_admin_socket( - osd, - ['injectdataerr', pool, obj]) - return ret - -def mdataerr(ctx): - def ret(osd, pool, 
obj): - log.info("injecting mdata err on object") - return ctx.manager.osd_admin_socket( - osd, - ['injectmdataerr', pool, obj]) - return ret - -def omaperr(ctx): - def ret(osd, pool, obj): - log.info("injecting omap err on object") - return ctx.manager.osd_admin_socket(osd, ['setomapval', pool, obj, 'badkey', 'badval']); - return ret - -def gen_repair_test_1(corrupter, chooser, scrub_type): - def ret(ctx, config, pool): - log.info("starting repair test type 1") - victim_osd = chooser(pool, 0) - - # create object - log.info("doing put") - ctx.manager.do_put(pool, 'repair_test_obj', '/etc/hosts') - - # corrupt object - log.info("corrupting object") - corrupter(victim_osd, pool, 'repair_test_obj') - - # verify inconsistent - log.info("scrubbing") - ctx.manager.do_pg_scrub(pool, 0, scrub_type) - - assert ctx.manager.pg_inconsistent(pool, 0) - - # repair - log.info("repairing") - ctx.manager.do_pg_scrub(pool, 0, "repair") - - log.info("re-scrubbing") - ctx.manager.do_pg_scrub(pool, 0, scrub_type) - - # verify consistent - assert not ctx.manager.pg_inconsistent(pool, 0) - log.info("done") - return ret - -def gen_repair_test_2(chooser): - def ret(ctx, config, pool): - log.info("starting repair test type 2") - victim_osd = chooser(pool, 0) - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - # create object - log.info("doing put and setomapval") - ctx.manager.do_put(pool, 'file1', '/etc/hosts') - ctx.manager.do_rados(mon, ['-p', pool, 'setomapval', 'file1', 'key', 'val']) - ctx.manager.do_put(pool, 'file2', '/etc/hosts') - ctx.manager.do_put(pool, 'file3', '/etc/hosts') - ctx.manager.do_put(pool, 'file4', '/etc/hosts') - ctx.manager.do_put(pool, 'file5', '/etc/hosts') - ctx.manager.do_rados(mon, ['-p', pool, 'setomapval', 'file5', 'key', 'val']) - ctx.manager.do_put(pool, 'file6', '/etc/hosts') - - # corrupt object - log.info("corrupting object") - omaperr(ctx)(victim_osd, pool, 'file1') - - # verify inconsistent - log.info("scrubbing") - ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub') - - assert ctx.manager.pg_inconsistent(pool, 0) - - # Regression test for bug #4778, should still - # be inconsistent after scrub - ctx.manager.do_pg_scrub(pool, 0, 'scrub') - - assert ctx.manager.pg_inconsistent(pool, 0) - - # Additional corruptions including 2 types for file1 - log.info("corrupting more objects") - dataerr(ctx)(victim_osd, pool, 'file1') - mdataerr(ctx)(victim_osd, pool, 'file2') - trunc(ctx)(victim_osd, pool, 'file3') - omaperr(ctx)(victim_osd, pool, 'file6') - - # see still inconsistent - log.info("scrubbing") - ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub') - - assert ctx.manager.pg_inconsistent(pool, 0) - - # repair - log.info("repairing") - ctx.manager.do_pg_scrub(pool, 0, "repair") - - # Let repair clear inconsistent flag - time.sleep(10) - - # verify consistent - assert not ctx.manager.pg_inconsistent(pool, 0) - - # In the future repair might determine state of - # inconsistency itself, verify with a deep-scrub - log.info("scrubbing") - ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub') - - # verify consistent - assert not ctx.manager.pg_inconsistent(pool, 0) - - log.info("done") - return ret - -def task(ctx, config): - """ - Test [deep] repair in several situations: - Repair [Truncate, Data EIO, MData EIO] on [Primary|Replica] - - The config should be as follows: - - Must include the log-whitelist below - Must enable filestore_debug_inject_read_err config - - example: - - tasks: - - chef: - - install: - - ceph: - log-whitelist: 
['candidate had a read error', 'deep-scrub 0 missing, 1 inconsistent objects', 'deep-scrub 0 missing, 4 inconsistent objects', 'deep-scrub 1 errors', 'deep-scrub 4 errors', '!= known omap_digest', 'repair 0 missing, 1 inconsistent objects', 'repair 0 missing, 4 inconsistent objects', 'repair 1 errors, 1 fixed', 'repair 4 errors, 4 fixed', 'scrub 0 missing, 1 inconsistent', 'scrub 1 errors', 'size 1 != known size'] - conf: - osd: - filestore debug inject read err: true - - repair_test: - - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'repair_test task only accepts a dict for config' - - if not hasattr(ctx, 'manager'): - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - ctx.manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager') - ) - - tests = [ - gen_repair_test_1(mdataerr(ctx), choose_primary(ctx), "scrub"), - gen_repair_test_1(mdataerr(ctx), choose_replica(ctx), "scrub"), - gen_repair_test_1(dataerr(ctx), choose_primary(ctx), "deep-scrub"), - gen_repair_test_1(dataerr(ctx), choose_replica(ctx), "deep-scrub"), - gen_repair_test_1(trunc(ctx), choose_primary(ctx), "scrub"), - gen_repair_test_1(trunc(ctx), choose_replica(ctx), "scrub"), - gen_repair_test_2(choose_primary(ctx)), - gen_repair_test_2(choose_replica(ctx)) - ] - - for test in tests: - run_test(ctx, config, test) diff --git a/tasks/rest_api.py b/tasks/rest_api.py deleted file mode 100644 index f4de1866884..00000000000 --- a/tasks/rest_api.py +++ /dev/null @@ -1,183 +0,0 @@ -""" -Rest Api -""" -import logging -import contextlib -import time - -from teuthology import misc as teuthology -from teuthology import contextutil -from teuthology.orchestra import run -from tasks.ceph import DaemonGroup - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def run_rest_api_daemon(ctx, api_clients): - """ - Wrapper starts the rest api daemons - """ - if not hasattr(ctx, 'daemons'): - ctx.daemons = DaemonGroup() - remotes = ctx.cluster.only(teuthology.is_type('client')).remotes - for rems, roles in remotes.iteritems(): - for whole_id_ in roles: - if whole_id_ in api_clients: - id_ = whole_id_[len('clients'):] - run_cmd = [ - 'sudo', - 'daemon-helper', - 'kill', - 'ceph-rest-api', - '-n', - 'client.rest{id}'.format(id=id_), ] - cl_rest_id = 'client.rest{id}'.format(id=id_) - ctx.daemons.add_daemon(rems, 'restapi', - cl_rest_id, - args=run_cmd, - logger=log.getChild(cl_rest_id), - stdin=run.PIPE, - wait=False, - ) - for i in range(1, 12): - log.info('testing for ceph-rest-api try {0}'.format(i)) - run_cmd = [ - 'wget', - '-O', - '/dev/null', - '-q', - 'http://localhost:5000/api/v0.1/status' - ] - proc = rems.run( - args=run_cmd, - check_status=False - ) - if proc.exitstatus == 0: - break - time.sleep(5) - if proc.exitstatus != 0: - raise RuntimeError('Cannot contact ceph-rest-api') - try: - yield - - finally: - """ - TO DO: destroy daemons started -- modify iter_daemons_of_role - """ - teuthology.stop_daemons_of_type(ctx, 'restapi') - -@contextlib.contextmanager -def task(ctx, config): - """ - Start up rest-api. - - To start on on all clients:: - - tasks: - - ceph: - - rest-api: - - To only run on certain clients:: - - tasks: - - ceph: - - rest-api: [client.0, client.3] - - or - - tasks: - - ceph: - - rest-api: - client.0: - client.3: - - The general flow of things here is: - 1. Find clients on which rest-api is supposed to run (api_clients) - 2. Generate keyring values - 3. 
Start up ceph-rest-api daemons - On cleanup: - 4. Stop the daemons - 5. Delete keyring value files. - """ - api_clients = [] - remotes = ctx.cluster.only(teuthology.is_type('client')).remotes - log.info(remotes) - if config == None: - api_clients = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - else: - api_clients = config - log.info(api_clients) - testdir = teuthology.get_testdir(ctx) - coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) - for rems, roles in remotes.iteritems(): - for whole_id_ in roles: - if whole_id_ in api_clients: - id_ = whole_id_[len('client.'):] - keyring = '/etc/ceph/ceph.client.rest{id}.keyring'.format( - id=id_) - rems.run( - args=[ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - coverage_dir, - 'ceph-authtool', - '--create-keyring', - '--gen-key', - '--name=client.rest{id}'.format(id=id_), - '--set-uid=0', - '--cap', 'mon', 'allow *', - '--cap', 'osd', 'allow *', - '--cap', 'mds', 'allow', - keyring, - run.Raw('&&'), - 'sudo', - 'chmod', - '0644', - keyring, - ], - ) - rems.run( - args=[ - 'sudo', - 'sh', - '-c', - run.Raw("'"), - "echo", - '[client.rest{id}]'.format(id=id_), - run.Raw('>>'), - "/etc/ceph/ceph.conf", - run.Raw("'") - ] - ) - rems.run( - args=[ - 'sudo', - 'sh', - '-c', - run.Raw("'"), - 'echo', - 'restapi', - 'keyring', - '=', - '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_), - run.Raw('>>'), - '/etc/ceph/ceph.conf', - run.Raw("'"), - ] - ) - rems.run( - args=[ - 'ceph', - 'auth', - 'import', - '-i', - '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_), - ] - ) - with contextutil.nested( - lambda: run_rest_api_daemon(ctx=ctx, api_clients=api_clients),): - yield - diff --git a/tasks/restart.py b/tasks/restart.py deleted file mode 100644 index 697345a975b..00000000000 --- a/tasks/restart.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -Daemon restart -""" -import logging -import pipes - -from teuthology import misc as teuthology -from teuthology.orchestra import run as tor - -from teuthology.orchestra import run -log = logging.getLogger(__name__) - -def restart_daemon(ctx, config, role, id_, *args): - """ - Handle restart (including the execution of the command parameters passed) - """ - log.info('Restarting {r}.{i} daemon...'.format(r=role, i=id_)) - daemon = ctx.daemons.get_daemon(role, id_) - log.debug('Waiting for exit of {r}.{i} daemon...'.format(r=role, i=id_)) - try: - daemon.wait_for_exit() - except tor.CommandFailedError as e: - log.debug('Command Failed: {e}'.format(e=e)) - if len(args) > 0: - confargs = ['--{k}={v}'.format(k=k, v=v) for k,v in zip(args[0::2], args[1::2])] - log.debug('Doing restart of {r}.{i} daemon with args: {a}...'.format(r=role, i=id_, a=confargs)) - daemon.restart_with_args(confargs) - else: - log.debug('Doing restart of {r}.{i} daemon...'.format(r=role, i=id_)) - daemon.restart() - -def get_tests(ctx, config, role, remote, testdir): - """Download restart tests""" - srcdir = '{tdir}/restart.{role}'.format(tdir=testdir, role=role) - - refspec = config.get('branch') - if refspec is None: - refspec = config.get('sha1') - if refspec is None: - refspec = config.get('tag') - if refspec is None: - refspec = 'HEAD' - log.info('Pulling restart qa/workunits from ref %s', refspec) - - remote.run( - logger=log.getChild(role), - args=[ - 'mkdir', '--', srcdir, - run.Raw('&&'), - 'git', - 'archive', - '--remote=git://git.ceph.com/ceph.git', - '%s:qa/workunits' % refspec, - run.Raw('|'), - 'tar', - '-C', srcdir, - '-x', - '-f-', - run.Raw('&&'), - 'cd', '--', 
srcdir, - run.Raw('&&'), - 'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi', - run.Raw('&&'), - 'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir), - run.Raw('>{tdir}/restarts.list'.format(tdir=testdir)), - ], - ) - restarts = sorted(teuthology.get_file( - remote, - '{tdir}/restarts.list'.format(tdir=testdir)).split('\0')) - return (srcdir, restarts) - -def task(ctx, config): - """ - Execute commands and allow daemon restart with config options. - Each process executed can output to stdout restart commands of the form: - restart - This will restart the daemon . with the specified config values once - by modifying the conf file with those values, and then replacing the old conf file - once the daemon is restarted. - This task does not kill a running daemon, it assumes the daemon will abort on an - assert specified in the config. - - tasks: - - install: - - ceph: - - restart: - exec: - client.0: - - test_backtraces.py - - """ - assert isinstance(config, dict), "task kill got invalid config" - - testdir = teuthology.get_testdir(ctx) - - try: - assert 'exec' in config, "config requires exec key with : entries" - for role, task in config['exec'].iteritems(): - log.info('restart for role {r}'.format(r=role)) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - srcdir, restarts = get_tests(ctx, config, role, remote, testdir) - log.info('Running command on role %s host %s', role, remote.name) - spec = '{spec}'.format(spec=task[0]) - log.info('Restarts list: %s', restarts) - log.info('Spec is %s', spec) - to_run = [w for w in restarts if w == task or w.find(spec) != -1] - log.info('To run: %s', to_run) - for c in to_run: - log.info('Running restart script %s...', c) - args = [ - run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)), - ] - env = config.get('env') - if env is not None: - for var, val in env.iteritems(): - quoted_val = pipes.quote(val) - env_arg = '{var}={val}'.format(var=var, val=quoted_val) - args.append(run.Raw(env_arg)) - args.extend([ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - '{srcdir}/{c}'.format( - srcdir=srcdir, - c=c, - ), - ]) - proc = remote.run( - args=args, - stdout=tor.PIPE, - stdin=tor.PIPE, - stderr=log, - wait=False, - ) - log.info('waiting for a command from script') - while True: - l = proc.stdout.readline() - if not l or l == '': - break - log.debug('script command: {c}'.format(c=l)) - ll = l.strip() - cmd = ll.split(' ') - if cmd[0] == "done": - break - assert cmd[0] == 'restart', "script sent invalid command request to kill task" - # cmd should be: restart - # or to clear, just: restart - restart_daemon(ctx, config, cmd[1], cmd[2], *cmd[3:]) - proc.stdin.writelines(['restarted\n']) - proc.stdin.flush() - try: - proc.wait() - except tor.CommandFailedError: - raise Exception('restart task got non-zero exit status from script: {s}'.format(s=c)) - finally: - log.info('Finishing %s on %s...', task, role) - remote.run( - logger=log.getChild(role), - args=[ - 'rm', '-rf', '--', '{tdir}/restarts.list'.format(tdir=testdir), srcdir, - ], - ) diff --git a/tasks/rgw.py b/tasks/rgw.py deleted file mode 100644 index 8480380fbd6..00000000000 --- a/tasks/rgw.py +++ /dev/null @@ -1,837 +0,0 @@ -""" -rgw routines -""" -import argparse -import contextlib -import json -import logging -import os - -from cStringIO import StringIO - -from teuthology.orchestra import run -from teuthology import misc as teuthology -from teuthology import contextutil -from teuthology.orchestra.run 
import CommandFailedError -from util.rgw import rgwadmin -from util.rados import (rados, create_ec_pool, - create_replicated_pool, - create_cache_pool) - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def create_apache_dirs(ctx, config): - """ - Remotely create apache directories. Delete when finished. - """ - log.info('Creating apache directories...') - testdir = teuthology.get_testdir(ctx) - for client in config.iterkeys(): - ctx.cluster.only(client).run( - args=[ - 'mkdir', - '-p', - '{tdir}/apache/htdocs.{client}'.format(tdir=testdir, - client=client), - '{tdir}/apache/tmp.{client}/fastcgi_sock'.format( - tdir=testdir, - client=client), - run.Raw('&&'), - 'mkdir', - '{tdir}/archive/apache.{client}'.format(tdir=testdir, - client=client), - ], - ) - try: - yield - finally: - log.info('Cleaning up apache directories...') - for client in config.iterkeys(): - ctx.cluster.only(client).run( - args=[ - 'rm', - '-rf', - '{tdir}/apache/tmp.{client}'.format(tdir=testdir, - client=client), - run.Raw('&&'), - 'rmdir', - '{tdir}/apache/htdocs.{client}'.format(tdir=testdir, - client=client), - ], - ) - - for client in config.iterkeys(): - ctx.cluster.only(client).run( - args=[ - 'rmdir', - '{tdir}/apache'.format(tdir=testdir), - ], - check_status=False, # only need to remove once per host - ) - - -@contextlib.contextmanager -def ship_apache_configs(ctx, config, role_endpoints): - """ - Ship apache config and rgw.fgci to all clients. Clean up on termination - """ - assert isinstance(config, dict) - assert isinstance(role_endpoints, dict) - testdir = teuthology.get_testdir(ctx) - log.info('Shipping apache config and rgw.fcgi...') - src = os.path.join(os.path.dirname(__file__), 'apache.conf.template') - for client, conf in config.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() - system_type = teuthology.get_system_type(remote) - if not conf: - conf = {} - idle_timeout = conf.get('idle_timeout', ctx.rgw.default_idle_timeout) - if system_type == 'deb': - mod_path = '/usr/lib/apache2/modules' - print_continue = 'on' - user = 'www-data' - group = 'www-data' - apache24_modconfig = ''' - IncludeOptional /etc/apache2/mods-available/mpm_event.conf - IncludeOptional /etc/apache2/mods-available/mpm_event.load -''' - else: - mod_path = '/usr/lib64/httpd/modules' - print_continue = 'off' - user = 'apache' - group = 'apache' - apache24_modconfig = \ - 'IncludeOptional /etc/httpd/conf.modules.d/00-mpm.conf' - host, port = role_endpoints[client] - with file(src, 'rb') as f: - conf = f.read().format( - testdir=testdir, - mod_path=mod_path, - print_continue=print_continue, - host=host, - port=port, - client=client, - idle_timeout=idle_timeout, - user=user, - group=group, - apache24_modconfig=apache24_modconfig, - ) - teuthology.write_file( - remote=remote, - path='{tdir}/apache/apache.{client}.conf'.format( - tdir=testdir, - client=client), - data=conf, - ) - teuthology.write_file( - remote=remote, - path='{tdir}/apache/htdocs.{client}/rgw.fcgi'.format( - tdir=testdir, - client=client), - data="""#!/bin/sh -ulimit -c unlimited -exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring --rgw-socket-path {tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock - -""".format(tdir=testdir, client=client) - ) - remote.run( - args=[ - 'chmod', - 'a=rx', - '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir, - client=client), - ], - ) - try: - yield - finally: - log.info('Removing apache config...') - for client in config.iterkeys(): - ctx.cluster.only(client).run( - args=[ - 'rm', 
- '-f', - '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir, - client=client), - run.Raw('&&'), - 'rm', - '-f', - '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format( - tdir=testdir, - client=client), - ], - ) - - -@contextlib.contextmanager -def start_rgw(ctx, config): - """ - Start rgw on remote sites. - """ - log.info('Starting rgw...') - testdir = teuthology.get_testdir(ctx) - for client in config.iterkeys(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() - - client_config = config.get(client) - if client_config is None: - client_config = {} - log.info("rgw %s config is %s", client, client_config) - id_ = client.split('.', 1)[1] - log.info('client {client} is id {id}'.format(client=client, id=id_)) - cmd_prefix = [ - 'sudo', - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'daemon-helper', - 'term', - ] - - rgw_cmd = ['radosgw'] - - if ctx.rgw.frontend == 'apache': - rgw_cmd.extend([ - '--rgw-socket-path', - '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format( - tdir=testdir, - client=client, - ), - ]) - elif ctx.rgw.frontend == 'civetweb': - host, port = ctx.rgw.role_endpoints[client] - rgw_cmd.extend([ - '--rgw-frontends', - 'civetweb port={port}'.format(port=port), - ]) - - rgw_cmd.extend([ - '-n', client, - '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client), - '--log-file', - '/var/log/ceph/rgw.{client}.log'.format(client=client), - '--rgw_ops_log_socket_path', - '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir, - client=client), - '--foreground', - run.Raw('|'), - 'sudo', - 'tee', - '/var/log/ceph/rgw.{client}.stdout'.format(tdir=testdir, - client=client), - run.Raw('2>&1'), - ]) - - if client_config.get('valgrind'): - cmd_prefix = teuthology.get_valgrind_args( - testdir, - client, - cmd_prefix, - client_config.get('valgrind') - ) - - run_cmd = list(cmd_prefix) - run_cmd.extend(rgw_cmd) - - ctx.daemons.add_daemon( - remote, 'rgw', client, - args=run_cmd, - logger=log.getChild(client), - stdin=run.PIPE, - wait=False, - ) - - try: - yield - finally: - teuthology.stop_daemons_of_type(ctx, 'rgw') - for client in config.iterkeys(): - ctx.cluster.only(client).run( - args=[ - 'rm', - '-f', - '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir, - client=client), - ], - ) - - -@contextlib.contextmanager -def start_apache(ctx, config): - """ - Start apache on remote sites. - """ - log.info('Starting apache...') - testdir = teuthology.get_testdir(ctx) - apaches = {} - for client in config.iterkeys(): - (remote,) = ctx.cluster.only(client).remotes.keys() - system_type = teuthology.get_system_type(remote) - if system_type == 'deb': - apache_name = 'apache2' - else: - try: - remote.run( - args=[ - 'stat', - '/usr/sbin/httpd.worker', - ], - ) - apache_name = '/usr/sbin/httpd.worker' - except CommandFailedError: - apache_name = '/usr/sbin/httpd' - - proc = remote.run( - args=[ - 'adjust-ulimits', - 'daemon-helper', - 'kill', - apache_name, - '-X', - '-f', - '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir, - client=client), - ], - logger=log.getChild(client), - stdin=run.PIPE, - wait=False, - ) - apaches[client] = proc - - try: - yield - finally: - log.info('Stopping apache...') - for client, proc in apaches.iteritems(): - proc.stdin.close() - - run.wait(apaches.itervalues()) - - -def extract_user_info(client_config): - """ - Extract user info from the client config specified. Returns a dict - that includes system key information. 
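    For example (illustrative values only, in the 'system user' format shown
    in the task docstring below), a client config containing

        system user:
          name: foo-system
          access key: <key>
          secret key: <secret>

    yields {'system_key': {'user': 'foo-system', 'access_key': <key>,
    'secret_key': <secret>}}; if the 'system user' section or its 'name'
    is missing, None is returned instead.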
- """ - # test if there isn't a system user or if there isn't a name for that - # user, return None - if ('system user' not in client_config or - 'name' not in client_config['system user']): - return None - - user_info = dict() - user_info['system_key'] = dict( - user=client_config['system user']['name'], - access_key=client_config['system user']['access key'], - secret_key=client_config['system user']['secret key'], - ) - return user_info - - -def extract_zone_info(ctx, client, client_config): - """ - Get zone information. - :param client: dictionary of client information - :param client_config: dictionary of client configuration information - :returns: zone extracted from client and client_config information - """ - ceph_config = ctx.ceph.conf.get('global', {}) - ceph_config.update(ctx.ceph.conf.get('client', {})) - ceph_config.update(ctx.ceph.conf.get(client, {})) - for key in ['rgw zone', 'rgw region', 'rgw zone root pool']: - assert key in ceph_config, \ - 'ceph conf must contain {key} for {client}'.format(key=key, - client=client) - region = ceph_config['rgw region'] - zone = ceph_config['rgw zone'] - zone_info = dict() - for key in ['rgw control pool', 'rgw gc pool', 'rgw log pool', - 'rgw intent log pool', 'rgw usage log pool', - 'rgw user keys pool', 'rgw user email pool', - 'rgw user swift pool', 'rgw user uid pool', - 'rgw domain root']: - new_key = key.split(' ', 1)[1] - new_key = new_key.replace(' ', '_') - - if key in ceph_config: - value = ceph_config[key] - log.debug('{key} specified in ceph_config ({val})'.format( - key=key, val=value)) - zone_info[new_key] = value - else: - zone_info[new_key] = '.' + region + '.' + zone + '.' + new_key - - index_pool = '.' + region + '.' + zone + '.' + 'index_pool' - data_pool = '.' + region + '.' + zone + '.' + 'data_pool' - data_extra_pool = '.' + region + '.' + zone + '.' + 'data_extra_pool' - - zone_info['placement_pools'] = [{'key': 'default_placement', - 'val': {'index_pool': index_pool, - 'data_pool': data_pool, - 'data_extra_pool': data_extra_pool} - }] - - # these keys are meant for the zones argument in the region info. We - # insert them into zone_info with a different format and then remove them - # in the fill_in_endpoints() method - for key in ['rgw log meta', 'rgw log data']: - if key in ceph_config: - zone_info[key] = ceph_config[key] - - # these keys are meant for the zones argument in the region info. We - # insert them into zone_info with a different format and then remove them - # in the fill_in_endpoints() method - for key in ['rgw log meta', 'rgw log data']: - if key in ceph_config: - zone_info[key] = ceph_config[key] - - return region, zone, zone_info - - -def extract_region_info(region, region_info): - """ - Extract region information from the region_info parameter, using get - to set default values. - - :param region: name of the region - :param region_info: region information (in dictionary form). - :returns: dictionary of region information set from region_info, using - default values for missing fields. 
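    For example (illustrative), extract_region_info('foo',
    {'zones': ['foo-1'], 'is master': True}) returns a dict with
    name='foo', api_name='foo', is_master=True, master_zone='foo-1',
    log_meta=False, log_data=False, a single 'default_placement'
    placement target, and default_placement='default_placement'; every
    field the caller does not supply falls back to these defaults.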
- """ - assert isinstance(region_info['zones'], list) and region_info['zones'], \ - 'zones must be a non-empty list' - return dict( - name=region, - api_name=region_info.get('api name', region), - is_master=region_info.get('is master', False), - log_meta=region_info.get('log meta', False), - log_data=region_info.get('log data', False), - master_zone=region_info.get('master zone', region_info['zones'][0]), - placement_targets=region_info.get('placement targets', - [{'name': 'default_placement', - 'tags': []}]), - default_placement=region_info.get('default placement', - 'default_placement'), - ) - - -def assign_ports(ctx, config): - """ - Assign port numberst starting with port 7280. - """ - port = 7280 - role_endpoints = {} - for remote, roles_for_host in ctx.cluster.remotes.iteritems(): - for role in roles_for_host: - if role in config: - role_endpoints[role] = (remote.name.split('@')[1], port) - port += 1 - - return role_endpoints - - -def fill_in_endpoints(region_info, role_zones, role_endpoints): - """ - Iterate through the list of role_endpoints, filling in zone information - - :param region_info: region data - :param role_zones: region and zone information. - :param role_endpoints: endpoints being used - """ - for role, (host, port) in role_endpoints.iteritems(): - region, zone, zone_info, _ = role_zones[role] - host, port = role_endpoints[role] - endpoint = 'http://{host}:{port}/'.format(host=host, port=port) - # check if the region specified under client actually exists - # in region_info (it should, if properly configured). - # If not, throw a reasonable error - if region not in region_info: - raise Exception( - 'Region: {region} was specified but no corresponding' - ' entry was found under \'regions\''.format(region=region)) - - region_conf = region_info[region] - region_conf.setdefault('endpoints', []) - region_conf['endpoints'].append(endpoint) - - # this is the payload for the 'zones' field in the region field - zone_payload = dict() - zone_payload['endpoints'] = [endpoint] - zone_payload['name'] = zone - - # Pull the log meta and log data settings out of zone_info, if they - # exist, then pop them as they don't actually belong in the zone info - for key in ['rgw log meta', 'rgw log data']: - new_key = key.split(' ', 1)[1] - new_key = new_key.replace(' ', '_') - - if key in zone_info: - value = zone_info.pop(key) - else: - value = 'false' - - zone_payload[new_key] = value - - region_conf.setdefault('zones', []) - region_conf['zones'].append(zone_payload) - - -@contextlib.contextmanager -def configure_users(ctx, config, everywhere=False): - """ - Create users by remotely running rgwadmin commands using extracted - user information. - """ - log.info('Configuring users...') - - # extract the user info and append it to the payload tuple for the given - # client - for client, c_config in config.iteritems(): - if not c_config: - continue - user_info = extract_user_info(c_config) - if not user_info: - continue - - # For data sync the master zones and regions must have the - # system users of the secondary zones. To keep this simple, - # just create the system users on every client if regions are - # configured. 
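    # Illustrative note (not part of the original source): with the
    # multi-region example from the task docstring and everywhere=True,
    # the 'foo-system', 'bar1' and 'bar2' system users are each created on
    # client.0, client.1 and client.2, so the radosgw in every zone can
    # authenticate against every other zone during metadata/data sync.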
- clients_to_create_as = [client] - if everywhere: - clients_to_create_as = config.keys() - for client_name in clients_to_create_as: - log.debug('Creating user {user} on {client}'.format( - user=user_info['system_key']['user'], client=client)) - rgwadmin(ctx, client_name, - cmd=[ - 'user', 'create', - '--uid', user_info['system_key']['user'], - '--access-key', user_info['system_key']['access_key'], - '--secret', user_info['system_key']['secret_key'], - '--display-name', user_info['system_key']['user'], - '--system', - ], - check_status=True, - ) - - yield - - -@contextlib.contextmanager -def create_nonregion_pools(ctx, config, regions): - """Create replicated or erasure coded data pools for rgw.""" - if regions: - yield - return - - log.info('creating data pools') - for client in config.keys(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() - data_pool = '.rgw.buckets' - if ctx.rgw.ec_data_pool: - create_ec_pool(remote, data_pool, client, 64) - else: - create_replicated_pool(remote, data_pool, 64) - if ctx.rgw.cache_pools: - create_cache_pool(remote, data_pool, data_pool + '.cache', 64, - 64*1024*1024) - yield - - -@contextlib.contextmanager -def configure_regions_and_zones(ctx, config, regions, role_endpoints): - """ - Configure regions and zones from rados and rgw. - """ - if not regions: - log.debug( - 'In rgw.configure_regions_and_zones() and regions is None. ' - 'Bailing') - yield - return - - log.info('Configuring regions and zones...') - - log.debug('config is %r', config) - log.debug('regions are %r', regions) - log.debug('role_endpoints = %r', role_endpoints) - # extract the zone info - role_zones = dict([(client, extract_zone_info(ctx, client, c_config)) - for client, c_config in config.iteritems()]) - log.debug('roles_zones = %r', role_zones) - - # extract the user info and append it to the payload tuple for the given - # client - for client, c_config in config.iteritems(): - if not c_config: - user_info = None - else: - user_info = extract_user_info(c_config) - - (region, zone, zone_info) = role_zones[client] - role_zones[client] = (region, zone, zone_info, user_info) - - region_info = dict([ - (region_name, extract_region_info(region_name, r_config)) - for region_name, r_config in regions.iteritems()]) - - fill_in_endpoints(region_info, role_zones, role_endpoints) - - # clear out the old defaults - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - # removing these objects from .rgw.root and the per-zone root pools - # may or may not matter - rados(ctx, mon, - cmd=['-p', '.rgw.root', 'rm', 'region_info.default']) - rados(ctx, mon, - cmd=['-p', '.rgw.root', 'rm', 'zone_info.default']) - - for client in config.iterkeys(): - for role, (_, zone, zone_info, user_info) in role_zones.iteritems(): - rados(ctx, mon, - cmd=['-p', zone_info['domain_root'], - 'rm', 'region_info.default']) - rados(ctx, mon, - cmd=['-p', zone_info['domain_root'], - 'rm', 'zone_info.default']) - - (remote,) = ctx.cluster.only(role).remotes.keys() - for pool_info in zone_info['placement_pools']: - remote.run(args=['ceph', 'osd', 'pool', 'create', - pool_info['val']['index_pool'], '64', '64']) - if ctx.rgw.ec_data_pool: - create_ec_pool(remote, pool_info['val']['data_pool'], - zone, 64) - else: - create_replicated_pool( - remote, pool_info['val']['data_pool'], - 64) - - rgwadmin(ctx, client, - cmd=['-n', client, 'zone', 'set', '--rgw-zone', zone], - stdin=StringIO(json.dumps(dict( - zone_info.items() + user_info.items()))), - 
check_status=True) - - for region, info in region_info.iteritems(): - region_json = json.dumps(info) - log.debug('region info is: %s', region_json) - rgwadmin(ctx, client, - cmd=['-n', client, 'region', 'set'], - stdin=StringIO(region_json), - check_status=True) - if info['is_master']: - rgwadmin(ctx, client, - cmd=['-n', client, - 'region', 'default', - '--rgw-region', region], - check_status=True) - - rgwadmin(ctx, client, cmd=['-n', client, 'regionmap', 'update']) - yield - - -@contextlib.contextmanager -def task(ctx, config): - """ - Either use configure apache to run a rados gateway, or use the built-in - civetweb server. - Only one should be run per machine, since it uses a hard-coded port for - now. - - For example, to run rgw on all clients:: - - tasks: - - ceph: - - rgw: - - To only run on certain clients:: - - tasks: - - ceph: - - rgw: [client.0, client.3] - - or - - tasks: - - ceph: - - rgw: - client.0: - client.3: - - You can adjust the idle timeout for fastcgi (default is 30 seconds): - - tasks: - - ceph: - - rgw: - client.0: - idle_timeout: 90 - - To run radosgw through valgrind: - - tasks: - - ceph: - - rgw: - client.0: - valgrind: [--tool=memcheck] - client.3: - valgrind: [--tool=memcheck] - - To use civetweb instead of apache: - - tasks: - - ceph: - - rgw: - - client.0 - overrides: - rgw: - frontend: civetweb - - Note that without a modified fastcgi module e.g. with the default - one on CentOS, you must have rgw print continue = false in ceph.conf:: - - tasks: - - ceph: - conf: - global: - rgw print continue: false - - rgw: [client.0] - - To run rgws for multiple regions or zones, describe the regions - and their zones in a regions section. The endpoints will be - generated by this task. Each client must have a region, zone, - and pools assigned in ceph.conf:: - - tasks: - - install: - - ceph: - conf: - client.0: - rgw region: foo - rgw zone: foo-1 - rgw region root pool: .rgw.rroot.foo - rgw zone root pool: .rgw.zroot.foo - rgw log meta: true - rgw log data: true - client.1: - rgw region: bar - rgw zone: bar-master - rgw region root pool: .rgw.rroot.bar - rgw zone root pool: .rgw.zroot.bar - rgw log meta: true - rgw log data: true - client.2: - rgw region: bar - rgw zone: bar-secondary - rgw region root pool: .rgw.rroot.bar - rgw zone root pool: .rgw.zroot.bar-secondary - - rgw: - default_idle_timeout: 30 - ec-data-pool: true - regions: - foo: - api name: api_name # default: region name - is master: true # default: false - master zone: foo-1 # default: first zone - zones: [foo-1] - log meta: true - log data: true - placement targets: [target1, target2] # default: [] - default placement: target2 # default: '' - bar: - api name: bar-api - zones: [bar-master, bar-secondary] - client.0: - system user: - name: foo-system - access key: X2IYPSTY1072DDY1SJMC - secret key: YIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm - client.1: - system user: - name: bar1 - access key: Y2IYPSTY1072DDY1SJMC - secret key: XIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm - client.2: - system user: - name: bar2 - access key: Z2IYPSTY1072DDY1SJMC - secret key: ZIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm - """ - if config is None: - config = dict(('client.{id}'.format(id=id_), None) - for id_ in teuthology.all_roles_of_type( - ctx.cluster, 'client')) - elif isinstance(config, list): - config = dict((name, None) for name in config) - - overrides = ctx.config.get('overrides', {}) - teuthology.deep_merge(config, overrides.get('rgw', {})) - - regions = {} - if 'regions' in config: - # separate region info so only clients 
are keys in config - regions = config['regions'] - del config['regions'] - - role_endpoints = assign_ports(ctx, config) - ctx.rgw = argparse.Namespace() - ctx.rgw.role_endpoints = role_endpoints - # stash the region info for later, since it was deleted from the config - # structure - ctx.rgw.regions = regions - - ctx.rgw.ec_data_pool = False - if 'ec-data-pool' in config: - ctx.rgw.ec_data_pool = bool(config['ec-data-pool']) - del config['ec-data-pool'] - ctx.rgw.default_idle_timeout = 30 - if 'default_idle_timeout' in config: - ctx.rgw.default_idle_timeout = int(config['default_idle_timeout']) - del config['default_idle_timeout'] - ctx.rgw.cache_pools = False - if 'cache-pools' in config: - ctx.rgw.cache_pools = bool(config['cache-pools']) - del config['cache-pools'] - - ctx.rgw.frontend = 'apache' - if 'frontend' in config: - ctx.rgw.frontend = config['frontend'] - del config['frontend'] - - subtasks = [ - lambda: configure_regions_and_zones( - ctx=ctx, - config=config, - regions=regions, - role_endpoints=role_endpoints, - ), - lambda: configure_users( - ctx=ctx, - config=config, - everywhere=bool(regions), - ), - lambda: create_nonregion_pools( - ctx=ctx, config=config, regions=regions), - ] - if ctx.rgw.frontend == 'apache': - subtasks.insert(0, lambda: create_apache_dirs(ctx=ctx, config=config)) - subtasks.extend([ - lambda: ship_apache_configs(ctx=ctx, config=config, - role_endpoints=role_endpoints), - lambda: start_rgw(ctx=ctx, config=config), - lambda: start_apache(ctx=ctx, config=config), - ]) - elif ctx.rgw.frontend == 'civetweb': - subtasks.extend([ - lambda: start_rgw(ctx=ctx, config=config), - ]) - else: - raise ValueError("frontend must be 'apache' or 'civetweb'") - - log.info("Using %s as radosgw frontend", ctx.rgw.frontend) - with contextutil.nested(*subtasks): - yield diff --git a/tasks/rgw_logsocket.py b/tasks/rgw_logsocket.py deleted file mode 100644 index 6f49b00d8a4..00000000000 --- a/tasks/rgw_logsocket.py +++ /dev/null @@ -1,161 +0,0 @@ -""" -rgw s3tests logging wrappers -""" -from cStringIO import StringIO -from configobj import ConfigObj -import contextlib -import logging -import s3tests - -from teuthology import misc as teuthology -from teuthology import contextutil - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def download(ctx, config): - """ - Run s3tests download function - """ - return s3tests.download(ctx, config) - -def _config_user(s3tests_conf, section, user): - """ - Run s3tests user config function - """ - return s3tests._config_user(s3tests_conf, section, user) - -@contextlib.contextmanager -def create_users(ctx, config): - """ - Run s3tests user create function - """ - return s3tests.create_users(ctx, config) - -@contextlib.contextmanager -def configure(ctx, config): - """ - Run s3tests user configure function - """ - return s3tests.configure(ctx, config) - -@contextlib.contextmanager -def run_tests(ctx, config): - """ - Run remote netcat tests - """ - assert isinstance(config, dict) - testdir = teuthology.get_testdir(ctx) - for client, client_config in config.iteritems(): - client_config['extra_args'] = [ - 's3tests.functional.test_s3:test_bucket_list_return_data', - ] -# args = [ -# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client), -# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir), -# '-w', -# '{tdir}/s3-tests'.format(tdir=testdir), -# '-v', -# 's3tests.functional.test_s3:test_bucket_list_return_data', -# ] -# if client_config is not None and 'extra_args' in 
client_config: -# args.extend(client_config['extra_args']) -# -# ctx.cluster.only(client).run( -# args=args, -# ) - - s3tests.run_tests(ctx, config) - - netcat_out = StringIO() - - for client, client_config in config.iteritems(): - ctx.cluster.only(client).run( - args = [ - 'netcat', - '-w', '5', - '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir), - ], - stdout = netcat_out, - ) - - out = netcat_out.getvalue() - - assert len(out) > 100 - - log.info('Received', out) - - yield - - -@contextlib.contextmanager -def task(ctx, config): - """ - Run some s3-tests suite against rgw, verify opslog socket returns data - - Must restrict testing to a particular client:: - - tasks: - - ceph: - - rgw: [client.0] - - s3tests: [client.0] - - To pass extra arguments to nose (e.g. to run a certain test):: - - tasks: - - ceph: - - rgw: [client.0] - - s3tests: - client.0: - extra_args: ['test_s3:test_object_acl_grand_public_read'] - client.1: - extra_args: ['--exclude', 'test_100_continue'] - """ - assert config is None or isinstance(config, list) \ - or isinstance(config, dict), \ - "task s3tests only supports a list or dictionary for configuration" - all_clients = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - if config is None: - config = all_clients - if isinstance(config, list): - config = dict.fromkeys(config) - clients = config.keys() - - overrides = ctx.config.get('overrides', {}) - # merge each client section, not the top level. - for (client, cconf) in config.iteritems(): - teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {})) - - log.debug('config is %s', config) - - s3tests_conf = {} - for client in clients: - s3tests_conf[client] = ConfigObj( - indent_type='', - infile={ - 'DEFAULT': - { - 'port' : 7280, - 'is_secure' : 'no', - }, - 'fixtures' : {}, - 's3 main' : {}, - 's3 alt' : {}, - } - ) - - with contextutil.nested( - lambda: download(ctx=ctx, config=config), - lambda: create_users(ctx=ctx, config=dict( - clients=clients, - s3tests_conf=s3tests_conf, - )), - lambda: configure(ctx=ctx, config=dict( - clients=config, - s3tests_conf=s3tests_conf, - )), - lambda: run_tests(ctx=ctx, config=config), - ): - yield diff --git a/tasks/s3readwrite.py b/tasks/s3readwrite.py deleted file mode 100644 index 9f1507ef816..00000000000 --- a/tasks/s3readwrite.py +++ /dev/null @@ -1,346 +0,0 @@ -""" -Run rgw s3 readwite tests -""" -from cStringIO import StringIO -import base64 -import contextlib -import logging -import os -import random -import string -import yaml - -from teuthology import misc as teuthology -from teuthology import contextutil -from teuthology.config import config as teuth_config -from teuthology.orchestra import run -from teuthology.orchestra.connection import split_user - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def download(ctx, config): - """ - Download the s3 tests from the git builder. - Remove downloaded s3 file upon exit. - - The context passed in should be identical to the context - passed in to the main task. 
- """ - assert isinstance(config, dict) - log.info('Downloading s3-tests...') - testdir = teuthology.get_testdir(ctx) - for (client, cconf) in config.items(): - branch = cconf.get('force-branch', None) - if not branch: - branch = cconf.get('branch', 'master') - sha1 = cconf.get('sha1') - ctx.cluster.only(client).run( - args=[ - 'git', 'clone', - '-b', branch, - teuth_config.ceph_git_base_url + 's3-tests.git', - '{tdir}/s3-tests'.format(tdir=testdir), - ], - ) - if sha1 is not None: - ctx.cluster.only(client).run( - args=[ - 'cd', '{tdir}/s3-tests'.format(tdir=testdir), - run.Raw('&&'), - 'git', 'reset', '--hard', sha1, - ], - ) - try: - yield - finally: - log.info('Removing s3-tests...') - testdir = teuthology.get_testdir(ctx) - for client in config: - ctx.cluster.only(client).run( - args=[ - 'rm', - '-rf', - '{tdir}/s3-tests'.format(tdir=testdir), - ], - ) - - -def _config_user(s3tests_conf, section, user): - """ - Configure users for this section by stashing away keys, ids, and - email addresses. - """ - s3tests_conf[section].setdefault('user_id', user) - s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) - s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user)) - s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20))) - s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40))) - -@contextlib.contextmanager -def create_users(ctx, config): - """ - Create a default s3 user. - """ - assert isinstance(config, dict) - log.info('Creating rgw users...') - testdir = teuthology.get_testdir(ctx) - users = {'s3': 'foo'} - cached_client_user_names = dict() - for client in config['clients']: - cached_client_user_names[client] = dict() - s3tests_conf = config['s3tests_conf'][client] - s3tests_conf.setdefault('readwrite', {}) - s3tests_conf['readwrite'].setdefault('bucket', 'rwtest-' + client + '-{random}-') - s3tests_conf['readwrite'].setdefault('readers', 10) - s3tests_conf['readwrite'].setdefault('writers', 3) - s3tests_conf['readwrite'].setdefault('duration', 300) - s3tests_conf['readwrite'].setdefault('files', {}) - rwconf = s3tests_conf['readwrite'] - rwconf['files'].setdefault('num', 10) - rwconf['files'].setdefault('size', 2000) - rwconf['files'].setdefault('stddev', 500) - for section, user in users.iteritems(): - _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) - log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'], - client=client)) - - # stash the 'delete_user' flag along with user name for easier cleanup - delete_this_user = True - if 'delete_user' in s3tests_conf['s3']: - delete_this_user = s3tests_conf['s3']['delete_user'] - log.debug('delete_user set to {flag} for {client}'.format(flag=delete_this_user, client=client)) - cached_client_user_names[client][section+user] = (s3tests_conf[section]['user_id'], delete_this_user) - - # skip actual user creation if the create_user flag is set to false for this client - if 'create_user' in s3tests_conf['s3'] and s3tests_conf['s3']['create_user'] == False: - log.debug('create_user set to False, skipping user creation for {client}'.format(client=client)) - continue - else: - ctx.cluster.only(client).run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'radosgw-admin', - '-n', client, - 'user', 'create', - '--uid', s3tests_conf[section]['user_id'], - '--display-name', 
s3tests_conf[section]['display_name'], - '--access-key', s3tests_conf[section]['access_key'], - '--secret', s3tests_conf[section]['secret_key'], - '--email', s3tests_conf[section]['email'], - ], - ) - try: - yield - finally: - for client in config['clients']: - for section, user in users.iteritems(): - #uid = '{user}.{client}'.format(user=user, client=client) - real_uid, delete_this_user = cached_client_user_names[client][section+user] - if delete_this_user: - ctx.cluster.only(client).run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'radosgw-admin', - '-n', client, - 'user', 'rm', - '--uid', real_uid, - '--purge-data', - ], - ) - else: - log.debug('skipping delete for user {uid} on {client}'.format(uid=real_uid, client=client)) - -@contextlib.contextmanager -def configure(ctx, config): - """ - Configure the s3-tests. This includes the running of the - bootstrap code and the updating of local conf files. - """ - assert isinstance(config, dict) - log.info('Configuring s3-readwrite-tests...') - for client, properties in config['clients'].iteritems(): - s3tests_conf = config['s3tests_conf'][client] - if properties is not None and 'rgw_server' in properties: - host = None - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): - log.info('roles: ' + str(roles)) - log.info('target: ' + str(target)) - if properties['rgw_server'] in roles: - _, host = split_user(target) - assert host is not None, "Invalid client specified as the rgw_server" - s3tests_conf['s3']['host'] = host - else: - s3tests_conf['s3']['host'] = 'localhost' - - def_conf = s3tests_conf['DEFAULT'] - s3tests_conf['s3'].setdefault('port', def_conf['port']) - s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure']) - - (remote,) = ctx.cluster.only(client).remotes.keys() - remote.run( - args=[ - 'cd', - '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)), - run.Raw('&&'), - './bootstrap', - ], - ) - conf_fp = StringIO() - conf = dict( - s3=s3tests_conf['s3'], - readwrite=s3tests_conf['readwrite'], - ) - yaml.safe_dump(conf, conf_fp, default_flow_style=False) - teuthology.write_file( - remote=remote, - path='{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=teuthology.get_testdir(ctx), client=client), - data=conf_fp.getvalue(), - ) - yield - - -@contextlib.contextmanager -def run_tests(ctx, config): - """ - Run the s3readwrite tests after everything is set up. - - :param ctx: Context passed to task - :param config: specific configuration information - """ - assert isinstance(config, dict) - testdir = teuthology.get_testdir(ctx) - for client, client_config in config.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() - conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client)) - args = [ - '{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir), - ] - if client_config is not None and 'extra_args' in client_config: - args.extend(client_config['extra_args']) - - ctx.cluster.only(client).run( - args=args, - stdin=conf, - ) - yield - - -@contextlib.contextmanager -def task(ctx, config): - """ - Run the s3tests-test-readwrite suite against rgw. 
- - To run all tests on all clients:: - - tasks: - - ceph: - - rgw: - - s3readwrite: - - To restrict testing to particular clients:: - - tasks: - - ceph: - - rgw: [client.0] - - s3readwrite: [client.0] - - To run against a server on client.1:: - - tasks: - - ceph: - - rgw: [client.1] - - s3readwrite: - client.0: - rgw_server: client.1 - - To pass extra test arguments - - tasks: - - ceph: - - rgw: [client.0] - - s3readwrite: - client.0: - readwrite: - bucket: mybucket - readers: 10 - writers: 3 - duration: 600 - files: - num: 10 - size: 2000 - stddev: 500 - client.1: - ... - - To override s3 configuration - - tasks: - - ceph: - - rgw: [client.0] - - s3readwrite: - client.0: - s3: - user_id: myuserid - display_name: myname - email: my@email - access_key: myaccesskey - secret_key: mysecretkey - - """ - assert config is None or isinstance(config, list) \ - or isinstance(config, dict), \ - "task s3tests only supports a list or dictionary for configuration" - all_clients = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - if config is None: - config = all_clients - if isinstance(config, list): - config = dict.fromkeys(config) - clients = config.keys() - - overrides = ctx.config.get('overrides', {}) - # merge each client section, not the top level. - for client in config.iterkeys(): - if not config[client]: - config[client] = {} - teuthology.deep_merge(config[client], overrides.get('s3readwrite', {})) - - log.debug('in s3readwrite, config is %s', config) - - s3tests_conf = {} - for client in clients: - if config[client] is None: - config[client] = {} - config[client].setdefault('s3', {}) - config[client].setdefault('readwrite', {}) - - s3tests_conf[client] = ({ - 'DEFAULT': - { - 'port' : 7280, - 'is_secure' : False, - }, - 'readwrite' : config[client]['readwrite'], - 's3' : config[client]['s3'], - }) - - with contextutil.nested( - lambda: download(ctx=ctx, config=config), - lambda: create_users(ctx=ctx, config=dict( - clients=clients, - s3tests_conf=s3tests_conf, - )), - lambda: configure(ctx=ctx, config=dict( - clients=config, - s3tests_conf=s3tests_conf, - )), - lambda: run_tests(ctx=ctx, config=config), - ): - pass - yield diff --git a/tasks/s3roundtrip.py b/tasks/s3roundtrip.py deleted file mode 100644 index 4c17144dbae..00000000000 --- a/tasks/s3roundtrip.py +++ /dev/null @@ -1,302 +0,0 @@ -""" -Run rgw roundtrip message tests -""" -from cStringIO import StringIO -import base64 -import contextlib -import logging -import os -import random -import string -import yaml - -from teuthology import misc as teuthology -from teuthology import contextutil -from teuthology.config import config as teuth_config -from teuthology.orchestra import run -from teuthology.orchestra.connection import split_user - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def download(ctx, config): - """ - Download the s3 tests from the git builder. - Remove downloaded s3 file upon exit. - - The context passed in should be identical to the context - passed in to the main task. 
- """ - assert isinstance(config, list) - log.info('Downloading s3-tests...') - testdir = teuthology.get_testdir(ctx) - for client in config: - ctx.cluster.only(client).run( - args=[ - 'git', 'clone', - teuth_config.ceph_git_base_url + 's3-tests.git', - '{tdir}/s3-tests'.format(tdir=testdir), - ], - ) - try: - yield - finally: - log.info('Removing s3-tests...') - for client in config: - ctx.cluster.only(client).run( - args=[ - 'rm', - '-rf', - '{tdir}/s3-tests'.format(tdir=testdir), - ], - ) - -def _config_user(s3tests_conf, section, user): - """ - Configure users for this section by stashing away keys, ids, and - email addresses. - """ - s3tests_conf[section].setdefault('user_id', user) - s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) - s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user)) - s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20))) - s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40))) - -@contextlib.contextmanager -def create_users(ctx, config): - """ - Create a default s3 user. - """ - assert isinstance(config, dict) - log.info('Creating rgw users...') - testdir = teuthology.get_testdir(ctx) - users = {'s3': 'foo'} - for client in config['clients']: - s3tests_conf = config['s3tests_conf'][client] - s3tests_conf.setdefault('roundtrip', {}) - s3tests_conf['roundtrip'].setdefault('bucket', 'rttest-' + client + '-{random}-') - s3tests_conf['roundtrip'].setdefault('readers', 10) - s3tests_conf['roundtrip'].setdefault('writers', 3) - s3tests_conf['roundtrip'].setdefault('duration', 300) - s3tests_conf['roundtrip'].setdefault('files', {}) - rtconf = s3tests_conf['roundtrip'] - rtconf['files'].setdefault('num', 10) - rtconf['files'].setdefault('size', 2000) - rtconf['files'].setdefault('stddev', 500) - for section, user in [('s3', 'foo')]: - _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) - ctx.cluster.only(client).run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'radosgw-admin', - '-n', client, - 'user', 'create', - '--uid', s3tests_conf[section]['user_id'], - '--display-name', s3tests_conf[section]['display_name'], - '--access-key', s3tests_conf[section]['access_key'], - '--secret', s3tests_conf[section]['secret_key'], - '--email', s3tests_conf[section]['email'], - ], - ) - try: - yield - finally: - for client in config['clients']: - for user in users.itervalues(): - uid = '{user}.{client}'.format(user=user, client=client) - ctx.cluster.only(client).run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'radosgw-admin', - '-n', client, - 'user', 'rm', - '--uid', uid, - '--purge-data', - ], - ) - -@contextlib.contextmanager -def configure(ctx, config): - """ - Configure the s3-tests. This includes the running of the - bootstrap code and the updating of local conf files. 
- """ - assert isinstance(config, dict) - log.info('Configuring s3-roundtrip-tests...') - testdir = teuthology.get_testdir(ctx) - for client, properties in config['clients'].iteritems(): - s3tests_conf = config['s3tests_conf'][client] - if properties is not None and 'rgw_server' in properties: - host = None - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): - log.info('roles: ' + str(roles)) - log.info('target: ' + str(target)) - if properties['rgw_server'] in roles: - _, host = split_user(target) - assert host is not None, "Invalid client specified as the rgw_server" - s3tests_conf['s3']['host'] = host - else: - s3tests_conf['s3']['host'] = 'localhost' - - def_conf = s3tests_conf['DEFAULT'] - s3tests_conf['s3'].setdefault('port', def_conf['port']) - s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure']) - - (remote,) = ctx.cluster.only(client).remotes.keys() - remote.run( - args=[ - 'cd', - '{tdir}/s3-tests'.format(tdir=testdir), - run.Raw('&&'), - './bootstrap', - ], - ) - conf_fp = StringIO() - conf = dict( - s3=s3tests_conf['s3'], - roundtrip=s3tests_conf['roundtrip'], - ) - yaml.safe_dump(conf, conf_fp, default_flow_style=False) - teuthology.write_file( - remote=remote, - path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client), - data=conf_fp.getvalue(), - ) - yield - - -@contextlib.contextmanager -def run_tests(ctx, config): - """ - Run the s3 roundtrip after everything is set up. - - :param ctx: Context passed to task - :param config: specific configuration information - """ - assert isinstance(config, dict) - testdir = teuthology.get_testdir(ctx) - for client, client_config in config.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() - conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client)) - args = [ - '{tdir}/s3-tests/virtualenv/bin/s3tests-test-roundtrip'.format(tdir=testdir), - ] - if client_config is not None and 'extra_args' in client_config: - args.extend(client_config['extra_args']) - - ctx.cluster.only(client).run( - args=args, - stdin=conf, - ) - yield - - -@contextlib.contextmanager -def task(ctx, config): - """ - Run the s3tests-test-roundtrip suite against rgw. - - To run all tests on all clients:: - - tasks: - - ceph: - - rgw: - - s3roundtrip: - - To restrict testing to particular clients:: - - tasks: - - ceph: - - rgw: [client.0] - - s3roundtrip: [client.0] - - To run against a server on client.1:: - - tasks: - - ceph: - - rgw: [client.1] - - s3roundtrip: - client.0: - rgw_server: client.1 - - To pass extra test arguments - - tasks: - - ceph: - - rgw: [client.0] - - s3roundtrip: - client.0: - roundtrip: - bucket: mybucket - readers: 10 - writers: 3 - duration: 600 - files: - num: 10 - size: 2000 - stddev: 500 - client.1: - ... 
- - To override s3 configuration - - tasks: - - ceph: - - rgw: [client.0] - - s3roundtrip: - client.0: - s3: - user_id: myuserid - display_name: myname - email: my@email - access_key: myaccesskey - secret_key: mysecretkey - - """ - assert config is None or isinstance(config, list) \ - or isinstance(config, dict), \ - "task s3tests only supports a list or dictionary for configuration" - all_clients = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - if config is None: - config = all_clients - if isinstance(config, list): - config = dict.fromkeys(config) - clients = config.keys() - - s3tests_conf = {} - for client in clients: - if config[client] is None: - config[client] = {} - config[client].setdefault('s3', {}) - config[client].setdefault('roundtrip', {}) - - s3tests_conf[client] = ({ - 'DEFAULT': - { - 'port' : 7280, - 'is_secure' : False, - }, - 'roundtrip' : config[client]['roundtrip'], - 's3' : config[client]['s3'], - }) - - with contextutil.nested( - lambda: download(ctx=ctx, config=clients), - lambda: create_users(ctx=ctx, config=dict( - clients=clients, - s3tests_conf=s3tests_conf, - )), - lambda: configure(ctx=ctx, config=dict( - clients=config, - s3tests_conf=s3tests_conf, - )), - lambda: run_tests(ctx=ctx, config=config), - ): - pass - yield diff --git a/tasks/s3tests.py b/tasks/s3tests.py deleted file mode 100644 index d0f6431dd5f..00000000000 --- a/tasks/s3tests.py +++ /dev/null @@ -1,442 +0,0 @@ -""" -Run a set of s3 tests on rgw. -""" -from cStringIO import StringIO -from configobj import ConfigObj -import base64 -import contextlib -import logging -import os -import random -import string - -import util.rgw as rgw_utils - -from teuthology import misc as teuthology -from teuthology import contextutil -from teuthology.config import config as teuth_config -from teuthology.orchestra import run -from teuthology.orchestra.connection import split_user - -log = logging.getLogger(__name__) - -def extract_sync_client_data(ctx, client_name): - """ - Extract synchronized client rgw zone and rgw region information. 
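    Illustrative return value (names taken from the rgw task docstring,
    endpoint values hypothetical): ('bar', {'api_name': 'bar-api',
    'is_master': False, 'host': <endpoint host>, 'port': <endpoint port>,
    'sync_agent_addr': ..., 'sync_agent_port': ...}); the sync_agent_*
    entries are only added when the client's region is not the master, and
    (None, None) is returned when no matching zone or ceph conf entry is
    found.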
- - :param ctx: Context passed to the s3tests task - :param name: Name of client that we are synching with - """ - return_region_name = None - return_dict = None - client = ctx.ceph.conf.get(client_name, None) - if client: - current_client_zone = client.get('rgw zone', None) - if current_client_zone: - (endpoint_host, endpoint_port) = ctx.rgw.role_endpoints.get(client_name, (None, None)) - # pull out the radosgw_agent stuff - regions = ctx.rgw.regions - for region in regions: - log.debug('jbuck, region is {region}'.format(region=region)) - region_data = ctx.rgw.regions[region] - log.debug('region data is {region}'.format(region=region_data)) - zones = region_data['zones'] - for zone in zones: - if current_client_zone in zone: - return_region_name = region - return_dict = dict() - return_dict['api_name'] = region_data['api name'] - return_dict['is_master'] = region_data['is master'] - return_dict['port'] = endpoint_port - return_dict['host'] = endpoint_host - - # The s3tests expect the sync_agent_[addr|port} to be - # set on the non-master node for some reason - if not region_data['is master']: - (rgwagent_host, rgwagent_port) = ctx.radosgw_agent.endpoint - (return_dict['sync_agent_addr'], _) = ctx.rgw.role_endpoints[rgwagent_host] - return_dict['sync_agent_port'] = rgwagent_port - - else: #if client_zone: - log.debug('No zone info for {host}'.format(host=client_name)) - else: # if client - log.debug('No ceph conf for {host}'.format(host=client_name)) - - return return_region_name, return_dict - -def update_conf_with_region_info(ctx, config, s3tests_conf): - """ - Scan for a client (passed in s3tests_conf) that is an s3agent - with which we can sync. Update information in local conf file - if such a client is found. - """ - for key in s3tests_conf.keys(): - # we'll assume that there's only one sync relationship (source / destination) with client.X - # as the key for now - - # Iterate through all of the radosgw_agent (rgwa) configs and see if a - # given client is involved in a relationship. 
- # If a given client isn't, skip it - this_client_in_rgwa_config = False - for rgwa in ctx.radosgw_agent.config.keys(): - rgwa_data = ctx.radosgw_agent.config[rgwa] - - if key in rgwa_data['src'] or key in rgwa_data['dest']: - this_client_in_rgwa_config = True - log.debug('{client} is in an radosgw-agent sync relationship'.format(client=key)) - radosgw_sync_data = ctx.radosgw_agent.config[key] - break - if not this_client_in_rgwa_config: - log.debug('{client} is NOT in an radosgw-agent sync relationship'.format(client=key)) - continue - - source_client = radosgw_sync_data['src'] - dest_client = radosgw_sync_data['dest'] - - # #xtract the pertinent info for the source side - source_region_name, source_region_dict = extract_sync_client_data(ctx, source_client) - log.debug('\t{key} source_region {source_region} source_dict {source_dict}'.format - (key=key,source_region=source_region_name,source_dict=source_region_dict)) - - # The source *should* be the master region, but test anyway and then set it as the default region - if source_region_dict['is_master']: - log.debug('Setting {region} as default_region'.format(region=source_region_name)) - s3tests_conf[key]['fixtures'].setdefault('default_region', source_region_name) - - # Extract the pertinent info for the destination side - dest_region_name, dest_region_dict = extract_sync_client_data(ctx, dest_client) - log.debug('\t{key} dest_region {dest_region} dest_dict {dest_dict}'.format - (key=key,dest_region=dest_region_name,dest_dict=dest_region_dict)) - - # now add these regions to the s3tests_conf object - s3tests_conf[key]['region {region_name}'.format(region_name=source_region_name)] = source_region_dict - s3tests_conf[key]['region {region_name}'.format(region_name=dest_region_name)] = dest_region_dict - -@contextlib.contextmanager -def download(ctx, config): - """ - Download the s3 tests from the git builder. - Remove downloaded s3 file upon exit. - - The context passed in should be identical to the context - passed in to the main task. - """ - assert isinstance(config, dict) - log.info('Downloading s3-tests...') - testdir = teuthology.get_testdir(ctx) - for (client, cconf) in config.items(): - branch = cconf.get('force-branch', None) - if not branch: - ceph_branch = ctx.config.get('branch') - suite_branch = ctx.config.get('suite_branch', ceph_branch) - branch = cconf.get('branch', suite_branch) - if not branch: - raise ValueError( - "Could not determine what branch to use for s3tests!") - else: - log.info("Using branch '%s' for s3tests", branch) - sha1 = cconf.get('sha1') - ctx.cluster.only(client).run( - args=[ - 'git', 'clone', - '-b', branch, - teuth_config.ceph_git_base_url + 's3-tests.git', - '{tdir}/s3-tests'.format(tdir=testdir), - ], - ) - if sha1 is not None: - ctx.cluster.only(client).run( - args=[ - 'cd', '{tdir}/s3-tests'.format(tdir=testdir), - run.Raw('&&'), - 'git', 'reset', '--hard', sha1, - ], - ) - try: - yield - finally: - log.info('Removing s3-tests...') - testdir = teuthology.get_testdir(ctx) - for client in config: - ctx.cluster.only(client).run( - args=[ - 'rm', - '-rf', - '{tdir}/s3-tests'.format(tdir=testdir), - ], - ) - - -def _config_user(s3tests_conf, section, user): - """ - Configure users for this section by stashing away keys, ids, and - email addresses. - """ - s3tests_conf[section].setdefault('user_id', user) - s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) - s3tests_conf[section].setdefault('display_name', 'Mr. 
{user}'.format(user=user)) - s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20))) - s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40))) - - -@contextlib.contextmanager -def create_users(ctx, config): - """ - Create a main and an alternate s3 user. - """ - assert isinstance(config, dict) - log.info('Creating rgw users...') - testdir = teuthology.get_testdir(ctx) - users = {'s3 main': 'foo', 's3 alt': 'bar'} - for client in config['clients']: - s3tests_conf = config['s3tests_conf'][client] - s3tests_conf.setdefault('fixtures', {}) - s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-') - for section, user in users.iteritems(): - _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) - log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client)) - ctx.cluster.only(client).run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'radosgw-admin', - '-n', client, - 'user', 'create', - '--uid', s3tests_conf[section]['user_id'], - '--display-name', s3tests_conf[section]['display_name'], - '--access-key', s3tests_conf[section]['access_key'], - '--secret', s3tests_conf[section]['secret_key'], - '--email', s3tests_conf[section]['email'], - ], - ) - try: - yield - finally: - for client in config['clients']: - for user in users.itervalues(): - uid = '{user}.{client}'.format(user=user, client=client) - ctx.cluster.only(client).run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'radosgw-admin', - '-n', client, - 'user', 'rm', - '--uid', uid, - '--purge-data', - ], - ) - - -@contextlib.contextmanager -def configure(ctx, config): - """ - Configure the s3-tests. This includes the running of the - bootstrap code and the updating of local conf files. 
- """ - assert isinstance(config, dict) - log.info('Configuring s3-tests...') - testdir = teuthology.get_testdir(ctx) - for client, properties in config['clients'].iteritems(): - s3tests_conf = config['s3tests_conf'][client] - if properties is not None and 'rgw_server' in properties: - host = None - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): - log.info('roles: ' + str(roles)) - log.info('target: ' + str(target)) - if properties['rgw_server'] in roles: - _, host = split_user(target) - assert host is not None, "Invalid client specified as the rgw_server" - s3tests_conf['DEFAULT']['host'] = host - else: - s3tests_conf['DEFAULT']['host'] = 'localhost' - - if properties is not None and 'slow_backend' in properties: - s3tests_conf['fixtures']['slow backend'] = properties['slow_backend'] - - (remote,) = ctx.cluster.only(client).remotes.keys() - remote.run( - args=[ - 'cd', - '{tdir}/s3-tests'.format(tdir=testdir), - run.Raw('&&'), - './bootstrap', - ], - ) - conf_fp = StringIO() - s3tests_conf.write(conf_fp) - teuthology.write_file( - remote=remote, - path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client), - data=conf_fp.getvalue(), - ) - - log.info('Configuring boto...') - boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template') - for client, properties in config['clients'].iteritems(): - with file(boto_src, 'rb') as f: - (remote,) = ctx.cluster.only(client).remotes.keys() - conf = f.read().format( - idle_timeout=config.get('idle_timeout', 30) - ) - teuthology.write_file( - remote=remote, - path='{tdir}/boto.cfg'.format(tdir=testdir), - data=conf, - ) - - try: - yield - - finally: - log.info('Cleaning up boto...') - for client, properties in config['clients'].iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() - remote.run( - args=[ - 'rm', - '{tdir}/boto.cfg'.format(tdir=testdir), - ], - ) - -@contextlib.contextmanager -def sync_users(ctx, config): - """ - Sync this user. - """ - assert isinstance(config, dict) - # do a full sync if this is a multi-region test - if rgw_utils.multi_region_enabled(ctx): - log.debug('Doing a full sync') - rgw_utils.radosgw_agent_sync_all(ctx) - else: - log.debug('Not a multi-region config; skipping the metadata sync') - - yield - -@contextlib.contextmanager -def run_tests(ctx, config): - """ - Run the s3tests after everything is set up. - - :param ctx: Context passed to task - :param config: specific configuration information - """ - assert isinstance(config, dict) - testdir = teuthology.get_testdir(ctx) - for client, client_config in config.iteritems(): - args = [ - 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client), - 'BOTO_CONFIG={tdir}/boto.cfg'.format(tdir=testdir), - '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir), - '-w', - '{tdir}/s3-tests'.format(tdir=testdir), - '-v', - '-a', '!fails_on_rgw', - ] - if client_config is not None and 'extra_args' in client_config: - args.extend(client_config['extra_args']) - - ctx.cluster.only(client).run( - args=args, - label="s3 tests against rgw" - ) - yield - -@contextlib.contextmanager -def task(ctx, config): - """ - Run the s3-tests suite against rgw. 
- - To run all tests on all clients:: - - tasks: - - ceph: - - rgw: - - s3tests: - - To restrict testing to particular clients:: - - tasks: - - ceph: - - rgw: [client.0] - - s3tests: [client.0] - - To run against a server on client.1 and increase the boto timeout to 10m:: - - tasks: - - ceph: - - rgw: [client.1] - - s3tests: - client.0: - rgw_server: client.1 - idle_timeout: 600 - - To pass extra arguments to nose (e.g. to run a certain test):: - - tasks: - - ceph: - - rgw: [client.0] - - s3tests: - client.0: - extra_args: ['test_s3:test_object_acl_grand_public_read'] - client.1: - extra_args: ['--exclude', 'test_100_continue'] - """ - assert config is None or isinstance(config, list) \ - or isinstance(config, dict), \ - "task s3tests only supports a list or dictionary for configuration" - all_clients = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - if config is None: - config = all_clients - if isinstance(config, list): - config = dict.fromkeys(config) - clients = config.keys() - - overrides = ctx.config.get('overrides', {}) - # merge each client section, not the top level. - for client in config.iterkeys(): - if not config[client]: - config[client] = {} - teuthology.deep_merge(config[client], overrides.get('s3tests', {})) - - log.debug('s3tests config is %s', config) - - s3tests_conf = {} - for client in clients: - s3tests_conf[client] = ConfigObj( - indent_type='', - infile={ - 'DEFAULT': - { - 'port' : 7280, - 'is_secure' : 'no', - }, - 'fixtures' : {}, - 's3 main' : {}, - 's3 alt' : {}, - } - ) - - # Only attempt to add in the region info if there's a radosgw_agent configured - if hasattr(ctx, 'radosgw_agent'): - update_conf_with_region_info(ctx, config, s3tests_conf) - - with contextutil.nested( - lambda: download(ctx=ctx, config=config), - lambda: create_users(ctx=ctx, config=dict( - clients=clients, - s3tests_conf=s3tests_conf, - )), - lambda: sync_users(ctx=ctx, config=config), - lambda: configure(ctx=ctx, config=dict( - clients=config, - s3tests_conf=s3tests_conf, - )), - lambda: run_tests(ctx=ctx, config=config), - ): - pass - yield diff --git a/tasks/samba.py b/tasks/samba.py deleted file mode 100644 index d79bb2a05e9..00000000000 --- a/tasks/samba.py +++ /dev/null @@ -1,243 +0,0 @@ -""" -Samba -""" -import contextlib -import logging -import sys - -from teuthology import misc as teuthology -from teuthology.orchestra import run - -log = logging.getLogger(__name__) - -def get_sambas(ctx, roles): - """ - Scan for roles that are samba. Yield the id of the the samba role - (samba.0, samba.1...) and the associated remote site - - :param ctx: Context - :param roles: roles for this test (extracted from yaml files) - """ - for role in roles: - assert isinstance(role, basestring) - PREFIX = 'samba.' - assert role.startswith(PREFIX) - id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - yield (id_, remote) - -@contextlib.contextmanager -def task(ctx, config): - """ - Setup samba smbd with ceph vfs module. This task assumes the samba - package has already been installed via the install task. - - The config is optional and defaults to starting samba on all nodes. - If a config is given, it is expected to be a list of - samba nodes to start smbd servers on. 
- - Example that starts smbd on all samba nodes:: - - tasks: - - install: - - install: - project: samba - extra_packages: ['samba'] - - ceph: - - samba: - - interactive: - - Example that starts smbd on just one of the samba nodes and cifs on the other:: - - tasks: - - samba: [samba.0] - - cifs: [samba.1] - - An optional backend can be specified, and requires a path which smbd will - use as the backend storage location: - - roles: - - [osd.0, osd.1, osd.2, mon.0, mon.1, mon.2, mds.a] - - [client.0, samba.0] - - tasks: - - ceph: - - ceph-fuse: [client.0] - - samba: - samba.0: - cephfuse: "{testdir}/mnt.0" - - This mounts ceph to {testdir}/mnt.0 using fuse, and starts smbd with - a UNC of //localhost/cephfuse. Access through that UNC will be on - the ceph fuse mount point. - - If no arguments are specified in the samba - role, the default behavior is to enable the ceph UNC //localhost/ceph - and use the ceph vfs module as the smbd backend. - - :param ctx: Context - :param config: Configuration - """ - log.info("Setting up smbd with ceph vfs...") - assert config is None or isinstance(config, list) or isinstance(config, dict), \ - "task samba got invalid config" - - if config is None: - config = dict(('samba.{id}'.format(id=id_), None) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')) - elif isinstance(config, list): - config = dict((name, None) for name in config) - - samba_servers = list(get_sambas(ctx=ctx, roles=config.keys())) - - testdir = teuthology.get_testdir(ctx) - - from tasks.ceph import DaemonGroup - if not hasattr(ctx, 'daemons'): - ctx.daemons = DaemonGroup() - - for id_, remote in samba_servers: - - rolestr = "samba.{id_}".format(id_=id_) - - confextras = """vfs objects = ceph - ceph:config_file = /etc/ceph/ceph.conf""" - - unc = "ceph" - backend = "/" - - if config[rolestr] is not None: - # verify that there's just one parameter in role - if len(config[rolestr]) != 1: - log.error("samba config for role samba.{id_} must have only one parameter".format(id_=id_)) - raise Exception('invalid config') - confextras = "" - (unc, backendstr) = config[rolestr].items()[0] - backend = backendstr.format(testdir=testdir) - - # on first samba role, set ownership and permissions of ceph root - # so that samba tests succeed - if config[rolestr] is None and id_ == samba_servers[0][0]: - remote.run( - args=[ - 'mkdir', '-p', '/tmp/cmnt', run.Raw('&&'), - 'sudo', 'ceph-fuse', '/tmp/cmnt', run.Raw('&&'), - 'sudo', 'chown', 'ubuntu:ubuntu', '/tmp/cmnt/', run.Raw('&&'), - 'sudo', 'chmod', '1777', '/tmp/cmnt/', run.Raw('&&'), - 'sudo', 'umount', '/tmp/cmnt/', run.Raw('&&'), - 'rm', '-rf', '/tmp/cmnt', - ], - ) - else: - remote.run( - args=[ - 'sudo', 'chown', 'ubuntu:ubuntu', backend, run.Raw('&&'), - 'sudo', 'chmod', '1777', backend, - ], - ) - - teuthology.sudo_write_file(remote, "/usr/local/samba/etc/smb.conf", """ -[global] - workgroup = WORKGROUP - netbios name = DOMAIN - -[{unc}] - path = {backend} - {extras} - writeable = yes - valid users = ubuntu -""".format(extras=confextras, unc=unc, backend=backend)) - - # create ubuntu user - remote.run( - args=[ - 'sudo', '/usr/local/samba/bin/smbpasswd', '-e', 'ubuntu', - run.Raw('||'), - 'printf', run.Raw('"ubuntu\nubuntu\n"'), - run.Raw('|'), - 'sudo', '/usr/local/samba/bin/smbpasswd', '-s', '-a', 'ubuntu' - ]) - - smbd_cmd = [ - 'sudo', - 'daemon-helper', - 'term', - 'nostdin', - '/usr/local/samba/sbin/smbd', - '-F', - ] - ctx.daemons.add_daemon(remote, 'smbd', id_, - args=smbd_cmd, - logger=log.getChild("smbd.{id_}".format(id_=id_)), - 
stdin=run.PIPE, - wait=False, - ) - - # let smbd initialize, probably a better way... - import time - seconds_to_sleep = 100 - log.info('Sleeping for %s seconds...' % seconds_to_sleep) - time.sleep(seconds_to_sleep) - log.info('Sleeping stopped...') - - try: - yield - finally: - log.info('Stopping smbd processes...') - exc_info = (None, None, None) - for d in ctx.daemons.iter_daemons_of_role('smbd'): - try: - d.stop() - except (run.CommandFailedError, - run.CommandCrashedError, - run.ConnectionLostError): - exc_info = sys.exc_info() - log.exception('Saw exception from %s.%s', d.role, d.id_) - if exc_info != (None, None, None): - raise exc_info[0], exc_info[1], exc_info[2] - - for id_, remote in samba_servers: - remote.run( - args=[ - 'sudo', - 'rm', '-rf', - '/usr/local/samba/etc/smb.conf', - '/usr/local/samba/private/*', - '/usr/local/samba/var/run/', - '/usr/local/samba/var/locks', - '/usr/local/samba/var/lock', - ], - ) - # make sure daemons are gone - try: - remote.run( - args=[ - 'while', - 'sudo', 'killall', '-9', 'smbd', - run.Raw(';'), - 'do', 'sleep', '1', - run.Raw(';'), - 'done', - ], - ) - - remote.run( - args=[ - 'sudo', - 'lsof', - backend, - ], - check_status=False - ) - remote.run( - args=[ - 'sudo', - 'fuser', - '-M', - backend, - ], - check_status=False - ) - except Exception: - log.exception("Saw exception") - pass diff --git a/tasks/scrub.py b/tasks/scrub.py deleted file mode 100644 index 7a25300a677..00000000000 --- a/tasks/scrub.py +++ /dev/null @@ -1,117 +0,0 @@ -""" -Scrub osds -""" -import contextlib -import gevent -import logging -import random -import time - -import ceph_manager -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - Run scrub periodically. Randomly chooses an OSD to scrub. - - The config should be as follows: - - scrub: - frequency: - deep: - - example: - - tasks: - - ceph: - - scrub: - frequency: 30 - deep: 0 - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'scrub task only accepts a dict for configuration' - - log.info('Beginning scrub...') - - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') - while len(manager.get_osd_status()['up']) < num_osds: - manager.sleep(10) - - scrub_proc = Scrubber( - manager, - config, - ) - try: - yield - finally: - log.info('joining scrub') - scrub_proc.do_join() - -class Scrubber: - """ - Scrubbing is actually performed during initialzation - """ - def __init__(self, manager, config): - """ - Spawn scrubbing thread upon completion. 
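Outside of teuthology the same periodic scrubbing can be approximated directly against the ceph CLI. The sketch below mirrors the `osd scrub`/`osd deep-scrub` commands the task issues through CephManager.raw_cluster_cmd(); the OSD ids are illustrative and admin credentials are assumed to be available on the host::

    import random
    import subprocess
    import time

    osds = [0, 1, 2]          # illustrative OSD ids
    frequency, deep = 30, 0   # same defaults as the task above

    while True:
        osd = str(random.choice(osds))
        cmd = 'deep-scrub' if deep else 'scrub'
        # equivalent to: ceph osd scrub <id> / ceph osd deep-scrub <id>
        subprocess.check_call(['ceph', 'osd', cmd, osd])
        time.sleep(frequency)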
- """ - self.ceph_manager = manager - self.ceph_manager.wait_for_clean() - - osd_status = self.ceph_manager.get_osd_status() - self.osds = osd_status['up'] - - self.config = config - if self.config is None: - self.config = dict() - - else: - def tmp(x): - """Local display""" - print x - self.log = tmp - - self.stopping = False - - log.info("spawning thread") - - self.thread = gevent.spawn(self.do_scrub) - - def do_join(self): - """Scrubbing thread finished""" - self.stopping = True - self.thread.get() - - def do_scrub(self): - """Perform the scrub operation""" - frequency = self.config.get("frequency", 30) - deep = self.config.get("deep", 0) - - log.info("stopping %s" % self.stopping) - - while not self.stopping: - osd = str(random.choice(self.osds)) - - if deep: - cmd = 'deep-scrub' - else: - cmd = 'scrub' - - log.info('%sbing %s' % (cmd, osd)) - self.ceph_manager.raw_cluster_cmd('osd', cmd, osd) - - time.sleep(frequency) diff --git a/tasks/scrub_test.py b/tasks/scrub_test.py deleted file mode 100644 index 3443ae9f45e..00000000000 --- a/tasks/scrub_test.py +++ /dev/null @@ -1,199 +0,0 @@ -"""Scrub testing""" -from cStringIO import StringIO - -import logging -import os -import time - -import ceph_manager -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -def task(ctx, config): - """ - Test [deep] scrub - - tasks: - - chef: - - install: - - ceph: - log-whitelist: - - '!= known digest' - - '!= known omap_digest' - - deep-scrub 0 missing, 1 inconsistent objects - - deep-scrub 1 errors - - repair 0 missing, 1 inconsistent objects - - repair 1 errors, 1 fixed - - scrub_test: - - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'scrub_test task only accepts a dict for configuration' - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - - num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') - log.info('num_osds is %s' % num_osds) - - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - logger=log.getChild('ceph_manager'), - ) - - while len(manager.get_osd_status()['up']) < num_osds: - time.sleep(10) - - for i in range(num_osds): - manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'flush_pg_stats') - manager.wait_for_clean() - - # write some data - p = manager.do_rados(mon, ['-p', 'rbd', 'bench', '--no-cleanup', '1', 'write', '-b', '4096']) - err = p.exitstatus - log.info('err is %d' % err) - - # wait for some PG to have data that we can mess with - victim = None - osd = None - while victim is None: - stats = manager.get_pg_stats() - for pg in stats: - size = pg['stat_sum']['num_bytes'] - if size > 0: - victim = pg['pgid'] - osd = pg['acting'][0] - break - - if victim is None: - time.sleep(3) - - log.info('messing with PG %s on osd %d' % (victim, osd)) - - (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys() - data_path = os.path.join( - '/var/lib/ceph/osd', - 'ceph-{id}'.format(id=osd), - 'current', - '{pg}_head'.format(pg=victim) - ) - - # fuzz time - ls_fp = StringIO() - osd_remote.run( - args=[ 'ls', data_path ], - stdout=ls_fp, - ) - ls_out = ls_fp.getvalue() - ls_fp.close() - - # find an object file we can mess with - osdfilename = None - for line in ls_out.split('\n'): - if 'object' in line: - osdfilename = line - break - assert osdfilename is not None - - # Get actual object name from osd stored filename - tmp=osdfilename.split('__') - objname=tmp[0] - objname=objname.replace('\u', '_') - log.info('fuzzing %s' % objname) - - # put a single \0 at 
the beginning of the file - osd_remote.run( - args=[ 'sudo', 'dd', - 'if=/dev/zero', - 'of=%s' % os.path.join(data_path, osdfilename), - 'bs=1', 'count=1', 'conv=notrunc' - ] - ) - - # scrub, verify inconsistent - manager.raw_cluster_cmd('pg', 'deep-scrub', victim) - # Give deep-scrub a chance to start - time.sleep(60) - - while True: - stats = manager.get_single_pg_stats(victim) - state = stats['state'] - - # wait for the scrub to finish - if 'scrubbing' in state: - time.sleep(3) - continue - - inconsistent = stats['state'].find('+inconsistent') != -1 - assert inconsistent - break - - - # repair, verify no longer inconsistent - manager.raw_cluster_cmd('pg', 'repair', victim) - # Give repair a chance to start - time.sleep(60) - - while True: - stats = manager.get_single_pg_stats(victim) - state = stats['state'] - - # wait for the scrub to finish - if 'scrubbing' in state: - time.sleep(3) - continue - - inconsistent = stats['state'].find('+inconsistent') != -1 - assert not inconsistent - break - - # Test deep-scrub with various omap modifications - manager.do_rados(mon, ['-p', 'rbd', 'setomapval', objname, 'key', 'val']) - manager.do_rados(mon, ['-p', 'rbd', 'setomapheader', objname, 'hdr']) - - # Modify omap on specific osd - log.info('fuzzing omap of %s' % objname) - manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'key']); - manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname, 'badkey', 'badval']); - manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'badhdr']); - - # scrub, verify inconsistent - manager.raw_cluster_cmd('pg', 'deep-scrub', victim) - # Give deep-scrub a chance to start - time.sleep(60) - - while True: - stats = manager.get_single_pg_stats(victim) - state = stats['state'] - - # wait for the scrub to finish - if 'scrubbing' in state: - time.sleep(3) - continue - - inconsistent = stats['state'].find('+inconsistent') != -1 - assert inconsistent - break - - # repair, verify no longer inconsistent - manager.raw_cluster_cmd('pg', 'repair', victim) - # Give repair a chance to start - time.sleep(60) - - while True: - stats = manager.get_single_pg_stats(victim) - state = stats['state'] - - # wait for the scrub to finish - if 'scrubbing' in state: - time.sleep(3) - continue - - inconsistent = stats['state'].find('+inconsistent') != -1 - assert not inconsistent - break - - log.info('test successful!') diff --git a/tasks/test/__init__.py b/tasks/test/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tasks/test/test_devstack.py b/tasks/test/test_devstack.py deleted file mode 100644 index 117b3076818..00000000000 --- a/tasks/test/test_devstack.py +++ /dev/null @@ -1,48 +0,0 @@ -from textwrap import dedent - -from .. 
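The four wait-for-scrub loops in scrub_test.py above all repeat the same poll-then-assert pattern. A small helper along the following lines (a sketch reusing the same CephManager calls) could factor it out::

    import time

    def wait_and_check_pg_state(manager, pgid, want_inconsistent, sleep=3):
        # Poll until the PG is no longer scrubbing, then check '+inconsistent'.
        while True:
            state = manager.get_single_pg_stats(pgid)['state']
            if 'scrubbing' in state:
                time.sleep(sleep)
                continue
            assert ('+inconsistent' in state) == want_inconsistent
            return

    # after 'pg deep-scrub': wait_and_check_pg_state(manager, victim, True)
    # after 'pg repair':     wait_and_check_pg_state(manager, victim, False)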
import devstack - - -class TestDevstack(object): - def test_parse_os_table(self): - table_str = dedent(""" - +---------------------+--------------------------------------+ - | Property | Value | - +---------------------+--------------------------------------+ - | attachments | [] | - | availability_zone | nova | - | bootable | false | - | created_at | 2014-02-21T17:14:47.548361 | - | display_description | None | - | display_name | NAME | - | id | ffdbd1bb-60dc-4d95-acfe-88774c09ad3e | - | metadata | {} | - | size | 1 | - | snapshot_id | None | - | source_volid | None | - | status | creating | - | volume_type | None | - +---------------------+--------------------------------------+ - """).strip() - expected = { - 'Property': 'Value', - 'attachments': '[]', - 'availability_zone': 'nova', - 'bootable': 'false', - 'created_at': '2014-02-21T17:14:47.548361', - 'display_description': 'None', - 'display_name': 'NAME', - 'id': 'ffdbd1bb-60dc-4d95-acfe-88774c09ad3e', - 'metadata': '{}', - 'size': '1', - 'snapshot_id': 'None', - 'source_volid': 'None', - 'status': 'creating', - 'volume_type': 'None'} - - vol_info = devstack.parse_os_table(table_str) - assert vol_info == expected - - - - diff --git a/tasks/tgt.py b/tasks/tgt.py deleted file mode 100644 index c2b322e0829..00000000000 --- a/tasks/tgt.py +++ /dev/null @@ -1,177 +0,0 @@ -""" -Task to handle tgt - -Assumptions made: - The ceph-extras tgt package may need to get installed. - The open-iscsi package needs to get installed. -""" -import logging -import contextlib - -from teuthology import misc as teuthology -from teuthology import contextutil - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def start_tgt_remotes(ctx, start_tgtd): - """ - This subtask starts up a tgtd on the clients specified - """ - remotes = ctx.cluster.only(teuthology.is_type('client')).remotes - tgtd_list = [] - for rem, roles in remotes.iteritems(): - for _id in roles: - if _id in start_tgtd: - if not rem in tgtd_list: - tgtd_list.append(rem) - size = ctx.config.get('image_size', 10240) - rem.run( - args=[ - 'rbd', - 'create', - 'iscsi-image', - '--size', - str(size), - ]) - rem.run( - args=[ - 'sudo', - 'tgtadm', - '--lld', - 'iscsi', - '--mode', - 'target', - '--op', - 'new', - '--tid', - '1', - '--targetname', - 'rbd', - ]) - rem.run( - args=[ - 'sudo', - 'tgtadm', - '--lld', - 'iscsi', - '--mode', - 'logicalunit', - '--op', - 'new', - '--tid', - '1', - '--lun', - '1', - '--backing-store', - 'iscsi-image', - '--bstype', - 'rbd', - ]) - rem.run( - args=[ - 'sudo', - 'tgtadm', - '--lld', - 'iscsi', - '--op', - 'bind', - '--mode', - 'target', - '--tid', - '1', - '-I', - 'ALL', - ]) - try: - yield - - finally: - for rem in tgtd_list: - rem.run( - args=[ - 'sudo', - 'tgtadm', - '--lld', - 'iscsi', - '--mode', - 'target', - '--op', - 'delete', - '--force', - '--tid', - '1', - ]) - rem.run( - args=[ - 'rbd', - 'snap', - 'purge', - 'iscsi-image', - ]) - rem.run( - args=[ - 'sudo', - 'rbd', - 'rm', - 'iscsi-image', - ]) - - -@contextlib.contextmanager -def task(ctx, config): - """ - Start up tgt. - - To start on on all clients:: - - tasks: - - ceph: - - tgt: - - To start on certain clients:: - - tasks: - - ceph: - - tgt: [client.0, client.3] - - or - - tasks: - - ceph: - - tgt: - client.0: - client.3: - - An image blocksize size can also be specified:: - - tasks: - - ceph: - - tgt: - image_size = 20480 - - The general flow of things here is: - 1. Find clients on which tgt is supposed to run (start_tgtd) - 2. Remotely start up tgt daemon - On cleanup: - 3. 
Stop tgt daemon - - The iscsi administration is handled by the iscsi task. - """ - if config: - config = {key : val for key, val in config.items() - if key.startswith('client')} - # config at this point should only contain keys starting with 'client' - start_tgtd = [] - remotes = ctx.cluster.only(teuthology.is_type('client')).remotes - log.info(remotes) - if not config: - start_tgtd = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - else: - start_tgtd = config - log.info(start_tgtd) - with contextutil.nested( - lambda: start_tgt_remotes(ctx=ctx, start_tgtd=start_tgtd),): - yield diff --git a/tasks/thrashosds.py b/tasks/thrashosds.py deleted file mode 100644 index c20a6457c69..00000000000 --- a/tasks/thrashosds.py +++ /dev/null @@ -1,160 +0,0 @@ -""" -Thrash -- Simulate random osd failures. -""" -import contextlib -import logging -import ceph_manager -from teuthology import misc as teuthology - - -log = logging.getLogger(__name__) - -@contextlib.contextmanager -def task(ctx, config): - """ - "Thrash" the OSDs by randomly marking them out/down (and then back - in) until the task is ended. This loops, and every op_delay - seconds it randomly chooses to add or remove an OSD (even odds) - unless there are fewer than min_out OSDs out of the cluster, or - more than min_in OSDs in the cluster. - - All commands are run on mon0 and it stops when __exit__ is called. - - The config is optional, and is a dict containing some or all of: - - min_in: (default 3) the minimum number of OSDs to keep in the - cluster - - min_out: (default 0) the minimum number of OSDs to keep out of the - cluster - - op_delay: (5) the length of time to sleep between changing an - OSD's status - - min_dead: (0) minimum number of osds to leave down/dead. - - max_dead: (0) maximum number of osds to leave down/dead before waiting - for clean. This should probably be num_replicas - 1. - - clean_interval: (60) the approximate length of time to loop before - waiting until the cluster goes clean. (In reality this is used - to probabilistically choose when to wait, and the method used - makes it closer to -- but not identical to -- the half-life.) - - scrub_interval: (-1) the approximate length of time to loop before - waiting until a scrub is performed while cleaning. (In reality - this is used to probabilistically choose when to wait, and it - only applies to the cases where cleaning is being performed). - -1 is used to indicate that no scrubbing will be done. - - chance_down: (0.4) the probability that the thrasher will mark an - OSD down rather than marking it out. (The thrasher will not - consider that OSD out of the cluster, since presently an OSD - wrongly marked down will mark itself back up again.) This value - can be either an integer (eg, 75) or a float probability (eg - 0.75). - - chance_test_min_size: (0) chance to run test_pool_min_size, - which: - - kills all but one osd - - waits - - kills that osd - - revives all other osds - - verifies that the osds fully recover - - timeout: (360) the number of seconds to wait for the cluster - to become clean after each cluster change. If this doesn't - happen within the timeout, an exception will be raised. 
- - revive_timeout: (150) number of seconds to wait for an osd asok to - appear after attempting to revive the osd - - thrash_primary_affinity: (true) randomly adjust primary-affinity - - chance_pgnum_grow: (0) chance to increase a pool's size - chance_pgpnum_fix: (0) chance to adjust pgpnum to pg for a pool - pool_grow_by: (10) amount to increase pgnum by - max_pgs_per_pool_osd: (1200) don't expand pools past this size per osd - - pause_short: (3) duration of short pause - pause_long: (80) duration of long pause - pause_check_after: (50) assert osd down after this long - chance_inject_pause_short: (1) chance of injecting short stall - chance_inject_pause_long: (0) chance of injecting long stall - - clean_wait: (0) duration to wait before resuming thrashing once clean - - powercycle: (false) whether to power cycle the node instead - of just the osd process. Note that this assumes that a single - osd is the only important process on the node. - - chance_test_backfill_full: (0) chance to simulate full disks stopping - backfill - - chance_test_map_discontinuity: (0) chance to test map discontinuity - map_discontinuity_sleep_time: (40) time to wait for map trims - - ceph_objectstore_tool: (true) whether to export/import a pg while an osd is down - chance_move_pg: (1.0) chance of moving a pg if more than 1 osd is down (default 100%) - - example: - - tasks: - - ceph: - - thrashosds: - chance_down: 10 - op_delay: 3 - min_in: 1 - timeout: 600 - - interactive: - """ - if config is None: - config = {} - assert isinstance(config, dict), \ - 'thrashosds task only accepts a dict for configuration' - overrides = ctx.config.get('overrides', {}) - teuthology.deep_merge(config, overrides.get('thrashosds', {})) - - if 'powercycle' in config: - - # sync everyone first to avoid collateral damage to / etc. 
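The thrashing loop described in the docstring above is implemented by ceph_manager.Thrasher (not shown in this file). Stripped of its bookkeeping, one iteration reduces to roughly the following sketch; the `ceph osd down/out/in` subcommands are real, everything else is illustrative::

    import random
    import subprocess
    import time

    def thrash_once(osd_ids, chance_down=0.4, op_delay=5):
        # One simplified step: mark a random OSD down or out, then bring it back.
        osd = str(random.choice(osd_ids))
        if random.random() < chance_down:
            subprocess.check_call(['ceph', 'osd', 'down', osd])
        else:
            subprocess.check_call(['ceph', 'osd', 'out', osd])
        time.sleep(op_delay)
        subprocess.check_call(['ceph', 'osd', 'in', osd])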
- log.info('Doing preliminary sync to avoid collateral damage...') - ctx.cluster.run(args=['sync']) - - if 'ipmi_user' in ctx.teuthology_config: - for remote in ctx.cluster.remotes.keys(): - log.debug('checking console status of %s' % remote.shortname) - if not remote.console.check_status(): - log.warn('Failed to get console status for %s', - remote.shortname) - - # check that all osd remotes have a valid console - osds = ctx.cluster.only(teuthology.is_type('osd')) - for remote in osds.remotes.keys(): - if not remote.console.has_ipmi_credentials: - raise Exception( - 'IPMI console required for powercycling, ' - 'but not available on osd role: {r}'.format( - r=remote.name)) - - log.info('Beginning thrashosds...') - first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() - manager = ceph_manager.CephManager( - mon, - ctx=ctx, - config=config, - logger=log.getChild('ceph_manager'), - ) - ctx.manager = manager - thrash_proc = ceph_manager.Thrasher( - manager, - config, - logger=log.getChild('thrasher') - ) - try: - yield - finally: - log.info('joining thrashosds') - thrash_proc.do_join() - manager.wait_for_recovery(config.get('timeout', 360)) diff --git a/tasks/userdata_setup.yaml b/tasks/userdata_setup.yaml deleted file mode 100644 index d39695bef0f..00000000000 --- a/tasks/userdata_setup.yaml +++ /dev/null @@ -1,25 +0,0 @@ -#cloud-config-archive - -- type: text/cloud-config - content: | - output: - all: '| tee -a /var/log/cloud-init-output.log' - -# allow passwordless access for debugging -- | - #!/bin/bash - exec passwd -d ubuntu - -- | - #!/bin/bash - - # mount a NFS share for storing logs - apt-get update - apt-get -y install nfs-common - mkdir /mnt/log - # 10.0.2.2 is the host - mount -v -t nfs -o proto=tcp 10.0.2.2:{mnt_dir} /mnt/log - - # mount the iso image that has the test script - mkdir /mnt/cdrom - mount -t auto /dev/cdrom /mnt/cdrom diff --git a/tasks/userdata_teardown.yaml b/tasks/userdata_teardown.yaml deleted file mode 100644 index 7f3d64ff742..00000000000 --- a/tasks/userdata_teardown.yaml +++ /dev/null @@ -1,11 +0,0 @@ -- | - #!/bin/bash - cp /var/log/cloud-init-output.log /mnt/log - -- | - #!/bin/bash - umount /mnt/log - -- | - #!/bin/bash - shutdown -h -P now diff --git a/tasks/util/__init__.py b/tasks/util/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tasks/util/kclient.py b/tasks/util/kclient.py deleted file mode 100644 index c6a259fc755..00000000000 --- a/tasks/util/kclient.py +++ /dev/null @@ -1,22 +0,0 @@ -from teuthology.misc import get_testdir -from teuthology.orchestra import run - - -def write_secret_file(ctx, remote, role, keyring, filename): - """ - Stash the kerying in the filename specified. 
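A hypothetical call would look like the following (the keyring and output paths are illustrative); the resulting file is what the kernel client later consumes through mount.ceph's `secretfile=` option::

    write_secret_file(
        ctx, remote,
        role='client.0',
        keyring='/etc/ceph/ceph.keyring',          # illustrative path
        filename='/tmp/cephtest/client.0.secret',  # illustrative path
    )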
- """ - testdir = get_testdir(ctx) - remote.run( - args=[ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'ceph-authtool', - '--name={role}'.format(role=role), - '--print-key', - keyring, - run.Raw('>'), - filename, - ], - ) diff --git a/tasks/util/rados.py b/tasks/util/rados.py deleted file mode 100644 index f6a806c95db..00000000000 --- a/tasks/util/rados.py +++ /dev/null @@ -1,50 +0,0 @@ -import logging - -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -def rados(ctx, remote, cmd, wait=True, check_status=False): - testdir = teuthology.get_testdir(ctx) - log.info("rados %s" % ' '.join(cmd)) - pre = [ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir), - 'rados', - ]; - pre.extend(cmd) - proc = remote.run( - args=pre, - check_status=check_status, - wait=wait, - ) - if wait: - return proc.exitstatus - else: - return proc - -def create_ec_pool(remote, name, profile_name, pgnum, m=1, k=2): - remote.run(args=[ - 'ceph', 'osd', 'erasure-code-profile', 'set', - profile_name, 'm=' + str(m), 'k=' + str(k), - 'ruleset-failure-domain=osd', - ]) - remote.run(args=[ - 'ceph', 'osd', 'pool', 'create', name, - str(pgnum), str(pgnum), 'erasure', profile_name, - ]) - -def create_replicated_pool(remote, name, pgnum): - remote.run(args=[ - 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum), - ]) - -def create_cache_pool(remote, base_name, cache_name, pgnum, size): - remote.run(args=[ - 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum) - ]) - remote.run(args=[ - 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name, - str(size), - ]) diff --git a/tasks/util/rgw.py b/tasks/util/rgw.py deleted file mode 100644 index e5fba9f82f9..00000000000 --- a/tasks/util/rgw.py +++ /dev/null @@ -1,171 +0,0 @@ -from cStringIO import StringIO -import logging -import json -import requests -from urlparse import urlparse - -from teuthology.orchestra.connection import split_user -from teuthology import misc as teuthology - -log = logging.getLogger(__name__) - -# simple test to indicate if multi-region testing should occur -def multi_region_enabled(ctx): - # this is populated by the radosgw-agent task, seems reasonable to - # use that as an indicator that we're testing multi-region sync - return 'radosgw_agent' in ctx - -def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False): - log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd)) - testdir = teuthology.get_testdir(ctx) - pre = [ - 'adjust-ulimits', - 'ceph-coverage'.format(tdir=testdir), - '{tdir}/archive/coverage'.format(tdir=testdir), - 'radosgw-admin'.format(tdir=testdir), - '--log-to-stderr', - '--format', 'json', - '-n', client, - ] - pre.extend(cmd) - log.info('rgwadmin: cmd=%s' % pre) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() - proc = remote.run( - args=pre, - check_status=check_status, - stdout=StringIO(), - stderr=StringIO(), - stdin=stdin, - ) - r = proc.exitstatus - out = proc.stdout.getvalue() - j = None - if not r and out != '': - try: - j = json.loads(out) - log.info(' json result: %s' % j) - except ValueError: - j = out - log.info(' raw result: %s' % j) - return (r, j) - -def get_user_summary(out, user): - """Extract the summary for a given user""" - user_summary = None - for summary in out['summary']: - if summary.get('user') == user: - user_summary = summary - - if not user_summary: - raise AssertionError('No summary info found for user: %s' % user) - - return user_summary - -def 
get_user_successful_ops(out, user): - summary = out['summary'] - if len(summary) == 0: - return 0 - return get_user_summary(out, user)['total']['successful_ops'] - -def get_zone_host_and_port(ctx, client, zone): - _, region_map = rgwadmin(ctx, client, check_status=True, - cmd=['-n', client, 'region-map', 'get']) - regions = region_map['regions'] - for region in regions: - for zone_info in region['val']['zones']: - if zone_info['name'] == zone: - endpoint = urlparse(zone_info['endpoints'][0]) - host, port = endpoint.hostname, endpoint.port - if port is None: - port = 80 - return host, port - assert False, 'no endpoint for zone {zone} found'.format(zone=zone) - -def get_master_zone(ctx, client): - _, region_map = rgwadmin(ctx, client, check_status=True, - cmd=['-n', client, 'region-map', 'get']) - regions = region_map['regions'] - for region in regions: - is_master = (region['val']['is_master'] == "true") - log.info('region={r} is_master={ism}'.format(r=region, ism=is_master)) - if not is_master: - continue - master_zone = region['val']['master_zone'] - log.info('master_zone=%s' % master_zone) - for zone_info in region['val']['zones']: - if zone_info['name'] == master_zone: - return master_zone - log.info('couldn\'t find master zone') - return None - -def get_master_client(ctx, clients): - master_zone = get_master_zone(ctx, clients[0]) # can use any client for this as long as system configured correctly - if not master_zone: - return None - - for client in clients: - zone = zone_for_client(ctx, client) - if zone == master_zone: - return client - - return None - -def get_zone_system_keys(ctx, client, zone): - _, zone_info = rgwadmin(ctx, client, check_status=True, - cmd=['-n', client, - 'zone', 'get', '--rgw-zone', zone]) - system_key = zone_info['system_key'] - return system_key['access_key'], system_key['secret_key'] - -def zone_for_client(ctx, client): - ceph_config = ctx.ceph.conf.get('global', {}) - ceph_config.update(ctx.ceph.conf.get('client', {})) - ceph_config.update(ctx.ceph.conf.get(client, {})) - return ceph_config.get('rgw zone') - -def region_for_client(ctx, client): - ceph_config = ctx.ceph.conf.get('global', {}) - ceph_config.update(ctx.ceph.conf.get('client', {})) - ceph_config.update(ctx.ceph.conf.get(client, {})) - return ceph_config.get('rgw region') - -def radosgw_data_log_window(ctx, client): - ceph_config = ctx.ceph.conf.get('global', {}) - ceph_config.update(ctx.ceph.conf.get('client', {})) - ceph_config.update(ctx.ceph.conf.get(client, {})) - return ceph_config.get('rgw data log window', 30) - -def radosgw_agent_sync_data(ctx, agent_host, agent_port, full=False): - log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port)) - method = "full" if full else "incremental" - return requests.post('http://{addr}:{port}/data/{method}'.format(addr = agent_host, port = agent_port, method = method)) - -def radosgw_agent_sync_metadata(ctx, agent_host, agent_port, full=False): - log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port)) - method = "full" if full else "incremental" - return requests.post('http://{addr}:{port}/metadata/{method}'.format(addr = agent_host, port = agent_port, method = method)) - -def radosgw_agent_sync_all(ctx, full=False, data=False): - if ctx.radosgw_agent.procs: - for agent_client, c_config in ctx.radosgw_agent.config.iteritems(): - zone_for_client(ctx, agent_client) - sync_host, sync_port = get_sync_agent(ctx, agent_client) - log.debug('doing a sync via {host1}'.format(host1=sync_host)) - radosgw_agent_sync_metadata(ctx, sync_host, 
sync_port, full) - if (data): - radosgw_agent_sync_data(ctx, sync_host, sync_port, full) - -def host_for_role(ctx, role): - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): - if role in roles: - _, host = split_user(target) - return host - -def get_sync_agent(ctx, source): - for task in ctx.config['tasks']: - if 'radosgw-agent' not in task: - continue - for client, conf in task['radosgw-agent'].iteritems(): - if conf['src'] == source: - return host_for_role(ctx, source), conf.get('port', 8000) - return None, None diff --git a/tasks/watch_notify_stress.py b/tasks/watch_notify_stress.py deleted file mode 100644 index 6db313fea6d..00000000000 --- a/tasks/watch_notify_stress.py +++ /dev/null @@ -1,69 +0,0 @@ -""" -test_stress_watch task -""" -import contextlib -import logging -import proc_thrasher - -from teuthology.orchestra import run - -log = logging.getLogger(__name__) - - -@contextlib.contextmanager -def task(ctx, config): - """ - Run test_stress_watch - - The config should be as follows: - - test_stress_watch: - clients: [client list] - - example: - - tasks: - - ceph: - - test_stress_watch: - clients: [client.0] - - interactive: - """ - log.info('Beginning test_stress_watch...') - assert isinstance(config, dict), \ - "please list clients to run on" - testwatch = {} - - remotes = [] - - for role in config.get('clients', ['client.0']): - assert isinstance(role, basestring) - PREFIX = 'client.' - assert role.startswith(PREFIX) - id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - remotes.append(remote) - - args =['CEPH_CLIENT_ID={id_}'.format(id_=id_), - 'CEPH_ARGS="{flags}"'.format(flags=config.get('flags', '')), - 'daemon-helper', - 'kill', - 'multi_stress_watch foo foo' - ] - - log.info("args are %s" % (args,)) - - proc = proc_thrasher.ProcThrasher({}, remote, - args=[run.Raw(i) for i in args], - logger=log.getChild('testwatch.{id}'.format(id=id_)), - stdin=run.PIPE, - wait=False - ) - proc.start() - testwatch[id_] = proc - - try: - yield - finally: - log.info('joining watch_notify_stress') - for i in testwatch.itervalues(): - i.join() diff --git a/tasks/workunit.py b/tasks/workunit.py deleted file mode 100644 index 548fc5898b8..00000000000 --- a/tasks/workunit.py +++ /dev/null @@ -1,374 +0,0 @@ -""" -Workunit task -- Run ceph on sets of specific clients -""" -import logging -import pipes -import os - -from teuthology import misc -from teuthology.orchestra.run import CommandFailedError -from teuthology.parallel import parallel -from teuthology.orchestra import run - -log = logging.getLogger(__name__) - -CLIENT_PREFIX = 'client.' - - -def task(ctx, config): - """ - Run ceph on all workunits found under the specified path. - - For example:: - - tasks: - - ceph: - - ceph-fuse: [client.0] - - workunit: - clients: - client.0: [direct_io, xattrs.sh] - client.1: [snaps] - branch: foo - - You can also run a list of workunits on all clients: - tasks: - - ceph: - - ceph-fuse: - - workunit: - tag: v0.47 - clients: - all: [direct_io, xattrs.sh, snaps] - - If you have an "all" section it will run all the workunits - on each client simultaneously, AFTER running any workunits specified - for individual clients. (This prevents unintended simultaneous runs.) - - To customize tests, you can specify environment variables as a dict. 
You - can also specify a time limit for each work unit (defaults to 3h): - - tasks: - - ceph: - - ceph-fuse: - - workunit: - sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6 - clients: - all: [snaps] - env: - FOO: bar - BAZ: quux - timeout: 3h - - :param ctx: Context - :param config: Configuration - """ - assert isinstance(config, dict) - assert isinstance(config.get('clients'), dict), \ - 'configuration must contain a dictionary of clients' - - overrides = ctx.config.get('overrides', {}) - misc.deep_merge(config, overrides.get('workunit', {})) - - refspec = config.get('branch') - if refspec is None: - refspec = config.get('sha1') - if refspec is None: - refspec = config.get('tag') - if refspec is None: - refspec = 'HEAD' - - timeout = config.get('timeout', '3h') - - log.info('Pulling workunits from ref %s', refspec) - - created_mountpoint = {} - - if config.get('env') is not None: - assert isinstance(config['env'], dict), 'env must be a dictionary' - clients = config['clients'] - - # Create scratch dirs for any non-all workunits - log.info('Making a separate scratch dir for every client...') - for role in clients.iterkeys(): - assert isinstance(role, basestring) - if role == "all": - continue - - assert role.startswith(CLIENT_PREFIX) - created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir')) - created_mountpoint[role] = created_mnt_dir - - # Execute any non-all workunits - with parallel() as p: - for role, tests in clients.iteritems(): - if role != "all": - p.spawn(_run_tests, ctx, refspec, role, tests, - config.get('env'), timeout=timeout) - - # Clean up dirs from any non-all workunits - for role, created in created_mountpoint.items(): - _delete_dir(ctx, role, created) - - # Execute any 'all' workunits - if 'all' in clients: - all_tasks = clients["all"] - _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'), - config.get('subdir'), timeout=timeout) - - -def _delete_dir(ctx, role, created_mountpoint): - """ - Delete file used by this role, and delete the directory that this - role appeared in. - - :param ctx: Context - :param role: "role.#" where # is used for the role id. - """ - testdir = misc.get_testdir(ctx) - id_ = role[len(CLIENT_PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - # Is there any reason why this is not: join(mnt, role) ? - client = os.path.join(mnt, 'client.{id}'.format(id=id_)) - - # Remove the directory inside the mount where the workunit ran - remote.run( - args=[ - 'sudo', - 'rm', - '-rf', - '--', - client, - ], - ) - log.info("Deleted dir {dir}".format(dir=client)) - - # If the mount was an artificially created dir, delete that too - if created_mountpoint: - remote.run( - args=[ - 'rmdir', - '--', - mnt, - ], - ) - log.info("Deleted artificial mount point {dir}".format(dir=client)) - - -def _make_scratch_dir(ctx, role, subdir): - """ - Make scratch directories for this role. This also makes the mount - point if that directory does not exist. - - :param ctx: Context - :param role: "role.#" where # is used for the role id. - :param subdir: use this subdir (False if not used) - """ - created_mountpoint = False - id_ = role[len(CLIENT_PREFIX):] - log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role)) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - dir_owner = remote.user - mnt = os.path.join(misc.get_testdir(ctx), 'mnt.{id}'.format(id=id_)) - # if neither kclient nor ceph-fuse are required for a workunit, - # mnt may not exist. 
Stat and create the directory if it doesn't. - try: - remote.run( - args=[ - 'stat', - '--', - mnt, - ], - ) - log.info('Did not need to create dir {dir}'.format(dir=mnt)) - except CommandFailedError: - remote.run( - args=[ - 'mkdir', - '--', - mnt, - ], - ) - log.info('Created dir {dir}'.format(dir=mnt)) - created_mountpoint = True - - if not subdir: - subdir = 'client.{id}'.format(id=id_) - - if created_mountpoint: - remote.run( - args=[ - 'cd', - '--', - mnt, - run.Raw('&&'), - 'mkdir', - '--', - subdir, - ], - ) - else: - remote.run( - args=[ - # cd first so this will fail if the mount point does - # not exist; pure install -d will silently do the - # wrong thing - 'cd', - '--', - mnt, - run.Raw('&&'), - 'sudo', - 'install', - '-d', - '-m', '0755', - '--owner={user}'.format(user=dir_owner), - '--', - subdir, - ], - ) - - return created_mountpoint - - -def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None): - """ - Make a scratch directory for each client in the cluster, and then for each - test spawn _run_tests() for each role. - - See run_tests() for parameter documentation. - """ - client_generator = misc.all_roles_of_type(ctx.cluster, 'client') - client_remotes = list() - - created_mountpoint = {} - for client in client_generator: - (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys() - client_remotes.append((client_remote, 'client.{id}'.format(id=client))) - created_mountpoint[client] = _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir) - - for unit in tests: - with parallel() as p: - for remote, role in client_remotes: - p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir, - timeout=timeout) - - # cleanup the generated client directories - client_generator = misc.all_roles_of_type(ctx.cluster, 'client') - for client in client_generator: - _delete_dir(ctx, 'client.{id}'.format(id=client), created_mountpoint[client]) - - -def _run_tests(ctx, refspec, role, tests, env, subdir=None, timeout=None): - """ - Run the individual test. Create a scratch directory and then extract the - workunits from git. Make the executables, and then run the tests. - Clean up (remove files created) after the tests are finished. - - :param ctx: Context - :param refspec: branch, sha1, or version tag used to identify this - build - :param tests: specific tests specified. - :param env: environment set in yaml file. Could be None. - :param subdir: subdirectory set in yaml file. Could be None - :param timeout: If present, use the 'timeout' command on the remote host - to limit execution time. Must be specified by a number - followed by 's' for seconds, 'm' for minutes, 'h' for - hours, or 'd' for days. If '0' or anything that evaluates - to False is passed, the 'timeout' command is not used. 
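Concretely, with `timeout: 3h` each workunit ends up running under a command line of roughly this shape (environment and paths abbreviated), matching the argument list assembled below::

    CEPH_REF=<refspec> TESTDIR=<testdir> CEPH_ID=<id> \
        adjust-ulimits ceph-coverage <testdir>/archive/coverage \
        timeout 3h <srcdir>/<workunit>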
- """ - testdir = misc.get_testdir(ctx) - assert isinstance(role, basestring) - assert role.startswith(CLIENT_PREFIX) - id_ = role[len(CLIENT_PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() - mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) - # subdir so we can remove and recreate this a lot without sudo - if subdir is None: - scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp') - else: - scratch_tmp = os.path.join(mnt, subdir) - srcdir = '{tdir}/workunit.{role}'.format(tdir=testdir, role=role) - - remote.run( - logger=log.getChild(role), - args=[ - 'mkdir', '--', srcdir, - run.Raw('&&'), - 'git', - 'archive', - '--remote=git://git.ceph.com/ceph.git', - '%s:qa/workunits' % refspec, - run.Raw('|'), - 'tar', - '-C', srcdir, - '-x', - '-f-', - run.Raw('&&'), - 'cd', '--', srcdir, - run.Raw('&&'), - 'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi', - run.Raw('&&'), - 'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir), - run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)), - ], - ) - - workunits = sorted(misc.get_file( - remote, - '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)).split('\0')) - assert workunits - - try: - assert isinstance(tests, list) - for spec in tests: - log.info('Running workunits matching %s on %s...', spec, role) - prefix = '{spec}/'.format(spec=spec) - to_run = [w for w in workunits if w == spec or w.startswith(prefix)] - if not to_run: - raise RuntimeError('Spec did not match any workunits: {spec!r}'.format(spec=spec)) - for workunit in to_run: - log.info('Running workunit %s...', workunit) - args = [ - 'mkdir', '-p', '--', scratch_tmp, - run.Raw('&&'), - 'cd', '--', scratch_tmp, - run.Raw('&&'), - run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'), - run.Raw('CEPH_REF={ref}'.format(ref=refspec)), - run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)), - run.Raw('CEPH_ID="{id}"'.format(id=id_)), - run.Raw('PATH=$PATH:/usr/sbin') - ] - if env is not None: - for var, val in env.iteritems(): - quoted_val = pipes.quote(val) - env_arg = '{var}={val}'.format(var=var, val=quoted_val) - args.append(run.Raw(env_arg)) - args.extend([ - 'adjust-ulimits', - 'ceph-coverage', - '{tdir}/archive/coverage'.format(tdir=testdir)]) - if timeout and timeout != '0': - args.extend(['timeout', timeout]) - args.extend([ - '{srcdir}/{workunit}'.format( - srcdir=srcdir, - workunit=workunit, - ), - ]) - remote.run( - logger=log.getChild(role), - args=args, - label="workunit test {workunit}".format(workunit=workunit) - ) - remote.run( - logger=log.getChild(role), - args=['sudo', 'rm', '-rf', '--', scratch_tmp], - ) - finally: - log.info('Stopping %s on %s...', tests, role) - remote.run( - logger=log.getChild(role), - args=[ - 'rm', '-rf', '--', '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role), srcdir, - ], - ) diff --git a/tox.ini b/tox.ini deleted file mode 100644 index 14399d0a302..00000000000 --- a/tox.ini +++ /dev/null @@ -1,8 +0,0 @@ -[tox] -envlist = flake8 -skipsdist = True - -[testenv:flake8] -deps= - flake8 -commands=flake8 --select=F ceph-qa-suite
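With the tox configuration above, the same flake8 checks can be run locally from the directory containing tox.ini with::

    tox -e flake8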