--- /dev/null
+*~
+.*.sw[nmop]
+*.pyc
+.tox
+__pycache__
--- /dev/null
+ceph-qa-suite
+-------------
+
+clusters/ - some predefined cluster layouts
+suites/ - test suite definitions
+
+The suites directory has a hierarchical collection of tests. This can be
+freeform, but generally follows the convention of
+
+ suites/<test suite name>/<test group>/...
+
+A test is described by a yaml fragment.
+
+A test can exist as a single .yaml file in the directory tree. For example:
+
+ suites/foo/one.yaml
+ suites/foo/two.yaml
+
+is a simple group of two tests.
+
+A directory with a magic '+' file represents a test that combines all
+other items in the directory into a single yaml fragment. For example:
+
+ suites/foo/bar/+
+ suites/foo/bar/a.yaml
+ suites/foo/bar/b.yaml
+ suites/foo/bar/c.yaml
+
+is a single test consisting of a + b + c.
+
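+Conceptually, the fragments under a '+' directory are merged into one
+configuration before the test runs. A rough sketch of such a merge in
+Python (a simplified illustration, assuming PyYAML; not teuthology's
+actual combination code):
+
+  import yaml
+
+  def deep_merge(base, extra):
+      # nested dicts merge recursively; any other value is replaced
+      for key, value in extra.items():
+          if isinstance(value, dict) and isinstance(base.get(key), dict):
+              deep_merge(base[key], value)
+          else:
+              base[key] = value
+      return base
+
+  combined = {}
+  for fragment in ('a.yaml', 'b.yaml', 'c.yaml'):
+      with open(fragment) as f:
+          deep_merge(combined, yaml.safe_load(f) or {})
+  print(yaml.dump(combined))
+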
+A directory with a magic '%' file represents a test matrix formed from
+all other items in the directory. For example,
+
+ suites/baz/%
+ suites/baz/a.yaml
+ suites/baz/b/b1.yaml
+ suites/baz/b/b2.yaml
+ suites/baz/c.yaml
+ suites/baz/d/d1.yaml
+ suites/baz/d/d2.yaml
+
+is a 4-dimensional test matrix. Two of the dimensions (a, c) are trivial
+(one item each), so this is really 2x2 = 4 tests, which are
+
+ a + b1 + c + d1
+ a + b1 + c + d2
+ a + b2 + c + d1
+ a + b2 + c + d2
+
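+For illustration, the '%' expansion is essentially a cross product of the
+directory entries; a hypothetical sketch of that expansion (not the actual
+scheduler code):
+
+  import itertools
+
+  dims = [
+      ['a.yaml'],                    # trivial dimension, one item
+      ['b/b1.yaml', 'b/b2.yaml'],
+      ['c.yaml'],                    # trivial dimension, one item
+      ['d/d1.yaml', 'd/d2.yaml'],
+  ]
+  for combo in itertools.product(*dims):
+      print(' + '.join(combo))       # each line is one generated test
+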
+Symlinks are okay.
+
+The teuthology code can be found at https://github.com/ceph/teuthology.git
--- /dev/null
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, client.0]
--- /dev/null
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0]
+- [mon.b, mds.a, osd.3, osd.4, osd.5, client.1]
--- /dev/null
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mds:
+ debug ms: 1
+ debug mds: 20
+ client:
+ debug ms: 1
+ debug client: 20
\ No newline at end of file
--- /dev/null
+os_type: centos
+os_version: "6.3"
--- /dev/null
+os_type: centos
+os_version: "6.4"
--- /dev/null
+os_type: centos
+os_version: "6.5"
--- /dev/null
+os_type: debian
+os_version: "6.0"
--- /dev/null
+os_type: debian
+os_version: "7.0"
--- /dev/null
+os_type: fedora
+os_version: "17"
--- /dev/null
+os_type: fedora
+os_version: "18"
--- /dev/null
+os_type: fedora
+os_version: "19"
--- /dev/null
+os_type: opensuse
+os_version: "12.2"
--- /dev/null
+os_type: rhel
+os_version: "6.3"
--- /dev/null
+os_type: rhel
+os_version: "6.4"
--- /dev/null
+os_type: rhel
+os_version: "6.5"
--- /dev/null
+os_type: sles
+os_version: "11-sp2"
--- /dev/null
+os_type: ubuntu
+os_version: "12.04"
--- /dev/null
+os_type: ubuntu
+os_version: "12.10"
--- /dev/null
+os_type: ubuntu
+os_version: "14.04"
--- /dev/null
+../all/centos_6.5.yaml
\ No newline at end of file
--- /dev/null
+../all/debian_7.0.yaml
\ No newline at end of file
--- /dev/null
+../all/ubuntu_12.04.yaml
\ No newline at end of file
--- /dev/null
+../all/ubuntu_14.04.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ fs: btrfs
+ conf:
+ osd:
+ osd sloppy crc: true
+ osd op thread timeout: 60
--- /dev/null
+overrides:
+ ceph:
+ fs: ext4
--- /dev/null
+overrides:
+ ceph:
+ fs: xfs
+ conf:
+ osd:
+ osd sloppy crc: true
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ conf:
+ mds:
+ debug mds: 20
+ debug ms: 1
+ client:
+ debug client: 10
\ No newline at end of file
--- /dev/null
+overrides:
+ rgw:
+ ec-data-pool: true
+ s3tests:
+ slow_backend: true
--- /dev/null
+overrides:
+ rgw:
+ ec-data-pool: false
--- /dev/null
+tasks:
+- install:
+- ceph:
--- /dev/null
+roles:
+- [osd.0, osd.1, osd.2, client.0, mon.a]
+- [osd.3, osd.4, osd.5, client.1, mon.b]
+- [osd.6, osd.7, osd.8, client.2, mon.c]
+- [osd.9, osd.10, osd.11, client.3, mon.d]
+- [osd.12, osd.13, osd.14, client.4, mon.e]
+- [osd.15, osd.16, osd.17, client.5]
+- [osd.18, osd.19, osd.20, client.6]
+- [osd.21, osd.22, osd.23, client.7]
+- [osd.24, osd.25, osd.26, client.8]
+- [osd.27, osd.28, osd.29, client.9]
+- [osd.30, osd.31, osd.32, client.10]
+- [osd.33, osd.34, osd.35, client.11]
+- [osd.36, osd.37, osd.38, client.12]
+- [osd.39, osd.40, osd.41, client.13]
+- [osd.42, osd.43, osd.44, client.14]
+- [osd.45, osd.46, osd.47, client.15]
+- [osd.48, osd.49, osd.50, client.16]
+- [osd.51, osd.52, osd.53, client.17]
+- [osd.54, osd.55, osd.56, client.18]
+- [osd.57, osd.58, osd.59, client.19]
+- [osd.60, osd.61, osd.62, client.20]
+- [osd.63, osd.64, osd.65, client.21]
+- [osd.66, osd.67, osd.68, client.22]
+- [osd.69, osd.70, osd.71, client.23]
+- [osd.72, osd.73, osd.74, client.24]
+- [osd.75, osd.76, osd.77, client.25]
+- [osd.78, osd.79, osd.80, client.26]
+- [osd.81, osd.82, osd.83, client.27]
+- [osd.84, osd.85, osd.86, client.28]
+- [osd.87, osd.88, osd.89, client.29]
+- [osd.90, osd.91, osd.92, client.30]
+- [osd.93, osd.94, osd.95, client.31]
+- [osd.96, osd.97, osd.98, client.32]
+- [osd.99, osd.100, osd.101, client.33]
+- [osd.102, osd.103, osd.104, client.34]
+- [osd.105, osd.106, osd.107, client.35]
+- [osd.108, osd.109, osd.110, client.36]
+- [osd.111, osd.112, osd.113, client.37]
+- [osd.114, osd.115, osd.116, client.38]
+- [osd.117, osd.118, osd.119, client.39]
+- [osd.120, osd.121, osd.122, client.40]
+- [osd.123, osd.124, osd.125, client.41]
+- [osd.126, osd.127, osd.128, client.42]
+- [osd.129, osd.130, osd.131, client.43]
+- [osd.132, osd.133, osd.134, client.44]
+- [osd.135, osd.136, osd.137, client.45]
+- [osd.138, osd.139, osd.140, client.46]
+- [osd.141, osd.142, osd.143, client.47]
+- [osd.144, osd.145, osd.146, client.48]
+- [osd.147, osd.148, osd.149, client.49]
+- [osd.150, osd.151, osd.152, client.50]
+#- [osd.153, osd.154, osd.155, client.51]
+#- [osd.156, osd.157, osd.158, client.52]
+#- [osd.159, osd.160, osd.161, client.53]
+#- [osd.162, osd.163, osd.164, client.54]
+#- [osd.165, osd.166, osd.167, client.55]
+#- [osd.168, osd.169, osd.170, client.56]
+#- [osd.171, osd.172, osd.173, client.57]
+#- [osd.174, osd.175, osd.176, client.58]
+#- [osd.177, osd.178, osd.179, client.59]
+#- [osd.180, osd.181, osd.182, client.60]
+#- [osd.183, osd.184, osd.185, client.61]
+#- [osd.186, osd.187, osd.188, client.62]
+#- [osd.189, osd.190, osd.191, client.63]
+#- [osd.192, osd.193, osd.194, client.64]
+#- [osd.195, osd.196, osd.197, client.65]
+#- [osd.198, osd.199, osd.200, client.66]
--- /dev/null
+roles:
+- [osd.0, osd.1, osd.2, client.0, mon.a]
+- [osd.3, osd.4, osd.5, client.1, mon.b]
+- [osd.6, osd.7, osd.8, client.2, mon.c]
+- [osd.9, osd.10, osd.11, client.3, mon.d]
+- [osd.12, osd.13, osd.14, client.4, mon.e]
+- [osd.15, osd.16, osd.17, client.5]
+- [osd.18, osd.19, osd.20, client.6]
+- [osd.21, osd.22, osd.23, client.7]
+- [osd.24, osd.25, osd.26, client.8]
+- [osd.27, osd.28, osd.29, client.9]
+- [osd.30, osd.31, osd.32, client.10]
+- [osd.33, osd.34, osd.35, client.11]
+- [osd.36, osd.37, osd.38, client.12]
+- [osd.39, osd.40, osd.41, client.13]
+- [osd.42, osd.43, osd.44, client.14]
+- [osd.45, osd.46, osd.47, client.15]
+- [osd.48, osd.49, osd.50, client.16]
+- [osd.51, osd.52, osd.53, client.17]
+- [osd.54, osd.55, osd.56, client.18]
+- [osd.57, osd.58, osd.59, client.19]
+- [osd.60, osd.61, osd.62, client.20]
--- /dev/null
+roles:
+- [osd.0, osd.1, osd.2, client.0, mon.a]
+- [osd.3, osd.4, osd.5, client.1, mon.b]
+- [osd.6, osd.7, osd.8, client.2, mon.c]
+- [osd.9, osd.10, osd.11, client.3, mon.d]
+- [osd.12, osd.13, osd.14, client.4, mon.e]
--- /dev/null
+overrides:
+ ceph:
+ fs: btrfs
+ conf:
+ osd:
+ osd sloppy crc: true
+ osd op thread timeout: 60
--- /dev/null
+overrides:
+ ceph:
+ fs: xfs
+ conf:
+ osd:
+ osd sloppy crc: true
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
--- /dev/null
+tasks:
+- rados:
+ ops: 4000
+ max_seconds: 3600
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
--- /dev/null
+../../../distros/supported
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.3
+ - osd.4
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+exclude_arch: armv7l
--- /dev/null
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.4
+ - osd.3
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - suites/dbench.sh
+exclude_arch: armv7l
--- /dev/null
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.3
+ - osd.4
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - suites/fsstress.sh
+exclude_arch: armv7l
--- /dev/null
+../../../distros/supported
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.3
+ - osd.4
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.3
+ - osd.4
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- workunit:
+ clients:
+ client.0:
+ - rados/test_python.sh
+
--- /dev/null
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.3
+ - osd.4
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-big.sh
+
--- /dev/null
+../../../distros/supported
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.3
+ - osd.4
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
--- /dev/null
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.3
+ - osd.4
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- workunit:
+ clients:
+ client.0:
+ - rbd/run_cli_tests.sh
--- /dev/null
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.3
+ - osd.4
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
--- /dev/null
+overrides:
+ ceph-deploy:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.3
+ - osd.4
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ debug ms: 1
+ osd:
+ debug osd: 10
+ mon:
+ debug mon: 10
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - osd.3
+ - osd.4
+ - osd.5
+- - mon.c
+ - osd.6
+ - osd.7
+ - osd.8
+- - client.0
+
+tasks:
+- install:
+ extras: yes
+- ssh_keys:
+- ceph-deploy:
+- workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+
--- /dev/null
+../../../distros/supported
\ No newline at end of file
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1]
+- [samba.0, client.0, client.1]
--- /dev/null
+../../../debug/mds_client.yaml
\ No newline at end of file
--- /dev/null
+roles:
+ - [mon.a, mds.a, osd.0, osd.1, client.0]
+
+tasks:
+ - nop:
+
--- /dev/null
+roles:
+- [mon.a, mds.a, mds.a-s]
+- [mon.b, mds.b, mds.b-s]
+- [mon.c, mds.c, mds.c-s]
+- [osd.0]
+- [osd.1]
+- [osd.2]
+- [client.0]
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ mds:
+ mds thrash exports: 1
+ mds debug subtrees: 1
+ mds debug scatterstat: 1
+ mds verify scatter: 1
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - suites/fsstress.sh
+
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+../../../../debug/mds_client.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
--- /dev/null
+tasks:
+- install:
+- ceph:
+- exec:
+ client.0:
+ - ceph mds set inline_data true --yes-i-really-mean-it
--- /dev/null
+../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ timeout: 6h
+ clients:
+ all:
+ - fs/misc
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - fs/test_o_trunc.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ filestore flush min: 0
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/iogen.sh
+
--- /dev/null
+tasks:
+- ceph-fuse: [client.0]
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client:
+ debug ms: 1
+ debug client: 20
+ mds:
+ debug ms: 1
+ debug mds: 20
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client:
+ ms_inject_delay_probability: 1
+ ms_inject_delay_type: osd
+ ms_inject_delay_max: 5
+ client_oc_max_dirty_age: 1
+tasks:
+- ceph-fuse:
+- exec:
+ client.0:
+ - cd $TESTDIR/mnt.* && dd if=/dev/zero of=./foo count=100
+ - sleep 2
+ - cd $TESTDIR/mnt.* && truncate --size 0 ./foo
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - libcephfs/test.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - libcephfs-java/test.sh
--- /dev/null
+tasks:
+- mds_creation_failure:
+- ceph-fuse:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
+
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [client.2]
+- [client.1]
+- [client.0]
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [client.1]
+- [client.0]
--- /dev/null
+../../../../debug/mds_client.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+- kclient:
--- /dev/null
+# make sure we get the same MPI version on all hosts
+os_type: ubuntu
+os_version: "14.04"
+
+tasks:
+- pexec:
+ clients:
+ - cd $TESTDIR
+ - wget http://ceph.com/qa/fsx-mpi.c
+ - mpicc fsx-mpi.c -o fsx-mpi
+ - rm fsx-mpi.c
+ - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
+- ssh_keys:
+- mpi:
+ exec: $TESTDIR/fsx-mpi 1MB -N 50000 -p 10000 -l 1048576
+ workdir: $TESTDIR/gmnt
+- pexec:
+ all:
+ - rm $TESTDIR/gmnt
+ - rm $TESTDIR/fsx-mpi
--- /dev/null
+# make sure we get the same MPI version on all hosts
+os_type: ubuntu
+os_version: "14.04"
+
+tasks:
+- pexec:
+ clients:
+ - cd $TESTDIR
+ - wget http://ceph.com/qa/ior.tbz2
+ - tar xvfj ior.tbz2
+ - cd ior
+ - ./configure
+ - make
+ - make install DESTDIR=$TESTDIR/binary/
+ - cd $TESTDIR/
+ - rm ior.tbz2
+ - rm -r ior
+ - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
+- ssh_keys:
+- mpi:
+ exec: $TESTDIR/binary/usr/local/bin/ior -e -w -r -W -b 10m -a POSIX -o $TESTDIR/gmnt/ior.testfile
+- pexec:
+ all:
+ - rm -f $TESTDIR/gmnt/ior.testfile
+ - rm -f $TESTDIR/gmnt
+ - rm -rf $TESTDIR/binary
--- /dev/null
+# make sure we get the same MPI version on all hosts
+os_type: ubuntu
+os_version: "14.04"
+
+tasks:
+- pexec:
+ clients:
+ - cd $TESTDIR
+ - wget http://ceph.com/qa/mdtest-1.9.3.tgz
+ - mkdir mdtest-1.9.3
+ - cd mdtest-1.9.3
+ - tar xvfz $TESTDIR/mdtest-1.9.3.tgz
+ - rm $TESTDIR/mdtest-1.9.3.tgz
+ - MPI_CC=mpicc make
+ - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
+- ssh_keys:
+- mpi:
+ exec: $TESTDIR/mdtest-1.9.3/mdtest -d $TESTDIR/gmnt -I 20 -z 5 -b 2 -R
+- pexec:
+ all:
+ - rm -f $TESTDIR/gmnt
+ - rm -rf $TESTDIR/mdtest-1.9.3
+ - rm -rf $TESTDIR/._mdtest-1.9.3
\ No newline at end of file
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ fs: btrfs
+ conf:
+ osd:
+ osd op thread timeout: 60
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - snaps/snaptest-0.sh
+ - snaps/snaptest-1.sh
+ - snaps/snaptest-2.sh
--- /dev/null
+tasks:
+- mds_thrash:
--- /dev/null
+tasks:
+- install:
+- ceph:
--- /dev/null
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0, mds.b-s-a]
--- /dev/null
+../../../../debug/mds_client.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+        ms inject delay type: osd mds
+ ms inject delay probability: .005
+ ms inject delay max: 1
--- /dev/null
+../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+../../../../debug/mds_client.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+ filestore flush min: 0
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mds:
+ mds inject traceless reply probability: .5
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+../../../../debug/mds_client.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ debug client: 1/20
+ debug ms: 0/10
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - libcephfs/test.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
--- /dev/null
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ ceph:
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+ mds: [--tool=memcheck]
+ ceph-fuse:
+ client.0:
+ valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
--- /dev/null
+roles:
+- [mon.0, mds.0, osd.0, hadoop.master.0]
+- [mon.1, osd.1, hadoop.slave.0]
+- [mon.2, hadoop.slave.1, client.0]
+
--- /dev/null
+tasks:
+- ssh_keys:
+- install:
+- ceph:
+- hadoop:
+- workunit:
+ clients:
+ client.0: [hadoop-internal-tests]
--- /dev/null
+tasks:
+- ssh_keys:
+- install:
+- ceph:
+- hadoop:
+- workunit:
+ clients:
+ client.0: [hadoop-wordcount]
--- /dev/null
+tasks:
+- install:
+- install:
+ project: samba
+ extra_packages: ['samba']
+- ceph:
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - direct_io
+
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ mds:
+ debug mds: 20
+ debug ms: 1
+- kclient:
+- workunit:
+ clients:
+ all:
+ - fs/misc
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - fs/test_o_trunc.sh
+
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+ filestore flush min: 0
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
--- /dev/null
+roles:
+- [mon.a, mds.a, osd.0, osd.1]
+- [mon.b, mon.c, osd.2, osd.3, client.0]
+- [client.1]
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- parallel:
+ - user-workload
+ - kclient-workload
+user-workload:
+ sequential:
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0:
+ - suites/iozone.sh
+kclient-workload:
+ sequential:
+ - kclient: [client.1]
+ - workunit:
+ clients:
+ client.1:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- parallel:
+ - user-workload
+ - kclient-workload
+user-workload:
+ sequential:
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+kclient-workload:
+ sequential:
+ - kclient: [client.1]
+ - workunit:
+ clients:
+ client.1:
+ - kernel_untar_build.sh
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+- thrashosds:
--- /dev/null
+tasks:
+- install:
+- ceph:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ filestore flush min: 0
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
--- /dev/null
+tasks:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+
+tasks:
+- install:
+- ceph:
+- kclient: [client.0]
+- knfsd:
+ client.0:
+ options: [rw,no_root_squash,async]
--- /dev/null
+../../../../clusters/extra-client.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- nfs:
+ client.1:
+ server: client.0
+ options: [rw,hard,intr,nfsvers=3]
--- /dev/null
+tasks:
+- nfs:
+ client.1:
+ server: client.0
+ options: [rw,hard,intr,nfsvers=4]
--- /dev/null
+tasks:
+- workunit:
+ timeout: 6h
+ clients:
+ client.1:
+ - kernel_untar_build.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - fs/misc/chmod.sh
+ - fs/misc/i_complete_vs_rename.sh
+ - fs/misc/trivial_sync.sh
+ #- fs/misc/multiple_rsync.sh
+ #- fs/misc/xattrs.sh
+# Once we can run multiple_rsync.sh and xattrs.sh we can change to this
+# - misc
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - suites/dbench-short.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ filestore flush min: 0
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - suites/ffsb.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - suites/iozone.sh
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/concurrent.sh
+# Options for rbd/concurrent.sh (default values shown)
+# env:
+# RBD_CONCURRENT_ITER: 100
+# RBD_CONCURRENT_COUNT: 5
+# RBD_CONCURRENT_DELAY: 5
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/image_read.sh
+# Options for rbd/image_read.sh (default values shown)
+# env:
+# IMAGE_READ_LOCAL_FILES: 'false'
+# IMAGE_READ_FORMAT: '2'
+# IMAGE_READ_VERBOSE: 'true'
+# IMAGE_READ_PAGE_SIZE: '4096'
+# IMAGE_READ_OBJECT_ORDER: '22'
+# IMAGE_READ_TEST_CLONES: 'true'
+# IMAGE_READ_DOUBLE_ORDER: 'true'
+# IMAGE_READ_HALF_ORDER: 'false'
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/kernel.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/map-snapshot-io.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/map-unmap.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - rbd/simple_big.sh
+
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+ image_size: 20480
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+ fs_type: btrfs
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+ fs_type: ext4
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+ image_size: 20480
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
--- /dev/null
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
+- [client.2]
+tasks:
+- install:
+- ceph:
+- rbd.xfstests:
+ client.0:
+ tests: 1-9 11-15 17 19-21 26-29 31-34 41 46-54 56 61 63-67 69-70 74-76 78-79 84-89 91
+ test_image: 'test_image-0'
+ scratch_image: 'scratch_image-0'
+ client.1:
+ tests: 92 100 103 105 108 110 116-121 124 126 129-132
+ test_image: 'test_image-1'
+ scratch_image: 'scratch_image-1'
+ client.2:
+ tests: 133-135 137-141 164-167 184 187-190 192 194 196 199 201 203 214-216 220-227 234 236-238 241 243-249 253 257-259 261-262 269 273 275 277-278
+ test_image: 'test_image-2'
+ scratch_image: 'scratch_image-2'
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+- thrashosds:
--- /dev/null
+tasks:
+- install:
+- ceph:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
--- /dev/null
+tasks:
+- rbd:
+ all:
+ image_size: 20480
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
--- /dev/null
+tasks:
+- rbd:
+ all:
+ image_size: 20480
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [client.1]
+- [client.0]
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- locktest: [client.0, client.1]
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
+- [mds.a]
+- [client.0]
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ mds:
+ mds log segment size: 16384
+ mds log max segments: 1
+- restart:
+ exec:
+ client.0:
+ - test-backtraces.py
--- /dev/null
+roles:
+- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
--- /dev/null
+roles:
+- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
+- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - fs/misc
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ ms_inject_delay_probability: 1
+ ms_inject_delay_type: osd
+ ms_inject_delay_max: 5
+ client_oc_max_dirty_age: 1
+- ceph-fuse:
+- exec:
+ client.0:
+ - dd if=/dev/zero of=./foo count=100
+ - sleep 2
+ - truncate --size 0 ./foo
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mds:
+ mds thrash exports: 1
--- /dev/null
+roles:
+- [mon.a, mds.a, osd.0, osd.1]
+- [mon.b, mon.c, osd.2, osd.3, client.0]
+- [client.1]
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+- parallel:
+ - user-workload
+ - kclient-workload
+user-workload:
+ sequential:
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0:
+ - suites/iozone.sh
+kclient-workload:
+ sequential:
+ - kclient: [client.1]
+ - workunit:
+ clients:
+ client.1:
+ - suites/dbench.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+- parallel:
+ - user-workload
+ - kclient-workload
+user-workload:
+ sequential:
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+kclient-workload:
+ sequential:
+ - kclient: [client.1]
+ - workunit:
+ clients:
+ client.1:
+ - kernel_untar_build.sh
--- /dev/null
+tasks:
+- ceph-fuse: [client.0]
+- samba:
+ samba.0:
+ ceph: "{testdir}/mnt.0"
+
--- /dev/null
+tasks:
+- kclient: [client.0]
+- samba:
+ samba.0:
+ ceph: "{testdir}/mnt.0"
+
--- /dev/null
+tasks:
+- samba:
--- /dev/null
+tasks:
+- localdir: [client.0]
+- samba:
+ samba.0:
+ ceph: "{testdir}/mnt.0"
--- /dev/null
+roles:
+- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5]
+- [client.0]
--- /dev/null
+roles:
+- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
+- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
+- [client.0]
--- /dev/null
+../../../../debug/mds_client.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
--- /dev/null
+tasks:
+- install:
+- ceph:
+- exec:
+ client.0:
+ - ceph mds set inline_data true --yes-i-really-mean-it
--- /dev/null
+tasks:
+- ceph-fuse:
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- kclient:
--- /dev/null
+../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client:
+ fuse_default_permissions: 0
+tasks:
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
--- /dev/null
+tasks:
+- workunit:
+ timeout: 5h
+ clients:
+ all:
+ - fs/misc
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - fs/test_o_trunc.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ filestore flush min: 0
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/iogen.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client:
+ debug ms: 1
+ debug client: 20
+ mds:
+ debug ms: 1
+ debug mds: 20
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client:
+ ms_inject_delay_probability: 1
+ ms_inject_delay_type: osd
+ ms_inject_delay_max: 5
+ client_oc_max_dirty_age: 1
+tasks:
+- exec:
+ client.0:
+ - dd if=/dev/zero of=./foo count=100
+ - sleep 2
+ - truncate --size 0 ./foo
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
--- /dev/null
+roles:
+- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5]
+- [client.0]
--- /dev/null
+roles:
+- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
+- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
+- [client.0]
--- /dev/null
+../../../../debug/mds_client.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
--- /dev/null
+tasks:
+- install:
+- ceph:
+- exec:
+ client.0:
+ - ceph mds set inline_data true --yes-i-really-mean-it
--- /dev/null
+../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - libcephfs/test.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - libcephfs-java/test.sh
--- /dev/null
+tasks:
+- mds_creation_failure:
+- ceph-fuse:
+- workunit:
+ clients:
+ all: [fs/misc/trivial_sync.sh]
--- /dev/null
+roles:
+- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5]
+- [client.0]
--- /dev/null
+roles:
+- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
+- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
+- [client.0]
--- /dev/null
+../../../../debug/mds_client.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ debug client: 1/20
+ debug ms: 0/10
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - libcephfs/test.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
--- /dev/null
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ ceph:
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+ mds: [--tool=memcheck]
+ ceph-fuse:
+ client.0:
+ valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
--- /dev/null
+roles:
+- [mon.0, mon.1, mon.2, mds.0, client.0]
+- [osd.0]
+- [osd.1]
+- [osd.2]
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/ext4.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/xfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- thrashosds:
+ chance_down: 1.0
+ powercycle: true
+ timeout: 600
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client.0:
+ admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 60
+- admin_socket:
+ client.0:
+ objecter_requests:
+ test: "http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - fs/misc
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ filestore flush min: 0
+ mds:
+ debug ms: 1
+ debug mds: 20
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client:
+ ms_inject_delay_probability: 1
+ ms_inject_delay_type: osd
+ ms_inject_delay_max: 5
+ client_oc_max_dirty_age: 1
+tasks:
+- ceph-fuse:
+- exec:
+ client.0:
+ - dd if=/dev/zero of=./foo count=100
+ - sleep 2
+ - truncate --size 0 ./foo
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 1800
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
--- /dev/null
+../../../../clusters/fixed-2.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 1500
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - wrongly marked me down
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+ - rados/test_pool_quota.sh
+
--- /dev/null
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ client.0:
+ - cls
--- /dev/null
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ client.0:
+ - rados/test_python.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ client.0:
+ - rados/stress_watch.sh
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-big.sh
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mostlyread.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon min osdmap epochs: 25
+ paxos service trim min: 5
+tasks:
+- install:
+- ceph:
--- /dev/null
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5, client.0]
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, mon.d, mon.e, osd.0, osd.1, osd.2]
+- [mon.f, mon.g, mon.h, mon.i, mds.a, osd.3, osd.4, osd.5, client.0]
--- /dev/null
+../../../../fs/xfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms inject delay type: mon
+ ms inject delay probability: .005
+ ms inject delay max: 1
+ ms inject internal delays: .002
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 90
+ thrash_delay: 1
+ thrash_store: true
+ thrash_many: true
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ mon client ping interval: 4
+ mon client ping timeout: 12
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ thrash_many: true
+ freeze_mon_duration: 20
+ freeze_mon_probability: 10
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mon:
+ paxos min: 10
+ paxos trim min: 10
+tasks:
+- mon_thrash:
+ revive_delay: 90
+ thrash_delay: 1
+ thrash_many: true
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mon:
+ paxos min: 10
+ paxos trim min: 10
+tasks:
+- mon_thrash:
+ revive_delay: 90
+ thrash_delay: 1
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - slow request
+tasks:
+- exec:
+ client.0:
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
--- /dev/null
+tasks:
+- exec:
+ client.0:
+ - ceph_test_rados_delete_pools_parallel --debug_objecter 20 --debug_ms 1 --debug_rados 20 --debug_monc 20
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - mon/pool_ops.sh
+ - mon/crush_ops.sh
+ - mon/osd.sh
+ - mon/caps.sh
+
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
--- /dev/null
+roles:
+- [mon.a, mon.d, mon.g, mon.j, mon.m, mon.p, mon.s, osd.0]
+- [mon.b, mon.e, mon.h, mon.k, mon.n, mon.q, mon.t, mds.a]
+- [mon.c, mon.f, mon.i, mon.l, mon.o, mon.r, mon.u, osd.1]
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, osd.0, osd.1, mds.a]
--- /dev/null
+roles:
+- [mon.a, mon.c, mon.e, osd.0]
+- [mon.b, mon.d, mon.f, osd.1, mds.a]
--- /dev/null
+roles:
+- [mon.a, mon.d, mon.g, osd.0]
+- [mon.b, mon.e, mon.h, mds.a]
+- [mon.c, mon.f, mon.i, osd.1]
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - slow request
+ - .*clock.*skew.*
+ - clocks not synchronized
+- mon_clock_skew_check:
+ expect-skew: false
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mon.b:
+ clock offset: 10
+tasks:
+- install:
+- ceph:
+ wait-for-healthy: false
+ log-whitelist:
+ - slow request
+ - .*clock.*skew.*
+ - clocks not synchronized
+- mon_clock_skew_check:
+ expect-skew: true
--- /dev/null
+tasks:
+- install:
+- ceph:
+- mon_recovery:
--- /dev/null
+roles:
+- [mon.0, osd.0, osd.1, osd.2]
+- [osd.3, osd.4, osd.5]
+- [client.0]
+
+tasks:
+- install:
+- ceph:
+- ceph_objectstore_tool:
+ objects: 20
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mon:
+ debug mon: 20
+ debug ms: 1
+ debug paxos: 20
+ mon warn on legacy crush tunables: false
+ mon min osdmap epochs: 3
+ osd:
+ osd map cache size: 2
+ osd map max advance: 1
+ debug filestore: 20
+ debug journal: 20
+ debug ms: 1
+ debug osd: 20
+ log-whitelist:
+ - osd_map_cache_size
+ - slow request
+ - scrub mismatch
+ - ScrubResult
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - mon.b
+ - mon.c
+ - osd.2
+ - client.0
+tasks:
+- install:
+ branch: v0.80.8
+- print: '**** done installing firefly'
+- ceph:
+ fs: xfs
+- print: '**** done ceph'
+- full_sequential:
+ - ceph_manager.create_pool:
+ args: ['toremove']
+ kwargs:
+ pg_num: 4096
+ - sleep:
+ duration: 30
+ - ceph_manager.wait_for_clean: null
+ - radosbench:
+ clients: [client.0]
+ time: 120
+ size: 1
+ pool: toremove
+ create_pool: false
+ - ceph_manager.remove_pool:
+ args: ['toremove']
+ - sleep:
+ duration: 10
+ - ceph.restart:
+ daemons:
+ - osd.0
+ - osd.1
+ - osd.2
+ - sleep:
+ duration: 30
+ - ceph_manager.wait_for_clean: null
+ - radosbench:
+ clients: [client.0]
+ time: 60
+ size: 1
+ - ceph_manager.create_pool:
+ args: ['newpool']
+ - loop:
+ count: 100
+ body:
+ - ceph_manager.set_pool_property:
+ args: ['newpool', 'min_size', 2]
+ - ceph_manager.set_pool_property:
+ args: ['newpool', 'min_size', 1]
+ - sleep:
+ duration: 30
+ - ceph_manager.wait_for_clean: null
+ - loop:
+ count: 100
+ body:
+ - ceph_manager.set_pool_property:
+ args: ['newpool', 'min_size', 2]
+ - ceph_manager.set_pool_property:
+ args: ['newpool', 'min_size', 1]
+ - sleep:
+ duration: 30
+ - ceph_manager.wait_for_clean: null
+ - sleep:
+ duration: 30
+ - install.upgrade:
+ mon.a: null
+ - ceph.restart:
+ daemons:
+ - osd.0
+ - osd.1
+ - osd.2
+ - sleep:
+ duration: 30
+ - radosbench:
+ clients: [client.0]
+ time: 30
+ size: 1
+ - ceph_manager.wait_for_clean: null
--- /dev/null
+roles:
+- [mon.a, mds.a, osd.0, osd.1, osd.2, client.0]
+
+overrides:
+ ceph:
+ fs: xfs
+ conf:
+ osd:
+ filestore xfs extsize: true
+
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/test_alloc_hint.sh
--- /dev/null
+roles:
+- [mon.0, osd.0, osd.1, mds.a, client.0]
+tasks:
+- install:
+- ceph:
+- exec:
+ client.0:
+ - ceph_test_filejournal
--- /dev/null
+roles:
+- [mon.0, osd.0, osd.1, mds.a, client.0]
+tasks:
+- install:
+- ceph:
+ conf:
+ global:
+ journal aio: true
+- filestore_idempotent:
--- /dev/null
+roles:
+- [mon.0, osd.0, osd.1, mds.a, client.0]
+tasks:
+- install:
+- ceph:
+- filestore_idempotent:
--- /dev/null
+roles:
+- - mon.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+- - mds.a
+ - osd.3
+ - osd.4
+ - osd.5
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd min pg log entries: 25
+ osd max pg log entries: 100
+- exec:
+ client.0:
+ - ceph osd pool create foo 64
+ - rados -p foo bench 60 write -b 1024 --no-cleanup
+ - ceph osd pool set foo size 3
+ - ceph osd out 0 1
+- sleep:
+ duration: 60
+- exec:
+ client.0:
+ - ceph osd in 0 1
+- sleep:
+ duration: 60
--- /dev/null
+roles:
+- [mon.0, osd.0, osd.1, mds.a, client.0]
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - osdc/stress_objectcacher.sh
--- /dev/null
+roles:
+- [mon.0, osd.0, osd.1, mds.a, client.0]
+tasks:
+- install:
+- ceph:
+- exec:
+ client.0:
+ - mkdir $TESTDIR/ostest && cd $TESTDIR/ostest && ceph_test_objectstore
+ - rm -rf $TESTDIR/ostest
--- /dev/null
+roles:
+- - mon.a
+ - osd.0
+ - mds.a
+ - osd.1
+ - client.a
+tasks:
+- install:
+- ceph:
+- admin_socket:
+ osd.0:
+ version:
+ git_version:
+ help:
+ config show:
+ config set filestore_dump_file /tmp/foo:
+ perf dump:
+ perf schema:
--- /dev/null
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - had wrong client addr
+ - had wrong cluster addr
+ - must scrub before tier agent can activate
+- workunit:
+ clients:
+ all:
+ - cephtool
+ - mon/pool_ops.sh
--- /dev/null
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+- dump_stuck:
--- /dev/null
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+- ec_lost_unfound:
--- /dev/null
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+- rep_lost_unfound_delete:
--- /dev/null
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+- lost_unfound:
--- /dev/null
+roles:
+- - mon.0
+ - mon.1
+ - mon.2
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/test_mon_config_key.py
--- /dev/null
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - osd.0
+ - osd.1
+ - mds.0
+ - client.0
+tasks:
+- install:
+- ceph:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- workunit:
+ clients:
+ all:
+ - mon/workloadgen.sh
+ env:
+ LOADGEN_NUM_OSDS: "5"
+ VERBOSE: "1"
+ DURATION: "600"
--- /dev/null
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ conf:
+ osd:
+ osd min pg log entries: 5
+- osd_backfill:
--- /dev/null
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ conf:
+ osd:
+ osd min pg log entries: 5
+- osd_recovery.test_incomplete_pgs:
--- /dev/null
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ conf:
+ osd:
+ osd min pg log entries: 5
+- osd_recovery:
--- /dev/null
+roles:
+- - mon.0
+ - mon.1
+ - mon.2
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+tasks:
+- install:
+- ceph:
+    conf:
+      global:
+        osd pool default min size: 1
+ log-whitelist:
+ - objects unfound and apparently lost
+- peer:
--- /dev/null
+roles:
+- - mon.a
+ - osd.0
+ - osd.1
+ - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - had wrong client addr
+ - had wrong cluster addr
+- workunit:
+ clients:
+ all:
+ - rados/test_rados_tool.sh
--- /dev/null
+roles:
+- - mon.0
+ - mon.1
+ - mon.2
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - had wrong client addr
+- rest-api: [client.0]
+- workunit:
+ clients:
+ all:
+ - rest/test.py
--- /dev/null
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix-small.sh
--- /dev/null
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - slow request
+- exec:
+ client.0:
+ - ceph osd pool create base 4
+ - ceph osd pool create cache 4
+ - ceph osd tier add base cache
+ - ceph osd tier cache-mode cache writeback
+ - ceph osd tier set-overlay base cache
+ - ceph osd pool set cache hit_set_type bloom
+ - ceph osd pool set cache hit_set_count 8
+ - ceph osd pool set cache hit_set_period 60
+ - ceph osd pool set cache target_max_objects 500
+- background_exec:
+ mon.a:
+ - while true
+ - do sleep 30
+ - echo forward
+ - ceph osd tier cache-mode cache forward
+ - sleep 10
+ - ceph osd pool set cache cache_target_full_ratio .001
+ - echo cache-try-flush-evict-all
+ - rados -p cache cache-try-flush-evict-all
+ - sleep 5
+ - echo cache-flush-evict-all
+ - rados -p cache cache-flush-evict-all
+ - sleep 5
+ - echo remove overlay
+ - ceph osd tier remove-overlay base
+ - sleep 20
+ - echo add writeback overlay
+ - ceph osd tier cache-mode cache writeback
+ - ceph osd pool set cache cache_target_full_ratio .8
+ - ceph osd tier set-overlay base cache
+ - done
+- rados:
+ clients: [client.0]
+ pools: [base]
+ max_seconds: 600
+ ops: 400000
+ objects: 10000
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
--- /dev/null
+../../../../clusters/fixed-2.yaml
\ No newline at end of file
--- /dev/null
+openstack:
+ machine:
+ disk: 40 # GB
+ ram: 8000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/ext4.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/xfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms tcp read timeout: 5
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms inject delay type: osd
+ ms inject delay probability: .005
+ ms inject delay max: 1
+ ms inject internal delays: .002
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon min osdmap epochs: 2
+ osd:
+ osd map cache size: 1
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - osd_map_cache_size
+- thrashosds:
+ timeout: 1800
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_test_map_discontinuity: 0.5
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client.0:
+ admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 60
+- admin_socket:
+ client.0:
+ objecter_requests:
+ test: "http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd
+ m=1 k=2
+ - ceph osd pool create base 4 erasure teuthologyprofile
+ - ceph osd pool create cache 4
+ - ceph osd tier add base cache
+ - ceph osd tier cache-mode cache writeback
+ - ceph osd tier set-overlay base cache
+ - ceph osd pool set cache hit_set_type bloom
+ - ceph osd pool set cache hit_set_count 8
+ - ceph osd pool set cache hit_set_period 60
+ - ceph osd pool set cache target_max_objects 5000
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 10000
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create base 4
+ - ceph osd pool create cache 4
+ - ceph osd tier add base cache
+ - ceph osd tier cache-mode cache writeback
+ - ceph osd tier set-overlay base cache
+ - ceph osd pool set cache hit_set_type bloom
+ - ceph osd pool set cache hit_set_count 8
+ - ceph osd pool set cache hit_set_period 60
+ - ceph osd pool set cache target_max_objects 250
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create base 4
+ - ceph osd pool create cache 4
+ - ceph osd tier add base cache
+ - ceph osd tier cache-mode cache writeback
+ - ceph osd tier set-overlay base cache
+ - ceph osd pool set cache hit_set_type bloom
+ - ceph osd pool set cache hit_set_count 8
+ - ceph osd pool set cache hit_set_period 3600
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ flush: 50
+ try_flush: 50
+ evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create base 4
+ - ceph osd pool create cache 4
+ - ceph osd tier add base cache
+ - ceph osd tier cache-mode cache writeback
+ - ceph osd tier set-overlay base cache
+ - ceph osd pool set cache hit_set_type bloom
+ - ceph osd pool set cache hit_set_count 8
+ - ceph osd pool set cache hit_set_period 3600
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ flush: 50
+ try_flush: 50
+ evict: 50
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - shard.*missing
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 1800
+ unique_pool: true
+ ec_pool: true
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ ec_pool: true
+ op_weights:
+ read: 45
+ write: 0
+ append: 45
+ delete: 10
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 1800
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
--- /dev/null
+tasks:
+- install:
+- ceph:
--- /dev/null
+../../../../clusters/fixed-2.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+tasks:
+- mon_recovery:
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ debug monc: 20
+tasks:
+- workunit:
+ timeout: 6h
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - cls
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
--- /dev/null
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ ceph:
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+ mds: [--tool=memcheck]
--- /dev/null
+tasks:
+- install:
+- ceph:
--- /dev/null
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create cache 4
+ - ceph osd tier add rbd cache
+ - ceph osd tier cache-mode cache writeback
+ - ceph osd tier set-overlay rbd cache
+ - ceph osd pool set cache hit_set_type bloom
+ - ceph osd pool set cache hit_set_count 8
+ - ceph osd pool set cache hit_set_period 60
+ - ceph osd pool set cache target_max_objects 250
--- /dev/null
+../../../../clusters/fixed-1.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/copy.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/copy.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/run_cli_tests.sh
+
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_lock_fence.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: false
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: true
--- /dev/null
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: true
+ rbd cache max dirty: 0
--- /dev/null
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create cache 4
+ - ceph osd tier add rbd cache
+ - ceph osd tier cache-mode cache writeback
+ - ceph osd tier set-overlay rbd cache
+ - ceph osd pool set cache hit_set_type bloom
+ - ceph osd pool set cache hit_set_count 8
+ - ceph osd pool set cache hit_set_period 60
+ - ceph osd pool set cache target_max_objects 250
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+../basic/fs
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ log-whitelist:
+ - wrongly marked me down
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "1"
--- /dev/null
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 5000
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ env:
+ RBD_FEATURES: "1"
--- /dev/null
+tasks:
+- qemu:
+ all:
+ test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/bonnie.sh
+exclude_arch: armv7l
--- /dev/null
+tasks:
+- qemu:
+ all:
+ test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/fsstress.sh;h=firefly
+exclude_arch: armv7l
--- /dev/null
+tasks:
+- qemu:
+ all:
+ test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/iozone.sh
+ image_size: 20480
+exclude_arch: armv7l
--- /dev/null
+tasks:
+- qemu:
+ all:
+ type: block
+ num_rbd: 2
+ test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/run_xfstests_qemu.sh
+exclude_arch: armv7l
--- /dev/null
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+- cram:
+ clients:
+ client.0:
+ - http://git.ceph.com/?p=ceph.git;a=blob_plain;hb=firefly;f=src/test/cli-integration/rbd/formatted-output.t
+
--- /dev/null
+exclude_arch: armv7l
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: false
+- workunit:
+ clients:
+ all: [rbd/qemu-iotests.sh]
--- /dev/null
+exclude_arch: armv7l
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: true
+- workunit:
+ clients:
+ all: [rbd/qemu-iotests.sh]
--- /dev/null
+exclude_arch: armv7l
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: true
+ rbd cache max dirty: 0
+- workunit:
+ clients:
+ all: [rbd/qemu-iotests.sh]
--- /dev/null
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/rbd_snaps_ops.sh
+
--- /dev/null
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: false
+- workunit:
+ clients:
+ all: [rbd/read-flags.sh]
--- /dev/null
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: true
+- workunit:
+ clients:
+ all: [rbd/read-flags.sh]
--- /dev/null
+roles:
+- [mon.a, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ rbd cache: true
+ rbd cache max dirty: 0
+- workunit:
+ clients:
+ all: [rbd/read-flags.sh]
--- /dev/null
+tasks:
+- install:
+- ceph:
--- /dev/null
+../../../../clusters/fixed-2.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/xfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+tasks:
+- exec:
+ client.0:
+ - ceph osd pool create cache 4
+ - ceph osd tier add rbd cache
+ - ceph osd tier cache-mode cache writeback
+ - ceph osd tier set-overlay rbd cache
+ - ceph osd pool set cache hit_set_type bloom
+ - ceph osd pool set cache hit_set_count 8
+ - ceph osd pool set cache hit_set_period 60
+ - ceph osd pool set cache target_max_objects 250
+- thrashosds:
+ timeout: 1200
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "1"
--- /dev/null
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 2000
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd cache: true
--- /dev/null
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 2000
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd cache: true
+ rbd cache max dirty: 0
--- /dev/null
+tasks:
+- rbd_fsx:
+ clients: [client.0]
+ ops: 2000
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd cache: false
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+ - client.0
+
+tasks:
+- install:
+- ceph:
+- rest-api: [client.0]
+- workunit:
+ clients:
+ client.0:
+ - rest/test.py
--- /dev/null
+../../../../clusters/fixed-2.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/ext4.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/xfs.yaml
\ No newline at end of file
--- /dev/null
+../../../rgw_pool_type
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- workunit:
+ clients:
+ client.0:
+ - rgw/s3_bucket_quota.pl
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- workunit:
+ clients:
+ client.0:
+ - rgw/s3_multipart_upload.pl
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3readwrite:
+ client.0:
+ rgw_server: client.0
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3roundtrip:
+ client.0:
+ rgw_server: client.0
+ roundtrip:
+ bucket: rttest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3tests:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- swift:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- workunit:
+ clients:
+ client.0:
+ - rgw/s3_user_quota.pl
--- /dev/null
+roles:
+- [mon.a, osd.0, osd.1, client.0, client.1]
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ debug ms: 1
+ rgw gc obj min wait: 15
+ rgw data log window: 30
+ osd:
+ debug ms: 1
+        debug objclass: 20
+ client.0:
+ rgw region: region0
+ rgw zone: r0z0
+ rgw region root pool: .rgw.region.0
+ rgw zone root pool: .rgw.zone.0
+ rgw gc pool: .rgw.gc.0
+ rgw user uid pool: .users.uid.0
+ rgw user keys pool: .users.0
+ rgw log data: True
+ rgw log meta: True
+ client.1:
+ rgw region: region0
+ rgw zone: r0z1
+ rgw region root pool: .rgw.region.0
+ rgw zone root pool: .rgw.zone.1
+ rgw gc pool: .rgw.gc.1
+ rgw user uid pool: .users.uid.1
+ rgw user keys pool: .users.1
+ rgw log data: False
+ rgw log meta: False
+- rgw:
+ regions:
+ region0:
+ api name: api1
+ is master: True
+ master zone: r0z0
+ zones: [r0z0, r0z1]
+ client.0:
+ system user:
+ name: client0-system-user
+ access key: 0te6NH5mcdcq0Tc5i8i2
+ secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
+ client.1:
+ system user:
+ name: client1-system-user
+ access key: 1te6NH5mcdcq0Tc5i8i3
+ secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw
+- radosgw-agent:
+ client.0:
+ max-entries: 10
+ src: client.0
+ dest: client.1
+- radosgw-admin:
--- /dev/null
+roles:
+- [mon.a, mds.a, osd.0, osd.1, client.0]
+- [mon.b, mon.c, osd.2, osd.3, client.1]
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ debug ms: 1
+ rgw gc obj min wait: 15
+ osd:
+ debug ms: 1
+        debug objclass: 20
+ client.0:
+ rgw region: region0
+ rgw zone: r0z1
+ rgw region root pool: .rgw.region.0
+ rgw zone root pool: .rgw.zone.0
+ rgw gc pool: .rgw.gc.0
+ rgw user uid pool: .users.uid.0
+ rgw user keys pool: .users.0
+ rgw log data: True
+ rgw log meta: True
+ client.1:
+ rgw region: region1
+ rgw zone: r1z1
+ rgw region root pool: .rgw.region.1
+ rgw zone root pool: .rgw.zone.1
+ rgw gc pool: .rgw.gc.1
+ rgw user uid pool: .users.uid.1
+ rgw user keys pool: .users.1
+ rgw log data: False
+ rgw log meta: False
+- rgw:
+ regions:
+ region0:
+ api name: api1
+ is master: True
+ master zone: r0z1
+ zones: [r0z1]
+ region1:
+ api name: api1
+ is master: False
+ master zone: r1z1
+ zones: [r1z1]
+ client.0:
+ system user:
+ name: client0-system-user
+ access key: 0te6NH5mcdcq0Tc5i8i2
+ secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
+ client.1:
+ system user:
+ name: client1-system-user
+ access key: 1te6NH5mcdcq0Tc5i8i3
+ secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw
+- radosgw-agent:
+ client.0:
+ src: client.0
+ dest: client.1
+ metadata-only: true
+- radosgw-admin:
--- /dev/null
+roles:
+- [mon.a, mds.a, osd.0, client.0, osd.1]
+tasks:
+- install:
+- ceph:
+ conf:
+ client:
+ debug ms: 1
+ rgw gc obj min wait: 15
+ osd:
+ debug ms: 1
+        debug objclass: 20
+- rgw:
+ client.0:
+- radosgw-admin:
--- /dev/null
+overrides:
+ s3readwrite:
+ s3:
+ user_id: s3readwrite-test-user
+ display_name: test user for the s3readwrite tests
+ email: tester@inktank
+ access_key: 2te6NH5mcdcq0Tc5i8i4
+ secret_key: Qy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXx
+ readwrite:
+ deterministic_file_names: True
+ duration: 30
+ bucket: testbucket
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
+roles:
+- [mon.a, mds.a, osd.0, osd.1, client.0]
+- [mon.b, mon.c, osd.2, osd.3, client.1]
+
+tasks:
+- install:
+- ceph:
+ conf:
+ client.1:
+ rgw region: default
+ rgw zone: r1z1
+ rgw region root pool: .rgw
+ rgw zone root pool: .rgw
+ rgw domain root: .rgw
+ rgw gc pool: .rgw.gc
+ rgw user uid pool: .users.uid
+ rgw user keys pool: .users
+- rgw:
+ client.0:
+ system user:
+ name: nr-system
+ access key: 0te6NH5mcdcq0Tc5i8i2
+ secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
+- s3readwrite:
+ client.0:
+ extra_args: ['--no-cleanup']
+ s3:
+ delete_user: False
+ readwrite:
+ writers: 1
+ readers: 0
+- rgw:
+ regions:
+ default:
+ api name: api1
+ is master: true
+ master zone: r1z1
+ zones: [r1z1]
+ client.1:
+ system user:
+ name: r2-system
+ access key: 1te6NH5mcdcq0Tc5i8i3
+ secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw
+- s3readwrite:
+ client.1:
+ s3:
+ create_user: False
+ readwrite:
+ writers: 0
+ readers: 2
+
--- /dev/null
+../../../rgw_pool_type/
\ No newline at end of file
--- /dev/null
+../../../../clusters/fixed-2.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+../../../rgw_pool_type/
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+ flavor: notcmalloc
+- ceph:
+- rgw:
+ client.0:
+ valgrind: [--tool=memcheck]
+- s3tests:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+tasks:
+- install:
+ flavor: notcmalloc
+- ceph:
+ conf:
+ client.0:
+ rgw region: zero
+ rgw zone: r0z1
+ rgw region root pool: .rgw.region.0
+ rgw zone root pool: .rgw.zone.0
+ rgw gc pool: .rgw.gc.0
+ rgw user uid pool: .users.uid.0
+ rgw user keys pool: .users.0
+ rgw log data: True
+ rgw log meta: True
+ client.1:
+ rgw region: one
+ rgw zone: r1z1
+ rgw region root pool: .rgw.region.1
+ rgw zone root pool: .rgw.zone.1
+ rgw gc pool: .rgw.gc.1
+ rgw user uid pool: .users.uid.1
+ rgw user keys pool: .users.1
+ rgw log data: False
+ rgw log meta: False
+- rgw:
+ default_idle_timeout: 300
+ regions:
+ zero:
+ api name: api1
+ is master: True
+ master zone: r0z1
+ zones: [r0z1]
+ one:
+ api name: api1
+ is master: False
+ master zone: r1z1
+ zones: [r1z1]
+ client.0:
+ valgrind: [--tool=memcheck]
+ system user:
+ name: client0-system-user
+ access key: 1te6NH5mcdcq0Tc5i8i2
+ secret key: 1y4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
+ client.1:
+ valgrind: [--tool=memcheck]
+ system user:
+ name: client1-system-user
+ access key: 0te6NH5mcdcq0Tc5i8i2
+ secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
+- radosgw-agent:
+ client.0:
+ src: client.0
+ dest: client.1
+ metadata-only: true
+- s3tests:
+ client.0:
+ idle_timeout: 300
+ rgw_server: client.0
--- /dev/null
+tasks:
+- install:
+ flavor: notcmalloc
+- ceph:
+- rgw:
+ client.0:
+ valgrind: [--tool=memcheck]
+- swift:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ lockdep: true
+ mon:
+ lockdep: true
--- /dev/null
+overrides:
+ install:
+ ceph:
+ flavor: notcmalloc
+ ceph:
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+ mds: [--tool=memcheck]
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1]
+- [samba.0, client.0, client.1]
--- /dev/null
+../../../debug/mds_client.yaml
\ No newline at end of file
--- /dev/null
+../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+# we currently can't install Samba on RHEL; need a gitbuilder and code updates
+os_type: ubuntu
+
+tasks:
+- install:
+- install:
+ project: samba
+ extra_packages: ['samba']
+- ceph:
--- /dev/null
+tasks:
+- ceph-fuse: [client.0]
+- samba:
+ samba.0:
+ ceph: "{testdir}/mnt.0"
+
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- kclient: [client.0]
+- samba:
+ samba.0:
+ ceph: "{testdir}/mnt.0"
+
--- /dev/null
+tasks:
+- samba:
--- /dev/null
+tasks:
+- localdir: [client.0]
+- samba:
+ samba.0:
+ ceph: "{testdir}/mnt.0"
--- /dev/null
+tasks:
+- cifs-mount:
+ client.1:
+ share: ceph
+- workunit:
+ clients:
+ client.1:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- cifs-mount:
+ client.1:
+ share: ceph
+- workunit:
+ clients:
+ client.1:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- cifs-mount:
+ client.1:
+ share: ceph
+- workunit:
+ clients:
+ client.1:
+ - kernel_untar_build.sh
+
--- /dev/null
+tasks:
+- pexec:
+ client.1:
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse: [client.0]
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - direct_io
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - libcephfs/test.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rados/test_python.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "1"
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ env:
+ RBD_FEATURES: "1"
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms die on skipped message: false
+tasks:
+- install:
+- ceph:
+- rbd:
+ all:
+ image_size: 20480
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+roles:
+- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1]
+- [client.1]
+- [client.0]
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- locktest: [client.0, client.1]
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rgw: [client.0]
+- s3tests:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+roles:
+- [mon.a, mon.d, osd.0]
+- [mon.b, mon.e, mds.a]
+- [mon.c, mon.f, osd.1]
--- /dev/null
+tasks:
+- install:
+- ceph:
+- mon_recovery:
--- /dev/null
+roles:
+- [mon.0, osd.0, osd.1, mds.a, client.0]
+tasks:
+- install:
+- ceph:
+- filestore_idempotent:
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ conf:
+ osd:
+ osd min pg log entries: 5
+- osd_backfill:
--- /dev/null
+roles:
+- - mon.a
+ - mds.0
+ - osd.0
+- - osd.1
+- - osd.2
+- - osd.3
+- - osd.4
+- - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix-small.sh
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+roles:
+- [mon.a, osd.0, osd.1, osd.2]
+- [mds.a, osd.3, osd.4, osd.5]
+- [client.0]
--- /dev/null
+../../../../fs/xfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+- thrashosds:
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client.0:
+ admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 60
+- admin_socket:
+ client.0:
+ objecter_requests:
+ test: "http://ceph.newdream.net/git/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
--- /dev/null
+tasks:
+- rbd:
+ all:
+ image_size: 20480
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - libcephfs/test.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- mon_recovery:
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - cls
--- /dev/null
+tasks:
+- install:
+- ceph:
+- rgw:
+ client.0:
+ valgrind: [--tool=memcheck]
+- s3tests:
+ default_idle_timeout: 300
+ client.0:
+ rgw_server: client.0
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
--- /dev/null
+overrides:
+ ceph:
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+ mds: [--tool=memcheck]
--- /dev/null
+../../../../clusters/fixed-3.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - snaps
--- /dev/null
+tasks:
+- install:
+- ceph:
+- kclient:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
--- /dev/null
+roles:
+- [mon.0, mds.a, osd.0]
+- [mon.1, osd.1]
+- [mon.2, osd.2]
+- [osd.3]
+- [osd.4]
+- [osd.5]
+- [osd.6]
+- [osd.7]
+- [osd.8]
+- [osd.9]
+- [osd.10]
+- [osd.11]
+- [osd.12]
+- [osd.13]
+- [osd.14]
+- [osd.15]
+- [client.0]
--- /dev/null
+roles:
+- [mon.0, mds.a, osd.0, osd.1, osd.2]
+- [mon.1, mon.2, client.0]
--- /dev/null
+roles:
+- [mon.0, mds.a, osd.0]
+- [mon.1, osd.1]
+- [mon.2, osd.2]
+- [osd.3]
+- [osd.4]
+- [osd.5]
+- [osd.6]
+- [osd.7]
+- [client.0]
--- /dev/null
+../../../../fs/btrfs.yaml
\ No newline at end of file
--- /dev/null
+../../../../fs/xfs.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+- thrashosds:
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+- thrashosds:
+ op_delay: 1
+ chance_down: 10
--- /dev/null
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+- thrashosds:
+ chance_down: 50
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/bonnie.sh
--- /dev/null
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 1800
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
--- /dev/null
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
--- /dev/null
+overrides:
+ ceph:
+ fs: btrfs
+ conf:
+ osd:
+ osd op thread timeout: 60
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 500
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/bonnie.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/dbench-short.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/ffsb.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/fio.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/fsx.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/fsync-tester.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/iogen.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/iozone-sync.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/iozone.sh
--- /dev/null
+tasks:
+- install:
+- ceph:
+- tgt:
+- iscsi:
+- workunit:
+ clients:
+ all:
+ - suites/pjd.sh
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: bobtail
+- ceph:
+- install.upgrade:
+ all:
+ tag: v0.61.5
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.61.5
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.61.6
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - suites/dbench.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/iogen.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+    conf:
+      mon:
+        paxos service trim min: 5
+        mon min osdmap epochs: 25
+    fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: bobtail
+- ceph:
--- /dev/null
+tasks:
+- install:
+ tag: v0.61.1
+- ceph:
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ tag: v0.61.3
+- ceph.restart:
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ tag: v0.61.4
+- ceph.restart:
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ tag: v0.61.5
+- ceph.restart:
--- /dev/null
+tasks:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
--- /dev/null
+workload:
+ rados:
+ clients: [client.0]
+ ops: 2000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- rados:
+ clients: [client.0]
+ ops: 2000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: bobtail
+- ceph:
+- install.upgrade:
+ all:
+ tag: v0.61.5
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.61.5
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.61.6
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ rados:
+ clients: [client.0]
+ ops: 2000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- rados:
+ clients: [client.0]
+ ops: 2000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: bobtail
+- ceph:
+- install.upgrade:
+ all:
+ tag: v0.61.5
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.61.5
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.61.6
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+
+
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- workunit:
+ clients:
+ client.0:
+ - rbd/copy.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_lock_fence.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: bobtail
+- ceph:
+- install.upgrade:
+ all:
+ tag: v0.61.5
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.61.5
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.61.6
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ rgw: [client.0]
+ s3tests:
+ client.0:
+ rgw_server: client.0
+
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 30
+ - ceph.restart: [rgw.client.0]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 30
+ - ceph.restart: [rgw.client.0]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: cuttlefish
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [rgw.client.0]
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- swift:
+ client.0:
+ rgw_server: client.0
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- swift:
+ client.0:
+ rgw_server: client.0
+
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ fs: xfs
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
--- /dev/null
+tasks:
+ - install.upgrade:
+ mon.a:
+ branch: emperor
+ mon.b:
+ branch: emperor
+ - ceph.restart:
+ - parallel:
+ - workload2
+ - upgrade-sequence
--- /dev/null
+workload2:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
--- /dev/null
+workload2:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
--- /dev/null
+workload2:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
--- /dev/null
+workload2:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ branch: emperor
+ mon.b:
+ branch: emperor
+ - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ branch: emperor
+ mon.b:
+ branch: emperor
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
--- /dev/null
+tasks:
+- rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+tasks:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- workunit:
+ branch: dumpling
+ clients:
+ client.1:
+ - rados/test.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+- rgw: [client.1]
+- s3tests:
+ client.1:
+ rgw_server: client.1
--- /dev/null
+tasks:
+# Uncomment the next line if you have not already included rgw_s3tests.yaml in your test.
+# - rgw: [client.1]
+- swift:
+ client.1:
+ rgw_server: client.1
--- /dev/null
+../../../../distros/supported
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- print: "**** done install"
+- ceph:
+ fs: xfs
+- print: "**** done ceph"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test-upgrade-firefly.sh
+ - cls
+
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ mon.b:
+ - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ mon.b:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
--- /dev/null
+tasks:
+ - install.upgrade:
+ client.0:
+ - print: "**** done install.upgrade"
--- /dev/null
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
--- /dev/null
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - workunit:
+ clients:
+ client.1:
+ - rados/test.sh
--- /dev/null
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+
--- /dev/null
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+ - rgw: [client.1]
+ - s3tests:
+ client.1:
+ rgw_server: client.1
+ branch: dumpling
--- /dev/null
+tasks:
+# no need for rgw when we use +
+# - rgw: [client.1]
+ - swift:
+ client.1:
+ rgw_server: client.1
--- /dev/null
+../../../../distros/supported
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+roles:
+- - mon.a
+ - mon.b
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - mon.c
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ fs: xfs
--- /dev/null
+tasks:
+- install.upgrade:
+ osd.0:
+- ceph.restart:
+ daemons: [osd.0, osd.1, osd.2]
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ thrash_primary_affinity: false
--- /dev/null
+tasks:
+- ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test-upgrade-firefly.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+tasks:
+- ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test-upgrade-firefly.sh
--- /dev/null
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 1800
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ mon.c:
+- ceph.restart:
+ daemons: [mon.c]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+- ceph.wait_for_mon_quorum: [a, b, c]
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test-upgrade-firefly.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
--- /dev/null
+tasks:
+- rgw:
+ default_idle_timeout: 300
+ client.0:
+- swift:
+ client.0:
+ rgw_server: client.0
+
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+../../../../distros/supported
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: cuttlefish
+- ceph:
+- install.upgrade:
+ all:
+ tag: v0.67.1
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.1
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.2
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.3
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.4
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.5
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.7
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - suites/dbench.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/iogen.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: cuttlefish
+- ceph:
+- install.upgrade:
+ all:
+ tag: v0.67.1
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.1
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.2
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.3
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.4
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.5
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.7
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ rados:
+ clients: [client.0]
+ ops: 2000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- rados:
+ clients: [client.0]
+ ops: 2000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: cuttlefish
+- ceph:
+- install.upgrade:
+ all:
+ tag: v0.67.1
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.1
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.2
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.3
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.4
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.5
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.7
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+
+
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- workunit:
+ clients:
+ client.0:
+ - rbd/copy.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_lock_fence.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: cuttlefish
+- ceph:
+- install.upgrade:
+ all:
+ tag: v0.67.1
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.1
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.2
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.3
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.4
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.5
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.67.7
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ rgw: [client.0]
+ s3tests:
+ client.0:
+ rgw_server: client.0
+
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 30
+ - ceph.restart: [rgw.client.0]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 30
+ - ceph.restart: [rgw.client.0]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: dumpling
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [rgw.client.0]
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- swift:
+ client.0:
+ rgw_server: client.0
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- swift:
+ client.0:
+ rgw_server: client.0
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+- install.upgrade:
+ all:
+ tag:
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.73
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.74
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.75
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- ceph-fuse:
+- workunit:
+ clients:
+ client.0:
+ - suites/dbench.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/iogen.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+- install.upgrade:
+ all:
+ tag:
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.73
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.74
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.75
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ rados:
+ clients: [client.0]
+ ops: 2000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- rados:
+ clients: [client.0]
+ ops: 2000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+- install.upgrade:
+ all:
+ tag:
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.73
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.74
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.75
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+
+
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- workunit:
+ clients:
+ client.0:
+ - rbd/copy.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_lock_fence.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ fs: xfs
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+- install.upgrade:
+ all:
+ tag:
+- ceph.restart:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.73
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.74
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.75
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ rgw: [client.0]
+ s3tests:
+ client.0:
+ rgw_server: client.0
+
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 30
+ - ceph.restart: [rgw.client.0]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 30
+ - ceph.restart: [rgw.client.0]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [rgw.client.0]
--- /dev/null
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+- swift:
+ client.0:
+ rgw_server: client.0
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- swift:
+ client.0:
+ rgw_server: client.0
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ - scrub mismatch
+ - ScrubResult
+ - osd_map_max_advance
+ fs: xfs
+ conf:
+ osd:
+ osd map max advance: 1000
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+- - client.0
+ - client.1
--- /dev/null
+tasks:
+- install:
+ tag: v0.80.10
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.80.4
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.80.5
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.80.6
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.80.8
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.80.9
+- ceph:
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
--- /dev/null
+workload:
+ sequential:
+ - rgw: [client.0]
+ - s3tests:
+ client.0:
+ force-branch: firefly-original
+ rgw_server: client.0
--- /dev/null
+workload:
+ sequential:
+ - rados:
+ clients: [client.0]
+ ops: 2000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ branch: firefly
+ mon.b:
+ branch: firefly
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ branch: firefly
+ mon.b:
+ branch: firefly
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
--- /dev/null
+tasks:
+- install.upgrade:
+ client.0:
--- /dev/null
+tasks:
+- sequential:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0:
+ - suites/dbench.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- sequential:
+ - thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0:
+ - suites/iogen.sh
+
--- /dev/null
+tasks:
+- sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
--- /dev/null
+tasks:
+- sequential:
+ - rgw: [client.1]
+ - s3readwrite:
+ client.0:
+ rgw_server: client.1
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
--- /dev/null
+../../../../distros/supported
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ - osd_map_max_advance
+ fs: xfs
+ conf:
+ osd:
+ osd map max advance: 1000
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+- - client.1
+ - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+- install.upgrade:
+ all:
+ tag: v0.80.1
+- ceph.restart:
+- exec:
+ client.0:
+ - ceph osd crush tunables firefly
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ branch: emperor
+- ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+- install.upgrade:
+ all:
+ tag: v0.80.1
+- ceph.restart:
+- exec:
+ client.0:
+ - ceph osd crush tunables firefly
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ thrashosds:
+ thrash_primary_affinity: false
+tasks:
+- install:
+ tag: v0.67.11
+- ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.80.1
+- ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.80.2
+- ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.80.3
+- ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+tasks:
+- install:
+ tag: v0.80
+- ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
--- /dev/null
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ branch: firefly
+ mon.b:
+ branch: firefly
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
--- /dev/null
+tasks:
+- install.upgrade:
+ client.0:
--- /dev/null
+tasks:
+- sequential:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0:
+ - suites/dbench.sh
+
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- sequential:
+ - thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0:
+ - suites/iogen.sh
+
--- /dev/null
+tasks:
+- sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
--- /dev/null
+tasks:
+- sequential:
+ - rgw: [client.1]
+ - s3tests:
+ client.1:
+ force-branch: firefly-original
+ rgw_server: client.1
--- /dev/null
+../../../../distros/supported
\ No newline at end of file
--- /dev/null
+../../../../../distros/supported/
\ No newline at end of file
--- /dev/null
+# this case tests issue #9419 "dumpling->firefly upgrade, sending setallochint?"
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+tasks:
+- install:
+ branch: dumpling
+- print: "**** done install dumpling"
+- ceph:
+ fs: xfs
+- print: "**** done ceph"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on clinet.0"
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done install.upgrade"
+- ceph.restart:
+ #osd.2 is not upgraded
+ daemons: [mon.a, mon.b, mon.c, osd.0, osd.1]
+- print: "**** done restart all"
+- workunit:
+ branch: firefly
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh"
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+- print: "**** done rados/load-gen-big.sh"
+- workunit:
+ branch: firefly
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh"
+- workunit:
+ branch: firefly
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh"
+- rgw: [client.0]
+- s3tests:
+ client.0:
+ force-branch: firefly
+ rgw_server: client.0
+- print: "**** done s3tests"
--- /dev/null
+../../../../../distros/supported/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub
+ - osd_map_max_advance
+ fs: xfs
+ conf:
+ osd:
+ osd map max advance: 1000
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+- - client.1
+tasks:
+- install:
+ tag: v0.80.4
+- print: "**** done v0.80.4 install"
+- ceph:
+ fs: xfs
+- print: "**** done ceph xfs"
+- sequential:
+ - workload
+- print: "**** done workload v0.80.4"
+- parallel:
+ - workload1
+ - upgrade-sequence1
+- print: "**** done parallel v0.80.5"
+- parallel:
+ - workload2
+ - upgrade-sequence2
+- print: "**** done parallel v0.80.7"
+- parallel:
+ - workload3
+ - upgrade-sequence3
+- print: "**** done parallel v0.80.8"
+- parallel:
+ - workload4
+ - upgrade-sequence4
+- print: "**** done parallel v0.80.9"
+- parallel:
+ - workload_firefly
+ - upgrade-sequence_firefly
+- print: "**** done parallel firefly branch"
+#######################
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh workload"
+workload1:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh workload1"
+ - workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload1"
+ - workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh workload1"
+upgrade-sequence1:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ tag: v0.80.5
+ mon.b:
+ tag: v0.80.5
+ client.1:
+ tag: v0.80.5
+ - print: "**** done v0.80.5 install.upgrade"
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 30
+ - print: "**** done ceph.restart all 1 mon/mds/osd"
+workload2:
+ sequential:
+# removed to fix #10176
+# - workunit:
+# clients:
+# client.0:
+# - rbd/import_export.sh
+# env:
+# RBD_CREATE_ARGS: --new-format
+ - workunit:
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh workload2"
+upgrade-sequence2:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ tag: v0.80.7
+ mon.b:
+ tag: v0.80.7
+ client.1:
+ tag: v0.80.7
+ - print: "**** done v0.80.7 install.upgrade"
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - print: "**** done ceph.restart all 2 osd/mon/mds"
+workload3:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh workload3"
+ - workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload3"
+ - workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh workload3"
+upgrade-sequence3:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ tag: v0.80.8
+ mon.b:
+ tag: v0.80.8
+ client.1:
+ tag: v0.80.8
+ - print: "**** done v0.80.8 install.upgrade"
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 30
+ - print: "**** done ceph.restart all mon/mds/osd upgrade-sequence3"
+workload4:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh workload4"
+ - workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload4"
+ - workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh workload4"
+upgrade-sequence4:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ tag: v0.80.9
+ mon.b:
+ tag: v0.80.9
+ client.1:
+ tag: v0.80.9
+ - print: "**** done v0.80.9 install.upgrade"
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 30
+ - print: "**** done ceph.restart all 1 mon/mds/osd upgrade-sequence4"
+workload_firefly:
+ sequential:
+ - rgw: [client.0]
+ - print: "**** done rgw workload_firefly"
+ - s3tests:
+ client.0:
+ force-branch: firefly
+ rgw_server: client.0
+ - print: "**** done s3tests workload_firefly"
+upgrade-sequence_firefly:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ branch: firefly
+ mon.b:
+ branch: firefly
+ client.1:
+ branch: firefly
+ - print: "**** done branch: firefly install.upgrade"
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - print: "**** done ceph.restart all firefly current branch mds/osd/mon"
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: cuttlefish
+- ceph:
+ fs: xfs
+- ceph-fuse:
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ client.0:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ client.0:
+ - suites/iogen.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: dumpling
--- /dev/null
+tasks:
+- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/iogen.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - kernel_untar_build.sh
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/tiobench.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: emperor
--- /dev/null
+tasks:
+- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
--- /dev/null
+tasks:
+- workunit:
+ branch: emperor
+ clients:
+ client.0:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: emperor
+ clients:
+ all:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: emperor
+ clients:
+ client.0:
+ - suites/iogen.sh
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+
--- /dev/null
+tasks:
+- install:
+ branch: cuttlefish
+- ceph:
+
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
+
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ client.0:
+ - rados/load-gen-mix.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ osd.0:
+ branch: dumpling
+ osd.2:
+ branch: dumpling
+
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.2]
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
+
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+
--- /dev/null
+tasks:
+- install:
+ branch: cuttlefish
+- ceph:
+
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ all:
+ - cephtool/test.sh
+ - mon/pool_ops.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ mon.a:
+ branch: dumpling
--- /dev/null
+tasks:
+- ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ all:
+ - cephtool/test.sh
+ - mon/pool_ops.sh
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - had wrong client addr
+ - had wrong cluster addr
+tasks:
+- install.upgrade:
+ mon.b:
+ branch: dumpling
+ client.0:
+ branch: dumpling
+- ceph.restart:
+ daemons:
+ - mon.b
+ - mon.c
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+- workunit:
+ branch: dumpling
+ clients:
+ all:
+ - cephtool/test.sh
+ - mon/pool_ops.sh
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ fs: xfs
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+../rados/distro
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ fs: xfs
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ workunit:
+ branch: dumpling
+ clients:
+ all:
+ - rados/load-gen-big.sh
--- /dev/null
+workload:
+ workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/load-gen-mix.sh
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.3]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.3]
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
--- /dev/null
+os_type: centos
+os_version: "6.4"
--- /dev/null
+os_type: debian
+os_version: "7.0"
--- /dev/null
+os_type: fedora
+os_version: "18"
--- /dev/null
+os_type: rhel
+os_version: "6.3"
--- /dev/null
+os_type: ubuntu
+os_version: "12.04"
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ fs: xfs
+- parallel:
+ - workload
+ - upgrade-sequence
--- /dev/null
+workload:
+ sequential:
+ - rgw: [client.0]
+ - s3tests:
+ # use older tests when we are running a mix
+ client.0:
+ force-branch: dumpling
+ rgw_server: client.0
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ all:
+ branch: emperor
+ - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0]
--- /dev/null
+tasks:
+- rgw: [client.1]
+- swift:
+ client.1:
+ rgw_server: client.1
--- /dev/null
+../rados/distro
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mon.b
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - mon.c
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ fs: xfs
--- /dev/null
+tasks:
+- install.upgrade:
+ osd.0:
+- ceph.restart:
+ daemons: [osd.0, osd.1, osd.2]
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
--- /dev/null
+tasks:
+- ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 1800
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+tasks:
+- ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ mon.c: null
+- ceph.restart:
+ daemons: [mon.c]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+- ceph.wait_for_mon_quorum: [a, b, c]
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+../rados/distro
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: bobtail
+- ceph:
--- /dev/null
+tasks:
+- workunit:
+ branch: bobtail
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: dumpling
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/load-gen-mix.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: cuttlefish
+- ceph:
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
--- /dev/null
+tasks:
+- workunit:
+ branch: cuttlefish
+ clients:
+ client.0:
+ - rados/load-gen-mix.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: dumpling
--- /dev/null
+tasks:
+- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a]
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: emperor
--- /dev/null
+tasks:
+- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
--- /dev/null
+tasks:
+- workunit:
+ branch: emperor
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: bobtail
+- ceph:
--- /dev/null
+tasks:
+- workunit:
+ branch: bobtail
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: dumpling
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+tasks:
+- install:
+ branch: bobtail
+- ceph:
--- /dev/null
+tasks:
+- workunit:
+ branch: bobtail
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: dumpling
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a]
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: emperor
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a]
--- /dev/null
+tasks:
+- workunit:
+ branch: emperor
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: bobtail
+- ceph:
+- rgw:
--- /dev/null
+tasks:
+- s3tests:
+ client.0:
+ force-branch: bobtail
+ rgw_server: client.0
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: dumpling
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c, rgw.client.0]
--- /dev/null
+tasks:
+- s3readwrite:
+ client.0:
+ rgw_server: client.0
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
--- /dev/null
+tasks:
+- s3tests:
+ client.0:
+ force-branch: dumpling
+ rgw_server: client.0
--- /dev/null
+tasks:
+- swift:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c, rgw.client.0]
--- /dev/null
+tasks:
+- s3readwrite:
+ client.0:
+ rgw_server: client.0
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
--- /dev/null
+tasks:
+- s3tests:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+tasks:
+- swift:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+ - client.1
+- - client.0
+tasks:
+- install:
+ branch: bobtail
+- ceph:
+ conf:
+ client:
+ client mount timeout: 600
+ rgw init timeout: 600
+- rgw: [client.0]
--- /dev/null
+tasks:
+- s3readwrite:
+ client.0:
+ rgw_server: client.0
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
--- /dev/null
+tasks:
+- s3tests:
+ client.0:
+ rgw_server: client.0
+ force-branch: bobtail
--- /dev/null
+tasks:
+- swift:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: dumpling
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a, rgw.client.0]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0]
--- /dev/null
+tasks:
+- s3readwrite:
+ client.0:
+ rgw_server: client.0
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
--- /dev/null
+tasks:
+- s3tests:
+ client.0:
+ force-branch: dumpling
+ rgw_server: client.0
--- /dev/null
+tasks:
+- swift:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: emperor
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a, rgw.client.0]
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0]
--- /dev/null
+tasks:
+- s3readwrite:
+ client.0:
+ rgw_server: client.0
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
--- /dev/null
+tasks:
+- s3tests:
+ client.0:
+ force-branch: emperor
+ rgw_server: client.0
--- /dev/null
+tasks:
+- swift:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ fs: xfs
+- ceph-fuse:
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: emperor
--- /dev/null
+tasks:
+- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
+
--- /dev/null
+tasks:
+- workunit:
+ branch: emperor
+ clients:
+ client.0:
+ - suites/dbench.sh
--- /dev/null
+../rados/distro
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ fs: xfs
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ all:
+ - rados/load-gen-big.sh
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: emperor
--- /dev/null
+tasks:
+- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
+tasks:
+- workunit:
+ branch: emperor
+ clients:
+ client.0:
+ - rados/test.sh
--- /dev/null
+os_type: centos
+os_version: "6.4"
--- /dev/null
+os_type: debian
+os_version: "7.0"
--- /dev/null
+os_type: fedora
+os_version: "18"
--- /dev/null
+os_type: rhel
+os_version: "6.3"
--- /dev/null
+os_type: rhel
+os_version: "6.4"
--- /dev/null
+os_type: ubuntu
+os_version: "12.04"
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ fs: xfs
--- /dev/null
+tasks:
+- workunit:
+ branch: dumpling
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: emperor
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a]
--- /dev/null
+tasks:
+- workunit:
+ branch: emperor
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
--- /dev/null
+../rados/distro
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
--- /dev/null
+tasks:
+- install:
+ branch: dumpling
+- ceph:
+ fs: xfs
+- rgw: [client.0]
--- /dev/null
+tasks:
+- s3tests:
+ client.0:
+ rgw_server: client.0
+ force-branch: dumpling
--- /dev/null
+tasks:
+- install.upgrade:
+ all:
+ branch: emperor
--- /dev/null
+tasks:
+- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0]
--- /dev/null
+tasks:
+- s3tests:
+ client.0:
+ rgw_server: client.0
--- /dev/null
+../rados/distro
\ No newline at end of file
--- /dev/null
+tasks:
+- cifs-mount:
+ client.1:
+ share: ceph
+- workunit:
+ clients:
+ client.1:
+ - suites/dbench.sh
--- /dev/null
+tasks:
+- cifs-mount:
+ client.1:
+ share: ceph
+- workunit:
+ clients:
+ client.1:
+ - suites/fsstress.sh
--- /dev/null
+tasks:
+- cifs-mount:
+ client.1:
+ share: ceph
+- workunit:
+ clients:
+ client.1:
+ - kernel_untar_build.sh
+
--- /dev/null
+tasks:
+- pexec:
+ client.1:
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number
+ - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid
--- /dev/null
+import logging
+
+# Inherit teuthology's log level
+teuthology_log = logging.getLogger('teuthology')
+log = logging.getLogger(__name__)
+log.setLevel(teuthology_log.level)
--- /dev/null
+"""
+Admin Socket task -- used in rados, powercycle, and smoke testing
+"""
+from cStringIO import StringIO
+
+import json
+import logging
+import os
+import time
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+from teuthology.parallel import parallel
+
+log = logging.getLogger(__name__)
+
+
+def task(ctx, config):
+ """
+ Run an admin socket command, make sure the output is json, and run
+ a test program on it. The test program should read json from
+ stdin. This task succeeds if the test program exits with status 0.
+
+ To run the same test on all clients::
+
+ tasks:
+ - ceph:
+ - rados:
+ - admin_socket:
+ all:
+ dump_requests:
+ test: http://example.com/script
+
+ To restrict it to certain clients::
+
+ tasks:
+ - ceph:
+ - rados: [client.1]
+ - admin_socket:
+ client.1:
+ dump_requests:
+ test: http://example.com/script
+
+ If an admin socket command has arguments, they can be specified as
+ a list::
+
+ tasks:
+ - ceph:
+ - rados: [client.0]
+ - admin_socket:
+ client.0:
+ dump_requests:
+ test: http://example.com/script
+ help:
+ test: http://example.com/test_help_version
+ args: [version]
+
+ Note that there must be a ceph client with an admin socket running
+ before this task is run. The tests are parallelized at the client
+ level. Tests for a single client are run serially.
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ assert isinstance(config, dict), \
+ 'admin_socket task requires a dict for configuration'
+ teuthology.replace_all_with_clients(ctx.cluster, config)
+
+ with parallel() as ptask:
+ for client, tests in config.iteritems():
+ ptask.spawn(_run_tests, ctx, client, tests)
+
+
+def _socket_command(ctx, remote, socket_path, command, args):
+ """
+ Run an admin socket command and return the result as a string.
+
+ :param ctx: Context
+ :param remote: Remote site
+ :param socket_path: path to socket
+ :param command: command to be run remotely
+ :param args: command arguments
+
+ :returns: output of command in json format
+ """
+ json_fp = StringIO()
+ testdir = teuthology.get_testdir(ctx)
+ max_tries = 60
+ while True:
+ proc = remote.run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'ceph',
+ '--admin-daemon', socket_path,
+ ] + command.split(' ') + args,
+ stdout=json_fp,
+ check_status=False,
+ )
+ if proc.exitstatus == 0:
+ break
+ assert max_tries > 0
+ max_tries -= 1
+ log.info('ceph cli returned an error, command not registered yet?')
+ log.info('sleeping and retrying ...')
+ time.sleep(1)
+ out = json_fp.getvalue()
+ json_fp.close()
+ log.debug('admin socket command %s returned %s', command, out)
+ return json.loads(out)
+
+def _run_tests(ctx, client, tests):
+ """
+ Create a temp directory and wait for a client socket to be created.
+    For each test, fetch the test program onto the remote and run it.
+ Remove temp directory when finished.
+
+ :param ctx: Context
+ :param client: client machine to run the test
+ :param tests: list of tests to run
+ """
+ testdir = teuthology.get_testdir(ctx)
+ log.debug('Running admin socket tests on %s', client)
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client)
+ overrides = ctx.config.get('overrides', {}).get('admin_socket', {})
+
+ try:
+ tmp_dir = os.path.join(
+ testdir,
+ 'admin_socket_{client}'.format(client=client),
+ )
+ remote.run(
+ args=[
+ 'mkdir',
+ '--',
+ tmp_dir,
+ run.Raw('&&'),
+ # wait for client process to create the socket
+ 'while', 'test', '!', '-e', socket_path, run.Raw(';'),
+ 'do', 'sleep', '1', run.Raw(';'), 'done',
+ ],
+ )
+
+ for command, config in tests.iteritems():
+ if config is None:
+ config = {}
+ teuthology.deep_merge(config, overrides)
+ log.debug('Testing %s with config %s', command, str(config))
+
+ test_path = None
+ if 'test' in config:
+ url = config['test'].format(
+ branch=config.get('branch', 'master')
+ )
+ test_path = os.path.join(tmp_dir, command)
+ remote.run(
+ args=[
+ 'wget',
+ '-q',
+ '-O',
+ test_path,
+ '--',
+ url,
+ run.Raw('&&'),
+ 'chmod',
+ 'u=rx',
+ '--',
+ test_path,
+ ],
+ )
+
+ args = config.get('args', [])
+ assert isinstance(args, list), \
+ 'admin socket command args must be a list'
+ sock_out = _socket_command(ctx, remote, socket_path, command, args)
+ if test_path is not None:
+ remote.run(
+ args=[
+ test_path,
+ ],
+ stdin=json.dumps(sock_out),
+ )
+
+ finally:
+ remote.run(
+ args=[
+ 'rm', '-rf', '--', tmp_dir,
+ ],
+ )
--- /dev/null
+<IfModule !version_module>
+ LoadModule version_module {mod_path}/mod_version.so
+</IfModule>
+<IfModule !env_module>
+ LoadModule env_module {mod_path}/mod_env.so
+</IfModule>
+<IfModule !rewrite_module>
+ LoadModule rewrite_module {mod_path}/mod_rewrite.so
+</IfModule>
+<IfModule !fastcgi_module>
+ LoadModule fastcgi_module {mod_path}/mod_fastcgi.so
+</IfModule>
+<IfModule !log_config_module>
+ LoadModule log_config_module {mod_path}/mod_log_config.so
+</IfModule>
+
+Listen {port}
+ServerName {host}
+
+<IfVersion >= 2.4>
+ <IfModule !unixd_module>
+ LoadModule unixd_module {mod_path}/mod_unixd.so
+ </IfModule>
+ <IfModule !authz_core_module>
+ LoadModule authz_core_module {mod_path}/mod_authz_core.so
+ </IfModule>
+ <IfModule !mpm_worker_module>
+ LoadModule mpm_worker_module {mod_path}/mod_mpm_worker.so
+ </IfModule>
+ User {user}
+ Group {group}
+</IfVersion>
+
+ServerRoot {testdir}/apache
+ErrorLog {testdir}/archive/apache.{client}/error.log
+LogFormat "%h l %u %t \"%r\" %>s %b \"{{Referer}}i\" \"%{{User-agent}}i\"" combined
+CustomLog {testdir}/archive/apache.{client}/access.log combined
+PidFile {testdir}/apache/tmp.{client}/apache.pid
+DocumentRoot {testdir}/apache/htdocs.{client}
+FastCgiIPCDir {testdir}/apache/tmp.{client}/fastcgi_sock
+FastCgiExternalServer {testdir}/apache/htdocs.{client}/rgw.fcgi -socket rgw_sock -idle-timeout {idle_timeout}
+RewriteEngine On
+
+RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /rgw.fcgi?page=$1&params=$2&%{{QUERY_STRING}} [E=HTTP_AUTHORIZATION:%{{HTTP:Authorization}},L]
+
+# Set fastcgi environment variables.
+# Note that this is separate from Unix environment variables!
+SetEnv RGW_LOG_LEVEL 20
+SetEnv RGW_SHOULD_LOG yes
+SetEnv RGW_PRINT_CONTINUE {print_continue}
+
+<Directory {testdir}/apache/htdocs.{client}>
+ Options +ExecCGI
+ AllowOverride All
+ SetHandler fastcgi-script
+</Directory>
+
+AllowEncodedSlashes On
+ServerSignature Off
--- /dev/null
+"""
+Run an autotest test on the ceph cluster.
+"""
+import json
+import logging
+import os
+
+from teuthology import misc as teuthology
+from teuthology.parallel import parallel
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Run an autotest test on the ceph cluster.
+
+ Only autotest client tests are supported.
+
+ The config is a mapping from role name to list of tests to run on
+ that client.
+
+ For example::
+
+ tasks:
+ - ceph:
+ - ceph-fuse: [client.0, client.1]
+ - autotest:
+ client.0: [dbench]
+ client.1: [bonnie]
+
+ You can also specify a list of tests to run on all clients::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ - autotest:
+ all: [dbench]
+ """
+ assert isinstance(config, dict)
+ config = teuthology.replace_all_with_clients(ctx.cluster, config)
+ log.info('Setting up autotest...')
+ testdir = teuthology.get_testdir(ctx)
+ with parallel() as p:
+ for role in config.iterkeys():
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+ p.spawn(_download, testdir, remote)
+
+ log.info('Making a separate scratch dir for every client...')
+ for role in config.iterkeys():
+ assert isinstance(role, basestring)
+ PREFIX = 'client.'
+ assert role.startswith(PREFIX)
+ id_ = role[len(PREFIX):]
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
+ remote.run(
+ args=[
+ 'sudo',
+ 'install',
+ '-d',
+ '-m', '0755',
+ '--owner={user}'.format(user='ubuntu'), #TODO
+ '--',
+ scratch,
+ ],
+ )
+
+ with parallel() as p:
+ for role, tests in config.iteritems():
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+ p.spawn(_run_tests, testdir, remote, role, tests)
+
+def _download(testdir, remote):
+ """
+    Download. Does not explicitly support multiple tasks in a single run.
+ """
+ remote.run(
+ args=[
+ # explicitly does not support multiple autotest tasks
+ # in a single run; the result archival would conflict
+ 'mkdir', '{tdir}/archive/autotest'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'mkdir', '{tdir}/autotest'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'wget',
+ '-nv',
+ '--no-check-certificate',
+ 'https://github.com/ceph/autotest/tarball/ceph',
+ '-O-',
+ run.Raw('|'),
+ 'tar',
+ '-C', '{tdir}/autotest'.format(tdir=testdir),
+ '-x',
+ '-z',
+ '-f-',
+ '--strip-components=1',
+ ],
+ )
+
+def _run_tests(testdir, remote, role, tests):
+ """
+ Spawned to run test on remote site
+ """
+ assert isinstance(role, basestring)
+ PREFIX = 'client.'
+ assert role.startswith(PREFIX)
+ id_ = role[len(PREFIX):]
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
+
+ assert isinstance(tests, list)
+ for idx, testname in enumerate(tests):
+ log.info('Running autotest client test #%d: %s...', idx, testname)
+
+ tag = 'client.{id}.num{idx}.{testname}'.format(
+ idx=idx,
+ testname=testname,
+ id=id_,
+ )
+ control = '{tdir}/control.{tag}'.format(tdir=testdir, tag=tag)
+ teuthology.write_file(
+ remote=remote,
+ path=control,
+ data='import json; data=json.loads({data!r}); job.run_test(**data)'.format(
+ data=json.dumps(dict(
+ url=testname,
+ dir=scratch,
+ # TODO perhaps tag
+ # results will be in {testdir}/autotest/client/results/dbench
+ # or {testdir}/autotest/client/results/dbench.{tag}
+ )),
+ ),
+ )
+ remote.run(
+ args=[
+ '{tdir}/autotest/client/bin/autotest'.format(tdir=testdir),
+ '--verbose',
+ '--harness=simple',
+ '--tag={tag}'.format(tag=tag),
+ control,
+ run.Raw('3>&1'),
+ ],
+ )
+
+ remote.run(
+ args=[
+ 'rm', '-rf', '--', control,
+ ],
+ )
+
+ remote.run(
+ args=[
+ 'mv',
+ '--',
+ '{tdir}/autotest/client/results/{tag}'.format(tdir=testdir, tag=tag),
+ '{tdir}/archive/autotest/{tag}'.format(tdir=testdir, tag=tag),
+ ],
+ )
+
+ remote.run(
+ args=[
+ 'rm', '-rf', '--', '{tdir}/autotest'.format(tdir=testdir),
+ ],
+ )
--- /dev/null
+"""
+Run blktrace program through teuthology
+"""
+import contextlib
+import logging
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+blktrace = '/usr/sbin/blktrace'
+daemon_signal = 'term'
+
+@contextlib.contextmanager
+def setup(ctx, config):
+ """
+ Setup all the remotes
+ """
+ osds = ctx.cluster.only(teuthology.is_type('osd'))
+ log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx))
+
+ for remote, roles_for_host in osds.remotes.iteritems():
+ log.info('Creating %s on %s' % (log_dir, remote.name))
+ remote.run(
+ args=['mkdir', '-p', '-m0755', '--', log_dir],
+ wait=False,
+ )
+ yield
+
+@contextlib.contextmanager
+def execute(ctx, config):
+ """
+ Run the blktrace program on remote machines.
+ """
+ procs = []
+ testdir = teuthology.get_testdir(ctx)
+ log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir)
+
+ osds = ctx.cluster.only(teuthology.is_type('osd'))
+ for remote, roles_for_host in osds.remotes.iteritems():
+ roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote]
+ for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
+ if roles_to_devs.get(id_):
+ dev = roles_to_devs[id_]
+ log.info("running blktrace on %s: %s" % (remote.name, dev))
+
+ proc = remote.run(
+ args=[
+ 'cd',
+ log_dir,
+ run.Raw(';'),
+ 'daemon-helper',
+ daemon_signal,
+ 'sudo',
+ blktrace,
+ '-o',
+ dev.rsplit("/", 1)[1],
+ '-d',
+ dev,
+ ],
+ wait=False,
+ stdin=run.PIPE,
+ )
+ procs.append(proc)
+ try:
+ yield
+ finally:
+ osds = ctx.cluster.only(teuthology.is_type('osd'))
+        log.info('stopping blktrace processes')
+ for proc in procs:
+ proc.stdin.close()
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Usage:
+ blktrace:
+
+    Runs blktrace against the scratch devices on all osd hosts.
+ """
+ if config is None:
+ config = dict(('client.{id}'.format(id=id_), None)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
+ elif isinstance(config, list):
+ config = dict.fromkeys(config)
+
+ with contextutil.nested(
+ lambda: setup(ctx=ctx, config=config),
+ lambda: execute(ctx=ctx, config=config),
+ ):
+ yield
+
--- /dev/null
+[Boto]
+http_socket_timeout = {idle_timeout}
--- /dev/null
+#!/usr/bin/env python
+
+import json
+import logging
+import requests
+
+log = logging.getLogger(__name__)
+
+
+class AuthenticatedHttpClient(requests.Session):
+ """
+ Client for the calamari REST API, principally exists to do
+ authentication, but also helpfully prefixes
+ URLs in requests with the API base URL and JSONizes
+ POST data.
+ """
+ def __init__(self, api_url, username, password):
+ super(AuthenticatedHttpClient, self).__init__()
+ self._username = username
+ self._password = password
+ self._api_url = api_url
+ self.headers = {
+ 'Content-type': "application/json; charset=UTF-8"
+ }
+
+ def request(self, method, url, **kwargs):
+ if not url.startswith('/'):
+ url = self._api_url + url
+ response = super(AuthenticatedHttpClient, self).request(method, url, **kwargs)
+ if response.status_code >= 400:
+ # For the benefit of test logs
+ print "%s: %s" % (response.status_code, response.content)
+ return response
+
+ def post(self, url, data=None, **kwargs):
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ return super(AuthenticatedHttpClient, self).post(url, data, **kwargs)
+
+ def patch(self, url, data=None, **kwargs):
+ if isinstance(data, dict):
+ data = json.dumps(data)
+ return super(AuthenticatedHttpClient, self).patch(url, data, **kwargs)
+
+ def login(self):
+ """
+ Authenticate with the Django auth system as
+ it is exposed in the Calamari REST API.
+ """
+ log.info("Logging in as %s" % self._username)
+ response = self.get("auth/login/")
+ response.raise_for_status()
+ self.headers['X-XSRF-TOKEN'] = response.cookies['XSRF-TOKEN']
+
+ self.post("auth/login/", {
+ 'next': "/",
+ 'username': self._username,
+ 'password': self._password
+ })
+ response.raise_for_status()
+
+ # Check we're allowed in now.
+ response = self.get("cluster")
+ response.raise_for_status()
+
+if __name__ == "__main__":
+
+ import argparse
+
+ p = argparse.ArgumentParser()
+ p.add_argument('-u', '--uri', default='http://mira035/api/v1/')
+ p.add_argument('--user', default='admin')
+ p.add_argument('--pass', dest='password', default='admin')
+ args, remainder = p.parse_known_args()
+
+ c = AuthenticatedHttpClient(args.uri, args.user, args.password)
+ c.login()
+ response = c.request('GET', ''.join(remainder)).json()
+ print json.dumps(response, indent=2)
--- /dev/null
+#!/usr/bin/env python
+
+import datetime
+import os
+import logging
+import logging.handlers
+import requests
+import uuid
+import unittest
+from http_client import AuthenticatedHttpClient
+
+log = logging.getLogger(__name__)
+log.addHandler(logging.StreamHandler())
+log.setLevel(logging.INFO)
+
+global base_uri
+global client
+base_uri = None
+server_uri = None
+client = None
+
+def setUpModule():
+ global base_uri
+ global server_uri
+ global client
+ try:
+ base_uri = os.environ['CALAMARI_BASE_URI']
+ except KeyError:
+ log.error('Must define CALAMARI_BASE_URI')
+ os._exit(1)
+ if not base_uri.endswith('/'):
+ base_uri += '/'
+ if not base_uri.endswith('api/v1/'):
+ base_uri += 'api/v1/'
+ client = AuthenticatedHttpClient(base_uri, 'admin', 'admin')
+ server_uri = base_uri.replace('api/v1/', '')
+ client.login()
+
+class RestTest(unittest.TestCase):
+ 'Base class for all tests here; get class\'s data'
+
+ def setUp(self):
+ # Called once for each test_* case. A bit wasteful, but we
+ # really like using the simple class variable self.uri
+ # to customize each derived TestCase
+ method = getattr(self, 'method', 'GET')
+ raw = self.uri.startswith('/')
+ self.response = self.get_object(method, self.uri, raw=raw)
+
+    def get_object(self, method, url, raw=False):
+        'Return Python object decoded from JSON response to method/url'
+        global server_uri
+ if not raw:
+ return client.request(method, url).json()
+ else:
+ return requests.request(method, server_uri + url).json()
+
+class TestUserMe(RestTest):
+
+ uri = 'user/me'
+
+ def test_me(self):
+ self.assertEqual(self.response['username'], 'admin')
+
+class TestCluster(RestTest):
+
+ uri = 'cluster'
+
+ def test_id(self):
+ self.assertEqual(self.response[0]['id'], 1)
+
+ def test_times(self):
+ for time in (
+ self.response[0]['cluster_update_time'],
+ self.response[0]['cluster_update_attempt_time'],
+ ):
+ self.assertTrue(is_datetime(time))
+
+ def test_api_base_url(self):
+ api_base_url = self.response[0]['api_base_url']
+ self.assertTrue(api_base_url.startswith('http'))
+ self.assertIn('api/v0.1', api_base_url)
+
+class TestHealth(RestTest):
+
+ uri = 'cluster/1/health'
+
+ def test_cluster(self):
+ self.assertEqual(self.response['cluster'], 1)
+
+ def test_times(self):
+ for time in (
+ self.response['cluster_update_time'],
+ self.response['added'],
+ ):
+ self.assertTrue(is_datetime(time))
+
+ def test_report_and_overall_status(self):
+ self.assertIn('report', self.response)
+ self.assertIn('overall_status', self.response['report'])
+
+class TestHealthCounters(RestTest):
+
+ uri = 'cluster/1/health_counters'
+
+ def test_cluster(self):
+ self.assertEqual(self.response['cluster'], 1)
+
+ def test_time(self):
+ self.assertTrue(is_datetime(self.response['cluster_update_time']))
+
+ def test_existence(self):
+ for section in ('pg', 'mon', 'osd'):
+ for counter in ('warn', 'critical', 'ok'):
+ count = self.response[section][counter]['count']
+ self.assertIsInstance(count, int)
+ self.assertIsInstance(self.response['pool']['total'], int)
+
+ def test_mds_sum(self):
+ count = self.response['mds']
+ self.assertEqual(
+ count['up_not_in'] + count['not_up_not_in'] + count['up_in'],
+ count['total']
+ )
+
+class TestSpace(RestTest):
+
+ uri = 'cluster/1/space'
+
+ def test_cluster(self):
+ self.assertEqual(self.response['cluster'], 1)
+
+ def test_times(self):
+ for time in (
+ self.response['cluster_update_time'],
+ self.response['added'],
+ ):
+ self.assertTrue(is_datetime(time))
+
+ def test_space(self):
+ for size in ('free_bytes', 'used_bytes', 'capacity_bytes'):
+ self.assertIsInstance(self.response['space'][size], int)
+ self.assertGreater(self.response['space'][size], 0)
+
+ def test_report(self):
+ for size in ('total_used', 'total_space', 'total_avail'):
+ self.assertIsInstance(self.response['report'][size], int)
+ self.assertGreater(self.response['report'][size], 0)
+
+class TestOSD(RestTest):
+
+ uri = 'cluster/1/osd'
+
+ def test_cluster(self):
+ self.assertEqual(self.response['cluster'], 1)
+
+ def test_times(self):
+ for time in (
+ self.response['cluster_update_time'],
+ self.response['added'],
+ ):
+ self.assertTrue(is_datetime(time))
+
+ def test_osd_uuid(self):
+ for osd in self.response['osds']:
+ uuidobj = uuid.UUID(osd['uuid'])
+ self.assertEqual(str(uuidobj), osd['uuid'])
+
+ def test_osd_pools(self):
+ for osd in self.response['osds']:
+ if osd['up'] != 1:
+ continue
+ self.assertIsInstance(osd['pools'], list)
+ self.assertIsInstance(osd['pools'][0], basestring)
+
+ def test_osd_up_in(self):
+ for osd in self.response['osds']:
+ for flag in ('up', 'in'):
+ self.assertIn(osd[flag], (0, 1))
+
+ def test_osd_0(self):
+ osd0 = self.get_object('GET', 'cluster/1/osd/0')['osd']
+ for field in osd0.keys():
+ if not field.startswith('cluster_update_time'):
+ self.assertEqual(self.response['osds'][0][field], osd0[field])
+
+class TestPool(RestTest):
+
+ uri = 'cluster/1/pool'
+
+ def test_cluster(self):
+ for pool in self.response:
+ self.assertEqual(pool['cluster'], 1)
+
+ def test_fields_are_ints(self):
+ for pool in self.response:
+ for field in ('id', 'used_objects', 'used_bytes'):
+ self.assertIsInstance(pool[field], int)
+
+ def test_name_is_str(self):
+ for pool in self.response:
+ self.assertIsInstance(pool['name'], basestring)
+
+ def test_pool_0(self):
+ poolid = self.response[0]['id']
+ pool = self.get_object('GET', 'cluster/1/pool/{id}'.format(id=poolid))
+ self.assertEqual(self.response[0], pool)
+
+class TestServer(RestTest):
+
+ uri = 'cluster/1/server'
+
+ def test_ipaddr(self):
+ for server in self.response:
+ octets = server['addr'].split('.')
+ self.assertEqual(len(octets), 4)
+ for octetstr in octets:
+ octet = int(octetstr)
+ self.assertIsInstance(octet, int)
+ self.assertGreaterEqual(octet, 0)
+ self.assertLessEqual(octet, 255)
+
+ def test_hostname_name_strings(self):
+ for server in self.response:
+ for field in ('name', 'hostname'):
+ self.assertIsInstance(server[field], basestring)
+
+ def test_services(self):
+ for server in self.response:
+ self.assertIsInstance(server['services'], list)
+ for service in server['services']:
+ self.assertIn(service['type'], ('osd', 'mon', 'mds'))
+
+class TestGraphitePoolIOPS(RestTest):
+
+ uri = '/graphite/render?format=json-array&' \
+ 'target=ceph.cluster.ceph.pool.0.num_read&' \
+ 'target=ceph.cluster.ceph.pool.0.num_write'
+
+ def test_targets_contain_request(self):
+ self.assertIn('targets', self.response)
+ self.assertIn('ceph.cluster.ceph.pool.0.num_read',
+ self.response['targets'])
+ self.assertIn('ceph.cluster.ceph.pool.0.num_write',
+ self.response['targets'])
+
+ def test_datapoints(self):
+ self.assertIn('datapoints', self.response)
+ self.assertGreater(len(self.response['datapoints']), 0)
+ data = self.response['datapoints'][0]
+ self.assertEqual(len(data), 3)
+ self.assertIsInstance(data[0], int)
+ if data[1]:
+ self.assertIsInstance(data[1], float)
+ if data[2]:
+ self.assertIsInstance(data[2], float)
+
+#
+# Utility functions
+#
+
+DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
+
+def is_datetime(time):
+ datetime.datetime.strptime(time, DATETIME_FORMAT)
+ return True
+
+if __name__ == '__main__':
+ unittest.main()
--- /dev/null
+"""
+Ceph cluster task.
+
+Handle the setup, starting, and clean-up of a Ceph cluster.
+"""
+from cStringIO import StringIO
+
+import argparse
+import contextlib
+import logging
+import os
+import json
+import time
+
+from ceph_manager import CephManager
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.orchestra import run
+from teuthology.orchestra.daemon import DaemonGroup
+import ceph_client as cclient
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def ceph_log(ctx, config):
+ """
+    Make the /var/log/ceph log directory writeable by everyone, disable ceph
+    logrotate, and create the valgrind and profiling-logger subdirectories.
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ log.info('Making ceph log dir writeable by non-root...')
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'sudo',
+ 'chmod',
+ '777',
+ '/var/log/ceph',
+ ],
+ wait=False,
+ )
+ )
+ log.info('Disabling ceph logrotate...')
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'sudo',
+ 'rm', '-f', '--',
+ '/etc/logrotate.d/ceph',
+ ],
+ wait=False,
+ )
+ )
+ log.info('Creating extra log directories...')
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'sudo',
+ 'install', '-d', '-m0755', '--',
+ '/var/log/ceph/valgrind',
+ '/var/log/ceph/profiling-logger',
+ ],
+ wait=False,
+ )
+ )
+
+ try:
+ yield
+
+ finally:
+ pass
+
+
+def assign_devs(roles, devs):
+ """
+ Create a dictionary of devs indexed by roles
+
+ :param roles: List of roles
+ :param devs: Corresponding list of devices.
+ :returns: Dictionary of devs indexed by roles.
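+
+    For example, assign_devs(['0', '1'], ['/dev/sdb', '/dev/sdc']) returns
+    {'0': '/dev/sdb', '1': '/dev/sdc'}.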
+ """
+ return dict(zip(roles, devs))
+
+@contextlib.contextmanager
+def valgrind_post(ctx, config):
+ """
+    After the tests run, look through all the valgrind logs. An exception is
+    raised if textual errors occurred in the logs or if valgrind issues were
+    detected in the logs.
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ try:
+ yield
+ finally:
+ lookup_procs = list()
+        log.info('Checking for errors in any valgrind logs...')
+ for remote in ctx.cluster.remotes.iterkeys():
+ #look at valgrind logs for each node
+ proc = remote.run(
+ args=[
+ 'sudo',
+ 'zgrep',
+ '<kind>',
+ run.Raw('/var/log/ceph/valgrind/*'),
+ '/dev/null', # include a second file so that we always get a filename prefix on the output
+ run.Raw('|'),
+ 'sort',
+ run.Raw('|'),
+ 'uniq',
+ ],
+ wait=False,
+ check_status=False,
+ stdout=StringIO(),
+ )
+ lookup_procs.append((proc, remote))
+
+ valgrind_exception = None
+ for (proc, remote) in lookup_procs:
+ proc.wait()
+ out = proc.stdout.getvalue()
+ for line in out.split('\n'):
+ if line == '':
+ continue
+ try:
+ (file, kind) = line.split(':')
+ except Exception:
+ log.error('failed to split line %s', line)
+ raise
+ log.debug('file %s kind %s', file, kind)
+ if (file.find('mds') >= 0) and kind.find('Lost') > 0:
+ continue
+ log.error('saw valgrind issue %s in %s', kind, file)
+ valgrind_exception = Exception('saw valgrind issues')
+
+ if valgrind_exception is not None:
+ raise valgrind_exception
+
+
+
+@contextlib.contextmanager
+def cluster(ctx, config):
+ """
+ Handle the creation and removal of a ceph cluster.
+
+ On startup:
+ Create directories needed for the cluster.
+ Create remote journals for all osds.
+ Create and set keyring.
+        Copy the monmap to the test systems.
+        Setup mon nodes.
+        Setup mds nodes.
+        Mkfs osd nodes.
+        Add keyring information to monmaps.
+ Mkfs mon nodes.
+
+ On exit:
+        If errors occurred, extract a failure message and store it in ctx.summary.
+ Unmount all test files and temporary journaling files.
+ Save the monitor information and archive all ceph logs.
+ Cleanup the keyring setup, and remove all monitor map and data files left over.
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+    if ctx.config.get('use_existing_cluster', False) is True:
+        log.info("'use_existing_cluster' is true; skipping cluster creation")
+        # Skip both setup and teardown when reusing an existing cluster.
+        yield
+        return
+
+ testdir = teuthology.get_testdir(ctx)
+ log.info('Creating ceph cluster...')
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'install', '-d', '-m0755', '--',
+ '{tdir}/data'.format(tdir=testdir),
+ ],
+ wait=False,
+ )
+ )
+
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'sudo',
+ 'install', '-d', '-m0777', '--', '/var/run/ceph',
+ ],
+ wait=False,
+ )
+ )
+
+
+ devs_to_clean = {}
+ remote_to_roles_to_devs = {}
+ remote_to_roles_to_journals = {}
+ osds = ctx.cluster.only(teuthology.is_type('osd'))
+ for remote, roles_for_host in osds.remotes.iteritems():
+ devs = teuthology.get_scratch_devices(remote)
+ roles_to_devs = {}
+ roles_to_journals = {}
+ if config.get('fs'):
+ log.info('fs option selected, checking for scratch devs')
+ log.info('found devs: %s' % (str(devs),))
+ devs_id_map = teuthology.get_wwn_id_map(remote, devs)
+ iddevs = devs_id_map.values()
+ roles_to_devs = assign_devs(
+ teuthology.roles_of_type(roles_for_host, 'osd'), iddevs
+ )
+ if len(roles_to_devs) < len(iddevs):
+ iddevs = iddevs[len(roles_to_devs):]
+ devs_to_clean[remote] = []
+
+ if config.get('block_journal'):
+ log.info('block journal enabled')
+ roles_to_journals = assign_devs(
+ teuthology.roles_of_type(roles_for_host, 'osd'), iddevs
+ )
+ log.info('journal map: %s', roles_to_journals)
+
+ if config.get('tmpfs_journal'):
+ log.info('tmpfs journal enabled')
+ roles_to_journals = {}
+ remote.run( args=[ 'sudo', 'mount', '-t', 'tmpfs', 'tmpfs', '/mnt' ] )
+ for osd in teuthology.roles_of_type(roles_for_host, 'osd'):
+ tmpfs = '/mnt/osd.%s' % osd
+ roles_to_journals[osd] = tmpfs
+ remote.run( args=[ 'truncate', '-s', '1500M', tmpfs ] )
+ log.info('journal map: %s', roles_to_journals)
+
+ log.info('dev map: %s' % (str(roles_to_devs),))
+ remote_to_roles_to_devs[remote] = roles_to_devs
+ remote_to_roles_to_journals[remote] = roles_to_journals
+
+
+ log.info('Generating config...')
+ remotes_and_roles = ctx.cluster.remotes.items()
+ roles = [role_list for (remote, role_list) in remotes_and_roles]
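+    # Each remote's IP is taken from the peer address of its SSH transport.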
+ ips = [host for (host, port) in (remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
+ conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips)
+ for remote, roles_to_journals in remote_to_roles_to_journals.iteritems():
+ for role, journal in roles_to_journals.iteritems():
+ key = "osd." + str(role)
+ if key not in conf:
+ conf[key] = {}
+ conf[key]['osd journal'] = journal
+ for section, keys in config['conf'].iteritems():
+ for key, value in keys.iteritems():
+ log.info("[%s] %s = %s" % (section, key, value))
+ if section not in conf:
+ conf[section] = {}
+ conf[section][key] = value
+
+ if config.get('tmpfs_journal'):
+ conf['journal dio'] = False
+
+ ctx.ceph = argparse.Namespace()
+ ctx.ceph.conf = conf
+
+ keyring_path = config.get('keyring_path', '/etc/ceph/ceph.keyring')
+
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+
+ firstmon = teuthology.get_first_mon(ctx, config)
+
+ log.info('Setting up %s...' % firstmon)
+ ctx.cluster.only(firstmon).run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ '--create-keyring',
+ keyring_path,
+ ],
+ )
+ ctx.cluster.only(firstmon).run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ '--gen-key',
+ '--name=mon.',
+ keyring_path,
+ ],
+ )
+ ctx.cluster.only(firstmon).run(
+ args=[
+ 'sudo',
+ 'chmod',
+ '0644',
+ keyring_path,
+ ],
+ )
+ (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+ fsid = teuthology.create_simple_monmap(
+ ctx,
+ remote=mon0_remote,
+ conf=conf,
+ )
+ if not 'global' in conf:
+ conf['global'] = {}
+ conf['global']['fsid'] = fsid
+
+ log.info('Writing ceph.conf for FSID %s...' % fsid)
+ conf_path = config.get('conf_path', '/etc/ceph/ceph.conf')
+ conf_fp = StringIO()
+ conf.write(conf_fp)
+ conf_fp.seek(0)
+ writes = ctx.cluster.run(
+ args=[
+ 'sudo', 'mkdir', '-p', '/etc/ceph', run.Raw('&&'),
+ 'sudo', 'chmod', '0755', '/etc/ceph', run.Raw('&&'),
+ 'sudo', 'python',
+ '-c',
+ 'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
+ conf_path,
+ run.Raw('&&'),
+ 'sudo', 'chmod', '0644', conf_path,
+ ],
+ stdin=run.PIPE,
+ wait=False,
+ )
+ teuthology.feed_many_stdins_and_close(conf_fp, writes)
+ run.wait(writes)
+
+ log.info('Creating admin key on %s...' % firstmon)
+ ctx.cluster.only(firstmon).run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ '--gen-key',
+ '--name=client.admin',
+ '--set-uid=0',
+ '--cap', 'mon', 'allow *',
+ '--cap', 'osd', 'allow *',
+ '--cap', 'mds', 'allow',
+ keyring_path,
+ ],
+ )
+
+ log.info('Copying monmap to all nodes...')
+ keyring = teuthology.get_file(
+ remote=mon0_remote,
+ path=keyring_path,
+ )
+ monmap = teuthology.get_file(
+ remote=mon0_remote,
+ path='{tdir}/monmap'.format(tdir=testdir),
+ )
+
+ for rem in ctx.cluster.remotes.iterkeys():
+ # copy mon key and initial monmap
+ log.info('Sending monmap to node {remote}'.format(remote=rem))
+ teuthology.sudo_write_file(
+ remote=rem,
+ path=keyring_path,
+ data=keyring,
+ perms='0644'
+ )
+ teuthology.write_file(
+ remote=rem,
+ path='{tdir}/monmap'.format(tdir=testdir),
+ data=monmap,
+ )
+
+ log.info('Setting up mon nodes...')
+ mons = ctx.cluster.only(teuthology.is_type('mon'))
+ run.wait(
+ mons.run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'osdmaptool',
+ '-c', conf_path,
+ '--clobber',
+ '--createsimple', '{num:d}'.format(
+ num=teuthology.num_instances_of_type(ctx.cluster, 'osd'),
+ ),
+ '{tdir}/osdmap'.format(tdir=testdir),
+ '--pg_bits', '2',
+ '--pgp_bits', '4',
+ ],
+ wait=False,
+ ),
+ )
+
+ log.info('Setting up mds nodes...')
+ mdss = ctx.cluster.only(teuthology.is_type('mds'))
+ for remote, roles_for_host in mdss.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, 'mds'):
+ remote.run(
+ args=[
+ 'sudo',
+ 'mkdir',
+ '-p',
+ '/var/lib/ceph/mds/ceph-{id}'.format(id=id_),
+ run.Raw('&&'),
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ '--create-keyring',
+ '--gen-key',
+ '--name=mds.{id}'.format(id=id_),
+ '/var/lib/ceph/mds/ceph-{id}/keyring'.format(id=id_),
+ ],
+ )
+
+ cclient.create_keyring(ctx)
+ log.info('Running mkfs on osd nodes...')
+
+ ctx.disk_config = argparse.Namespace()
+ ctx.disk_config.remote_to_roles_to_dev = remote_to_roles_to_devs
+ ctx.disk_config.remote_to_roles_to_journals = remote_to_roles_to_journals
+ ctx.disk_config.remote_to_roles_to_dev_mount_options = {}
+ ctx.disk_config.remote_to_roles_to_dev_fstype = {}
+
+ log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(r=str(ctx.disk_config.remote_to_roles_to_dev)))
+ for remote, roles_for_host in osds.remotes.iteritems():
+ roles_to_devs = remote_to_roles_to_devs[remote]
+ roles_to_journals = remote_to_roles_to_journals[remote]
+
+
+ for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
+ remote.run(
+ args=[
+ 'sudo',
+ 'mkdir',
+ '-p',
+ '/var/lib/ceph/osd/ceph-{id}'.format(id=id_),
+ ])
+ log.info(str(roles_to_journals))
+ log.info(id_)
+ if roles_to_devs.get(id_):
+ dev = roles_to_devs[id_]
+ fs = config.get('fs')
+ package = None
+ mkfs_options = config.get('mkfs_options')
+ mount_options = config.get('mount_options')
+ if fs == 'btrfs':
+ #package = 'btrfs-tools'
+ if mount_options is None:
+ mount_options = ['noatime','user_subvol_rm_allowed']
+ if mkfs_options is None:
+ mkfs_options = ['-m', 'single',
+ '-l', '32768',
+ '-n', '32768']
+ if fs == 'xfs':
+ #package = 'xfsprogs'
+ if mount_options is None:
+ mount_options = ['noatime']
+ if mkfs_options is None:
+ mkfs_options = ['-f', '-i', 'size=2048']
+ if fs == 'ext4' or fs == 'ext3':
+ if mount_options is None:
+ mount_options = ['noatime','user_xattr']
+
+ if mount_options is None:
+ mount_options = []
+ if mkfs_options is None:
+ mkfs_options = []
+ mkfs = ['mkfs.%s' % fs] + mkfs_options
+ log.info('%s on %s on %s' % (mkfs, dev, remote))
+ if package is not None:
+ remote.run(
+ args=[
+ 'sudo',
+ 'apt-get', 'install', '-y', package
+ ],
+ stdout=StringIO(),
+ )
+
+ try:
+ remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
+ except run.CommandFailedError:
+                    # Newer btrfs-tools doesn't prompt for overwrite, use -f
+                    if '-f' not in mkfs_options:
+ mkfs_options.append('-f')
+ mkfs = ['mkfs.%s' % fs] + mkfs_options
+ log.info('%s on %s on %s' % (mkfs, dev, remote))
+ remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
+
+ log.info('mount %s on %s -o %s' % (dev, remote,
+ ','.join(mount_options)))
+ remote.run(
+ args=[
+ 'sudo',
+ 'mount',
+ '-t', fs,
+ '-o', ','.join(mount_options),
+ dev,
+ os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)),
+ ]
+ )
+ if not remote in ctx.disk_config.remote_to_roles_to_dev_mount_options:
+ ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {}
+ ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][id_] = mount_options
+ if not remote in ctx.disk_config.remote_to_roles_to_dev_fstype:
+ ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
+ ctx.disk_config.remote_to_roles_to_dev_fstype[remote][id_] = fs
+ devs_to_clean[remote].append(
+ os.path.join(
+ os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)),
+ )
+ )
+
+ for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
+ remote.run(
+ args=[
+ 'sudo',
+ 'MALLOC_CHECK_=3',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-osd',
+ '--mkfs',
+ '--mkkey',
+ '-i', id_,
+ '--monmap', '{tdir}/monmap'.format(tdir=testdir),
+ ],
+ )
+
+
+ log.info('Reading keys from all nodes...')
+ keys_fp = StringIO()
+ keys = []
+ for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for type_ in ['mds','osd']:
+ for id_ in teuthology.roles_of_type(roles_for_host, type_):
+ data = teuthology.get_file(
+ remote=remote,
+ path='/var/lib/ceph/{type}/ceph-{id}/keyring'.format(
+ type=type_,
+ id=id_,
+ ),
+ sudo=True,
+ )
+ keys.append((type_, id_, data))
+ keys_fp.write(data)
+ for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for type_ in ['client']:
+ for id_ in teuthology.roles_of_type(roles_for_host, type_):
+ data = teuthology.get_file(
+ remote=remote,
+ path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+ )
+ keys.append((type_, id_, data))
+ keys_fp.write(data)
+
+ log.info('Adding keys to all mons...')
+ writes = mons.run(
+ args=[
+ 'sudo', 'tee', '-a',
+ keyring_path,
+ ],
+ stdin=run.PIPE,
+ wait=False,
+ stdout=StringIO(),
+ )
+ keys_fp.seek(0)
+ teuthology.feed_many_stdins_and_close(keys_fp, writes)
+ run.wait(writes)
+ for type_, id_, data in keys:
+ run.wait(
+ mons.run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ keyring_path,
+ '--name={type}.{id}'.format(
+ type=type_,
+ id=id_,
+ ),
+ ] + list(teuthology.generate_caps(type_)),
+ wait=False,
+ ),
+ )
+
+ log.info('Running mkfs on mon nodes...')
+ for remote, roles_for_host in mons.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
+ remote.run(
+ args=[
+ 'sudo',
+ 'mkdir',
+ '-p',
+ '/var/lib/ceph/mon/ceph-{id}'.format(id=id_),
+ ],
+ )
+ remote.run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-mon',
+ '--mkfs',
+ '-i', id_,
+ '--monmap={tdir}/monmap'.format(tdir=testdir),
+ '--osdmap={tdir}/osdmap'.format(tdir=testdir),
+ '--keyring={kpath}'.format(kpath=keyring_path),
+ ],
+ )
+
+
+ run.wait(
+ mons.run(
+ args=[
+ 'rm',
+ '--',
+ '{tdir}/monmap'.format(tdir=testdir),
+ '{tdir}/osdmap'.format(tdir=testdir),
+ ],
+ wait=False,
+ ),
+ )
+
+ try:
+ yield
+ except Exception:
+ # we need to know this below
+ ctx.summary['success'] = False
+ raise
+ finally:
+ (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+
+ log.info('Checking cluster log for badness...')
+ def first_in_ceph_log(pattern, excludes):
+ """
+            Find the first occurrence of the pattern specified in the Ceph log.
+            Returns None if none found.
+
+ :param pattern: Pattern scanned for.
+ :param excludes: Patterns to ignore.
+ :return: First line of text (or None if not found)
+ """
+ args = [
+ 'sudo',
+ 'egrep', pattern,
+ '/var/log/ceph/ceph.log',
+ ]
+ for exclude in excludes:
+ args.extend([run.Raw('|'), 'egrep', '-v', exclude])
+ args.extend([
+ run.Raw('|'), 'head', '-n', '1',
+ ])
+ r = mon0_remote.run(
+ stdout=StringIO(),
+ args=args,
+ )
+ stdout = r.stdout.getvalue()
+ if stdout != '':
+ return stdout
+ return None
+
+ if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]',
+ config['log_whitelist']) is not None:
+ log.warning('Found errors (ERR|WRN|SEC) in cluster log')
+ ctx.summary['success'] = False
+ # use the most severe problem as the failure reason
+ if 'failure_reason' not in ctx.summary:
+ for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']:
+ match = first_in_ceph_log(pattern, config['log_whitelist'])
+ if match is not None:
+ ctx.summary['failure_reason'] = \
+ '"{match}" in cluster log'.format(
+ match=match.rstrip('\n'),
+ )
+ break
+
+ for remote, dirs in devs_to_clean.iteritems():
+ for dir_ in dirs:
+ log.info('Unmounting %s on %s' % (dir_, remote))
+ remote.run(
+ args=[
+ 'sync',
+ run.Raw('&&'),
+ 'sudo',
+ 'umount',
+ '-f',
+ dir_
+ ]
+ )
+
+ if config.get('tmpfs_journal'):
+ log.info('tmpfs journal enabled - unmounting tmpfs at /mnt')
+ for remote, roles_for_host in osds.remotes.iteritems():
+ remote.run(
+ args=[ 'sudo', 'umount', '-f', '/mnt' ],
+ check_status=False,
+ )
+
+ if ctx.archive is not None and \
+ not (ctx.config.get('archive-on-error') and ctx.summary['success']):
+ # archive mon data, too
+ log.info('Archiving mon data...')
+ path = os.path.join(ctx.archive, 'data')
+ os.makedirs(path)
+ for remote, roles in mons.remotes.iteritems():
+ for role in roles:
+ if role.startswith('mon.'):
+ teuthology.pull_directory_tarball(
+ remote,
+ '/var/lib/ceph/mon',
+ path + '/' + role + '.tgz')
+
+ # and logs
+ log.info('Compressing logs...')
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'sudo',
+ 'find',
+ '/var/log/ceph',
+ '-name',
+ '*.log',
+ '-print0',
+ run.Raw('|'),
+ 'sudo',
+ 'xargs',
+ '-0',
+ '--no-run-if-empty',
+ '--',
+ 'gzip',
+ '--',
+ ],
+ wait=False,
+ ),
+ )
+
+ log.info('Archiving logs...')
+ path = os.path.join(ctx.archive, 'remote')
+ os.makedirs(path)
+ for remote in ctx.cluster.remotes.iterkeys():
+ sub = os.path.join(path, remote.shortname)
+ os.makedirs(sub)
+ teuthology.pull_directory(remote, '/var/log/ceph',
+ os.path.join(sub, 'log'))
+
+
+ log.info('Cleaning ceph cluster...')
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'sudo',
+ 'rm',
+ '-rf',
+ '--',
+ conf_path,
+ keyring_path,
+ '{tdir}/data'.format(tdir=testdir),
+ '{tdir}/monmap'.format(tdir=testdir),
+ ],
+ wait=False,
+ ),
+ )
+
+def get_all_pg_info(rem_site, testdir):
+ """
+ Get the results of a ceph pg dump
+ """
+ info = rem_site.run(args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'ceph', 'pg', 'dump',
+ '--format', 'json'], stdout=StringIO())
+ all_info = json.loads(info.stdout.getvalue())
+ return all_info['pg_stats']
+
+def osd_scrub_pgs(ctx, config):
+ """
+ Scrub pgs when we exit.
+
+ First make sure all pgs are active and clean.
+ Next scrub all osds.
+ Then periodically check until all pgs have scrub time stamps that
+    indicate the last scrub completed. Time out if no progress is made
+ here after two minutes.
+ """
+ retries = 12
+ delays = 10
+ vlist = ctx.cluster.remotes.values()
+ testdir = teuthology.get_testdir(ctx)
+ rem_site = ctx.cluster.remotes.keys()[0]
+ all_clean = False
+ for _ in range(0, retries):
+ stats = get_all_pg_info(rem_site, testdir)
+ states = [stat['state'] for stat in stats]
+ if len(set(states)) == 1 and states[0] == 'active+clean':
+ all_clean = True
+ break
+ log.info("Waiting for all osds to be active and clean.")
+ time.sleep(delays)
+ if not all_clean:
+ log.info("Scrubbing terminated -- not all pgs were active and clean.")
+ return
+ check_time_now = time.localtime()
+ time.sleep(1)
+ for slists in vlist:
+ for role in slists:
+ if role.startswith('osd.'):
+ log.info("Scrubbing osd {osd}".format(osd=role))
+ rem_site.run(args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'ceph', 'osd', 'scrub', role])
+ prev_good = 0
+ gap_cnt = 0
+ loop = True
+ while loop:
+ stats = get_all_pg_info(rem_site, testdir)
+ timez = [stat['last_scrub_stamp'] for stat in stats]
+ loop = False
+ thiscnt = 0
+ for tmval in timez:
+ pgtm = time.strptime(tmval[0:tmval.find('.')], '%Y-%m-%d %H:%M:%S')
+ if pgtm > check_time_now:
+ thiscnt += 1
+ else:
+ loop = True
+ if thiscnt > prev_good:
+ prev_good = thiscnt
+ gap_cnt = 0
+ else:
+ gap_cnt += 1
+ if gap_cnt > retries:
+ log.info('Exiting scrub checking -- not all pgs scrubbed.')
+ return
+ if loop:
+ log.info('Still waiting for all pgs to be scrubbed.')
+ time.sleep(delays)
+
+@contextlib.contextmanager
+def run_daemon(ctx, config, type_):
+ """
+    Run daemons for a role type. Handle the startup and termination of a daemon.
+ On startup -- set coverages, cpu_profile, valgrind values for all remotes,
+ and a max_mds value for one mds.
+ On cleanup -- Stop all existing daemons of this type.
+
+ :param ctx: Context
+ :param config: Configuration
+    :param type_: Role type
+ """
+ log.info('Starting %s daemons...' % type_)
+ testdir = teuthology.get_testdir(ctx)
+ daemons = ctx.cluster.only(teuthology.is_type(type_))
+
+    # check whether any daemons of this type are configured
+ if daemons is None:
+ return
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+
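+    # daemon-helper normally stops daemons with SIGKILL; under code coverage
+    # or valgrind they must be allowed to shut down cleanly, so use SIGTERM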
+ daemon_signal = 'kill'
+ if config.get('coverage') or config.get('valgrind') is not None:
+ daemon_signal = 'term'
+
+ num_active = 0
+ for remote, roles_for_host in daemons.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, type_):
+ name = '%s.%s' % (type_, id_)
+
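+            # ids ending in '-s' (or containing '-s-') follow the standby
+            # naming convention; only the remaining daemons are counted as
+            # active, which is used below to set max_mds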
+ if not (id_.endswith('-s')) and (id_.find('-s-') == -1):
+ num_active += 1
+
+ run_cmd = [
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'daemon-helper',
+ daemon_signal,
+ ]
+ run_cmd_tail = [
+ 'ceph-%s' % (type_),
+ '-f',
+ '-i', id_]
+
+ if type_ in config.get('cpu_profile', []):
+ profile_path = '/var/log/ceph/profiling-logger/%s.%s.prof' % (type_, id_)
+ run_cmd.extend([ 'env', 'CPUPROFILE=%s' % profile_path ])
+
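+            # valgrind settings may be given per daemon type (e.g. 'osd') or
+            # per daemon name (e.g. 'osd.1'); the per-name entry wins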
+ if config.get('valgrind') is not None:
+ valgrind_args = None
+ if type_ in config['valgrind']:
+ valgrind_args = config['valgrind'][type_]
+ if name in config['valgrind']:
+ valgrind_args = config['valgrind'][name]
+ run_cmd = teuthology.get_valgrind_args(testdir, name,
+ run_cmd,
+ valgrind_args)
+
+ run_cmd.extend(run_cmd_tail)
+
+ ctx.daemons.add_daemon(remote, type_, id_,
+ args=run_cmd,
+ logger=log.getChild(name),
+ stdin=run.PIPE,
+ wait=False,
+ )
+
+ if type_ == 'mds':
+ firstmon = teuthology.get_first_mon(ctx, config)
+ (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+
+ mon0_remote.run(args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph',
+ 'mds', 'set_max_mds', str(num_active)])
+
+ try:
+ yield
+ finally:
+ teuthology.stop_daemons_of_type(ctx, type_)
+
+def healthy(ctx, config):
+ """
+    Wait for all osds to be up, and for 'ceph health' to report HEALTH_OK.
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ log.info('Waiting until ceph is healthy...')
+ firstmon = teuthology.get_first_mon(ctx, config)
+ (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+ teuthology.wait_until_osds_up(
+ ctx,
+ cluster=ctx.cluster,
+ remote=mon0_remote
+ )
+ teuthology.wait_until_healthy(
+ ctx,
+ remote=mon0_remote,
+ )
+
+def wait_for_osds_up(ctx, config):
+ """
+    Wait for all osds to come up.
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ log.info('Waiting until ceph osds are all up...')
+ firstmon = teuthology.get_first_mon(ctx, config)
+ (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+ teuthology.wait_until_osds_up(
+ ctx,
+ cluster=ctx.cluster,
+ remote=mon0_remote
+ )
+
+def wait_for_mon_quorum(ctx, config):
+ """
+    Check remote ceph quorum status until all requested monitors are in quorum.
+
+ :param ctx: Context
+ :param config: Configuration
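+
+    A possible invocation (assuming this is exposed as a sub-task, like
+    ceph.restart, and given the list of monitor ids to wait for)::
+
+        tasks:
+        - ceph.wait_for_mon_quorum: [a, b, c]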
+ """
+
+ assert isinstance(config, list)
+ firstmon = teuthology.get_first_mon(ctx, config)
+ (remote,) = ctx.cluster.only(firstmon).remotes.keys()
+ while True:
+ r = remote.run(
+ args=[
+ 'ceph',
+ 'quorum_status',
+ ],
+ stdout=StringIO(),
+ logger=log.getChild('quorum_status'),
+ )
+ j = json.loads(r.stdout.getvalue())
+ q = j.get('quorum_names', [])
+ log.debug('Quorum: %s', q)
+ if sorted(q) == sorted(config):
+ break
+ time.sleep(1)
+
+
+@contextlib.contextmanager
+def restart(ctx, config):
+ """
+ restart ceph daemons
+
+ For example::
+ tasks:
+ - ceph.restart: [all]
+
+ For example::
+ tasks:
+ - ceph.restart: [osd.0, mon.1]
+
+ or::
+
+ tasks:
+ - ceph.restart:
+ daemons: [osd.0, mon.1]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ if config is None:
+ config = {}
+ if isinstance(config, list):
+ config = { 'daemons': config }
+ if 'daemons' not in config:
+ config['daemons'] = []
+ type_daemon = ['mon', 'osd', 'mds', 'rgw']
+ for d in type_daemon:
+ type_ = d
+ for daemon in ctx.daemons.iter_daemons_of_role(type_):
+ config['daemons'].append(type_ + '.' + daemon.id_)
+
+ assert isinstance(config['daemons'], list)
+ daemons = dict.fromkeys(config['daemons'])
+ for i in daemons.keys():
+ type_ = i.split('.', 1)[0]
+ id_ = i.split('.', 1)[1]
+ ctx.daemons.get_daemon(type_, id_).stop()
+ ctx.daemons.get_daemon(type_, id_).restart()
+
+ if config.get('wait-for-healthy', True):
+ healthy(ctx=ctx, config=None)
+ if config.get('wait-for-osds-up', False):
+ wait_for_osds_up(ctx=ctx, config=None)
+ yield
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Set up and tear down a Ceph cluster.
+
+ For example::
+
+ tasks:
+ - ceph:
+ - interactive:
+
+ You can also specify what branch to run::
+
+ tasks:
+ - ceph:
+ branch: foo
+
+ Or a tag::
+
+ tasks:
+ - ceph:
+ tag: v0.42.13
+
+ Or a sha1::
+
+ tasks:
+ - ceph:
+ sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed
+
+ Or a local source dir::
+
+ tasks:
+ - ceph:
+ path: /home/sage/ceph
+
+ To capture code coverage data, use::
+
+ tasks:
+ - ceph:
+ coverage: true
+
+ To use btrfs, ext4, or xfs on the target's scratch disks, use::
+
+ tasks:
+ - ceph:
+ fs: xfs
+ mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1]
+ mount_options: [nobarrier, inode64]
+
+    Note that this will cause the task to check the /scratch_devs file on each node
+ for available devices. If no such file is found, /dev/sdb will be used.
+
+ To run some daemons under valgrind, include their names
+ and the tool/args to use in a valgrind section::
+
+ tasks:
+ - ceph:
+ valgrind:
+ mds.1: --tool=memcheck
+ osd.1: [--tool=memcheck, --leak-check=no]
+
+ Those nodes which are using memcheck or valgrind will get
+ checked for bad results.
+
+ To adjust or modify config options, use::
+
+ tasks:
+ - ceph:
+ conf:
+ section:
+ key: value
+
+ For example::
+
+ tasks:
+ - ceph:
+ conf:
+ mds.0:
+ some option: value
+ other key: other value
+ client.0:
+ debug client: 10
+ debug ms: 1
+
+ By default, the cluster log is checked for errors and warnings,
+ and the run marked failed if any appear. You can ignore log
+    entries by giving a list of egrep-compatible regexes, e.g.::
+
+ tasks:
+ - ceph:
+ log-whitelist: ['foo.*bar', 'bad message']
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ "task ceph only supports a dictionary for configuration"
+
+ overrides = ctx.config.get('overrides', {})
+ teuthology.deep_merge(config, overrides.get('ceph', {}))
+
+ ctx.daemons = DaemonGroup()
+
+ testdir = teuthology.get_testdir(ctx)
+ if config.get('coverage'):
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+ log.info('Creating coverage directory...')
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'install', '-d', '-m0755', '--',
+ coverage_dir,
+ ],
+ wait=False,
+ )
+ )
+
+ with contextutil.nested(
+ lambda: ceph_log(ctx=ctx, config=None),
+ lambda: valgrind_post(ctx=ctx, config=config),
+ lambda: cluster(ctx=ctx, config=dict(
+ conf=config.get('conf', {}),
+ fs=config.get('fs', None),
+ mkfs_options=config.get('mkfs_options', None),
+ mount_options=config.get('mount_options',None),
+ block_journal=config.get('block_journal', None),
+ tmpfs_journal=config.get('tmpfs_journal', None),
+ log_whitelist=config.get('log-whitelist', []),
+ cpu_profile=set(config.get('cpu_profile', [])),
+ )),
+ lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
+ lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
+ lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
+ ):
+ try:
+ if config.get('wait-for-healthy', True):
+ healthy(ctx=ctx, config=None)
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ ctx.manager = CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+ yield
+ finally:
+ osd_scrub_pgs(ctx, config)
--- /dev/null
+"""
+Set up client keyring
+"""
+import logging
+
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+def create_keyring(ctx):
+ """
+    Set up a client keyring on each remote client node
+ """
+ log.info('Setting up client nodes...')
+ clients = ctx.cluster.only(teuthology.is_type('client'))
+ testdir = teuthology.get_testdir(ctx)
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+ for remote, roles_for_host in clients.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
+ client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+ remote.run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ '--create-keyring',
+ '--gen-key',
+ # TODO this --name= is not really obeyed, all unknown "types" are munged to "client"
+ '--name=client.{id}'.format(id=id_),
+ client_keyring,
+ run.Raw('&&'),
+ 'sudo',
+ 'chmod',
+ '0644',
+ client_keyring,
+ ],
+ )
--- /dev/null
+"""
+Execute ceph-deploy as a task
+"""
+from cStringIO import StringIO
+
+import contextlib
+import os
+import time
+import logging
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.config import config as teuth_config
+from teuthology.task import install as install_fn
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download_ceph_deploy(ctx, config):
+ """
+ Downloads ceph-deploy from the ceph.com git mirror and (by default)
+ switches to the master branch. If the `ceph-deploy-branch` is specified, it
+ will use that instead.
+ """
+ log.info('Downloading ceph-deploy...')
+ testdir = teuthology.get_testdir(ctx)
+ ceph_admin = teuthology.get_first_mon(ctx, config)
+ default_cd_branch = {'ceph-deploy-branch': 'master'}
+ ceph_deploy_branch = config.get(
+ 'ceph-deploy',
+ default_cd_branch).get('ceph-deploy-branch')
+
+ ctx.cluster.only(ceph_admin).run(
+ args=[
+ 'git', 'clone', '-b', ceph_deploy_branch,
+ teuth_config.ceph_git_base_url + 'ceph-deploy.git',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ ],
+ )
+ ctx.cluster.only(ceph_admin).run(
+ args=[
+ 'cd',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ run.Raw('&&'),
+ './bootstrap',
+ ],
+ )
+
+ try:
+ yield
+ finally:
+ log.info('Removing ceph-deploy ...')
+ ctx.cluster.only(ceph_admin).run(
+ args=[
+ 'rm',
+ '-rf',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ ],
+ )
+
+
+def is_healthy(ctx, config):
+ """Wait until a Ceph cluster is healthy."""
+ testdir = teuthology.get_testdir(ctx)
+ ceph_admin = teuthology.get_first_mon(ctx, config)
+ (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
+ max_tries = 90 # 90 tries * 10 secs --> 15 minutes
+ tries = 0
+ while True:
+ tries += 1
+ if tries >= max_tries:
+ msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes"
+ raise RuntimeError(msg)
+
+ r = remote.run(
+ args=[
+ 'cd',
+ '{tdir}'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'sudo', 'ceph',
+ 'health',
+ ],
+ stdout=StringIO(),
+ logger=log.getChild('health'),
+ )
+ out = r.stdout.getvalue()
+ log.debug('Ceph health: %s', out.rstrip('\n'))
+ if out.split(None, 1)[0] == 'HEALTH_OK':
+ break
+ time.sleep(10)
+
+def get_nodes_using_roles(ctx, config, role):
+ """Extract the names of nodes that match a given role from a cluster"""
+ newl = []
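+    # mon nodes are recorded by their full host name (everything after '@');
+    # other roles use only the short host name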
+ for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, role):
+ rem = _remote
+ if role == 'mon':
+ req1 = str(rem).split('@')[-1]
+ else:
+ req = str(rem).split('.')[0]
+ req1 = str(req).split('@')[1]
+ newl.append(req1)
+ return newl
+
+def get_dev_for_osd(ctx, config):
+ """Get a list of all osd device names."""
+ osd_devs = []
+ for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ host = remote.name.split('@')[-1]
+ shortname = host.split('.')[0]
+ devs = teuthology.get_scratch_devices(remote)
+ num_osd_per_host = list(teuthology.roles_of_type(roles_for_host, 'osd'))
+ num_osds = len(num_osd_per_host)
+ assert num_osds <= len(devs), 'fewer disks than osds on ' + shortname
+ for dev in devs[:num_osds]:
+ dev_short = dev.split('/')[-1]
+ osd_devs.append('{host}:{dev}'.format(host=shortname, dev=dev_short))
+ return osd_devs
+
+def get_all_nodes(ctx, config):
+ """Return a string of node names separated by blanks"""
+ nodelist = []
+ for t, k in ctx.config['targets'].iteritems():
+ host = t.split('@')[-1]
+ simple_host = host.split('.')[0]
+ nodelist.append(simple_host)
+ nodelist = " ".join(nodelist)
+ return nodelist
+
+def execute_ceph_deploy(ctx, config, cmd):
+ """Remotely execute a ceph_deploy command"""
+ testdir = teuthology.get_testdir(ctx)
+ ceph_admin = teuthology.get_first_mon(ctx, config)
+ exec_cmd = cmd
+ (remote,) = ctx.cluster.only(ceph_admin).remotes.iterkeys()
+ proc = remote.run(
+ args = [
+ 'cd',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ run.Raw('&&'),
+ run.Raw(exec_cmd),
+ ],
+ check_status=False,
+ )
+ exitstatus = proc.exitstatus
+ return exitstatus
+
+
+@contextlib.contextmanager
+def build_ceph_cluster(ctx, config):
+ """Build a ceph cluster"""
+
+ try:
+ log.info('Building ceph cluster using ceph-deploy...')
+ testdir = teuthology.get_testdir(ctx)
+ ceph_branch = None
+ if config.get('branch') is not None:
+ cbranch = config.get('branch')
+ for var, val in cbranch.iteritems():
+                if var == 'testing':
+                    ceph_branch = '--{var}'.format(var=var)
+                else:
+                    ceph_branch = '--{var}={val}'.format(var=var, val=val)
+ node_dev_list = []
+ all_nodes = get_all_nodes(ctx, config)
+ mds_nodes = get_nodes_using_roles(ctx, config, 'mds')
+ mds_nodes = " ".join(mds_nodes)
+ mon_node = get_nodes_using_roles(ctx, config, 'mon')
+ mon_nodes = " ".join(mon_node)
+ new_mon = './ceph-deploy new'+" "+mon_nodes
+ install_nodes = './ceph-deploy install '+ceph_branch+" "+all_nodes
+ purge_nodes = './ceph-deploy purge'+" "+all_nodes
+ purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
+ mon_hostname = mon_nodes.split(' ')[0]
+ mon_hostname = str(mon_hostname)
+ gather_keys = './ceph-deploy gatherkeys'+" "+mon_hostname
+ deploy_mds = './ceph-deploy mds create'+" "+mds_nodes
+ no_of_osds = 0
+
+ if mon_nodes is None:
+ raise RuntimeError("no monitor nodes in the config file")
+
+ estatus_new = execute_ceph_deploy(ctx, config, new_mon)
+ if estatus_new != 0:
+ raise RuntimeError("ceph-deploy: new command failed")
+
+ log.info('adding config inputs...')
+ testdir = teuthology.get_testdir(ctx)
+ conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (remote,) = ctx.cluster.only(first_mon).remotes.keys()
+
+ lines = None
+ if config.get('conf') is not None:
+ confp = config.get('conf')
+ for section, keys in confp.iteritems():
+ lines = '[{section}]\n'.format(section=section)
+ teuthology.append_lines_to_file(remote, conf_path, lines,
+ sudo=True)
+ for key, value in keys.iteritems():
+ log.info("[%s] %s = %s" % (section, key, value))
+ lines = '{key} = {value}\n'.format(key=key, value=value)
+ teuthology.append_lines_to_file(remote, conf_path, lines,
+ sudo=True)
+
+ estatus_install = execute_ceph_deploy(ctx, config, install_nodes)
+ if estatus_install != 0:
+ raise RuntimeError("ceph-deploy: Failed to install ceph")
+
+ mon_create_nodes = './ceph-deploy mon create-initial'
+ # If the following fails, it is OK, it might just be that the monitors
+        # are taking way more than a minute/monitor to form quorum, so let's
+ # try the next block which will wait up to 15 minutes to gatherkeys.
+ execute_ceph_deploy(ctx, config, mon_create_nodes)
+
+ estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
+ max_gather_tries = 90
+ gather_tries = 0
+ while (estatus_gather != 0):
+ gather_tries += 1
+ if gather_tries >= max_gather_tries:
+ msg = 'ceph-deploy was not able to gatherkeys after 15 minutes'
+ raise RuntimeError(msg)
+ estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
+ time.sleep(10)
+
+ if mds_nodes:
+ estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds)
+ if estatus_mds != 0:
+ raise RuntimeError("ceph-deploy: Failed to deploy mds")
+
+ if config.get('test_mon_destroy') is not None:
+ for d in range(1, len(mon_node)):
+ mon_destroy_nodes = './ceph-deploy mon destroy'+" "+mon_node[d]
+ estatus_mon_d = execute_ceph_deploy(ctx, config,
+ mon_destroy_nodes)
+ if estatus_mon_d != 0:
+ raise RuntimeError("ceph-deploy: Failed to delete monitor")
+
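+        # create one osd per discovered host:device pair; if the initial
+        # 'osd create --zap-disk' fails, zap the disk explicitly and retry
+        # once before giving up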
+ node_dev_list = get_dev_for_osd(ctx, config)
+ for d in node_dev_list:
+ osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d
+ estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
+ if estatus_osd == 0:
+ log.info('successfully created osd')
+ no_of_osds += 1
+ else:
+ zap_disk = './ceph-deploy disk zap'+" "+d
+ execute_ceph_deploy(ctx, config, zap_disk)
+ estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
+ if estatus_osd == 0:
+ log.info('successfully created osd')
+ no_of_osds += 1
+ else:
+ raise RuntimeError("ceph-deploy: Failed to create osds")
+
+ if config.get('wait-for-healthy', True) and no_of_osds >= 2:
+ is_healthy(ctx=ctx, config=None)
+
+ log.info('Setting up client nodes...')
+ conf_path = '/etc/ceph/ceph.conf'
+ admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
+ conf_data = teuthology.get_file(
+ remote=mon0_remote,
+ path=conf_path,
+ sudo=True,
+ )
+ admin_keyring = teuthology.get_file(
+ remote=mon0_remote,
+ path=admin_keyring_path,
+ sudo=True,
+ )
+
+ clients = ctx.cluster.only(teuthology.is_type('client'))
+ for remot, roles_for_host in clients.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
+ client_keyring = \
+ '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+ mon0_remote.run(
+ args=[
+ 'cd',
+ '{tdir}'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'sudo', 'bash', '-c',
+ run.Raw('"'), 'ceph',
+ 'auth',
+ 'get-or-create',
+ 'client.{id}'.format(id=id_),
+ 'mds', 'allow',
+ 'mon', 'allow *',
+ 'osd', 'allow *',
+ run.Raw('>'),
+ client_keyring,
+ run.Raw('"'),
+ ],
+ )
+ key_data = teuthology.get_file(
+ remote=mon0_remote,
+ path=client_keyring,
+ sudo=True,
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=client_keyring,
+ data=key_data,
+ perms='0644'
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=admin_keyring_path,
+ data=admin_keyring,
+ perms='0644'
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=conf_path,
+ data=conf_data,
+ perms='0644'
+ )
+ else:
+ raise RuntimeError(
+ "The cluster is NOT operational due to insufficient OSDs")
+ yield
+
+ finally:
+ log.info('Stopping ceph...')
+ ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
+ 'sudo', 'service', 'ceph', 'stop' ])
+
+ # Are you really not running anymore?
+ # try first with the init tooling
+ # ignoring the status so this becomes informational only
+ ctx.cluster.run(args=['sudo', 'status', 'ceph-all', run.Raw('||'),
+ 'sudo', 'service', 'ceph', 'status'],
+ check_status=False)
+
+ # and now just check for the processes themselves, as if upstart/sysvinit
+ # is lying to us. Ignore errors if the grep fails
+ ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'),
+ 'grep', '-v', 'grep', run.Raw('|'),
+ 'grep', 'ceph'], check_status=False)
+
+ if ctx.archive is not None:
+ # archive mon data, too
+ log.info('Archiving mon data...')
+ path = os.path.join(ctx.archive, 'data')
+ os.makedirs(path)
+ mons = ctx.cluster.only(teuthology.is_type('mon'))
+ for remote, roles in mons.remotes.iteritems():
+ for role in roles:
+ if role.startswith('mon.'):
+ teuthology.pull_directory_tarball(
+ remote,
+ '/var/lib/ceph/mon',
+ path + '/' + role + '.tgz')
+
+ log.info('Compressing logs...')
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'sudo',
+ 'find',
+ '/var/log/ceph',
+ '-name',
+ '*.log',
+ '-print0',
+ run.Raw('|'),
+ 'sudo',
+ 'xargs',
+ '-0',
+ '--no-run-if-empty',
+ '--',
+ 'gzip',
+ '--',
+ ],
+ wait=False,
+ ),
+ )
+
+ log.info('Archiving logs...')
+ path = os.path.join(ctx.archive, 'remote')
+ os.makedirs(path)
+ for remote in ctx.cluster.remotes.iterkeys():
+ sub = os.path.join(path, remote.shortname)
+ os.makedirs(sub)
+ teuthology.pull_directory(remote, '/var/log/ceph',
+ os.path.join(sub, 'log'))
+
+ # Prevent these from being undefined if the try block fails
+ all_nodes = get_all_nodes(ctx, config)
+ purge_nodes = './ceph-deploy purge'+" "+all_nodes
+ purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
+
+ log.info('Purging package...')
+ execute_ceph_deploy(ctx, config, purge_nodes)
+ log.info('Purging data...')
+ execute_ceph_deploy(ctx, config, purgedata_nodes)
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Set up and tear down a Ceph cluster.
+
+ For example::
+
+ tasks:
+ - install:
+ extras: yes
+ - ssh_keys:
+ - ceph-deploy:
+ branch:
+ stable: bobtail
+ mon_initial_members: 1
+
+ tasks:
+ - install:
+ extras: yes
+ - ssh_keys:
+ - ceph-deploy:
+ branch:
+ dev: master
+ conf:
+ mon:
+ debug mon = 20
+
+ tasks:
+ - install:
+ extras: yes
+ - ssh_keys:
+ - ceph-deploy:
+ branch:
+ testing:
+ """
+ if config is None:
+ config = {}
+
+    assert isinstance(config, dict), \
+        "task ceph-deploy only supports a dictionary for configuration"
+
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
+
+ if config.get('branch') is not None:
+ assert isinstance(config['branch'], dict), 'branch must be a dictionary'
+
+ with contextutil.nested(
+ lambda: install_fn.ship_utilities(ctx=ctx, config=None),
+ lambda: download_ceph_deploy(ctx=ctx, config=config),
+ lambda: build_ceph_cluster(ctx=ctx, config=dict(
+ conf=config.get('conf', {}),
+ branch=config.get('branch',{}),
+ mon_initial_members=config.get('mon_initial_members', None),
+ test_mon_destroy=config.get('test_mon_destroy', None),
+ )),
+ ):
+ yield
--- /dev/null
+"""
+Ceph FUSE client task
+"""
+import contextlib
+import logging
+import os
+import time
+from cStringIO import StringIO
+
+from teuthology import misc
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Mount/unmount a ``ceph-fuse`` client.
+
+ The config is optional and defaults to mounting on all clients. If
+ a config is given, it is expected to be a list of clients to do
+ this operation on. This lets you e.g. set up one client with
+ ``ceph-fuse`` and another with ``kclient``.
+
+ Example that mounts all clients::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ - interactive:
+
+    Example that uses both ``kclient`` and ``ceph-fuse``::
+
+ tasks:
+ - ceph:
+ - ceph-fuse: [client.0]
+ - kclient: [client.1]
+ - interactive:
+
+    Example that enables valgrind::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ client.0:
+ valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ - interactive:
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ log.info('Mounting ceph-fuse clients...')
+ fuse_daemons = {}
+
+ testdir = misc.get_testdir(ctx)
+
+ if config is None:
+ config = dict(('client.{id}'.format(id=id_), None)
+ for id_ in misc.all_roles_of_type(ctx.cluster, 'client'))
+ elif isinstance(config, list):
+ config = dict((name, None) for name in config)
+
+ overrides = ctx.config.get('overrides', {})
+ misc.deep_merge(config, overrides.get('ceph-fuse', {}))
+
+ clients = list(misc.get_clients(ctx=ctx, roles=config.keys()))
+
+ for id_, remote in clients:
+ client_config = config.get("client.%s" % id_)
+ if client_config is None:
+ client_config = {}
+ log.info("Client client.%s config is %s" % (id_, client_config))
+
+ daemon_signal = 'kill'
+ if client_config.get('coverage') or client_config.get('valgrind') is not None:
+ daemon_signal = 'term'
+
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
+ id=id_, remote=remote,mnt=mnt))
+
+ remote.run(
+ args=[
+ 'mkdir',
+ '--',
+ mnt,
+ ],
+ )
+
+ run_cmd=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'daemon-helper',
+ daemon_signal,
+ ]
+ run_cmd_tail=[
+ 'ceph-fuse',
+ '-f',
+ '--name', 'client.{id}'.format(id=id_),
+ # TODO ceph-fuse doesn't understand dash dash '--',
+ mnt,
+ ]
+
+ if client_config.get('valgrind') is not None:
+ run_cmd = misc.get_valgrind_args(
+ testdir,
+ 'client.{id}'.format(id=id_),
+ run_cmd,
+ client_config.get('valgrind'),
+ )
+
+ run_cmd.extend(run_cmd_tail)
+
+ proc = remote.run(
+ args=run_cmd,
+ logger=log.getChild('ceph-fuse.{id}'.format(id=id_)),
+ stdin=run.PIPE,
+ wait=False,
+ )
+ fuse_daemons[id_] = proc
+
+ for id_, remote in clients:
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ wait_until_fuse_mounted(
+ remote=remote,
+ fuse=fuse_daemons[id_],
+ mountpoint=mnt,
+ )
+ remote.run(args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],)
+
+ try:
+ yield
+ finally:
+ log.info('Unmounting ceph-fuse clients...')
+ for id_, remote in clients:
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ try:
+ remote.run(
+ args=[
+ 'sudo',
+ 'fusermount',
+ '-u',
+ mnt,
+ ],
+ )
+ except run.CommandFailedError:
+ log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=remote.name))
+ # abort the fuse mount, killing all hung processes
+ remote.run(
+ args=[
+ 'if', 'test', '-e', '/sys/fs/fuse/connections/*/abort',
+ run.Raw(';'), 'then',
+ 'echo',
+ '1',
+ run.Raw('>'),
+ run.Raw('/sys/fs/fuse/connections/*/abort'),
+ run.Raw(';'), 'fi',
+ ],
+ )
+            # make sure it's unmounted
+ remote.run(
+ args=[
+ 'sudo',
+ 'umount',
+ '-l',
+ '-f',
+ mnt,
+ ],
+ )
+
+ run.wait(fuse_daemons.itervalues())
+
+ for id_, remote in clients:
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ remote.run(
+ args=[
+ 'rmdir',
+ '--',
+ mnt,
+ ],
+ )
+
+
+def wait_until_fuse_mounted(remote, fuse, mountpoint):
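+    """
+    Poll 'stat --file-system' on the mountpoint until it reports the
+    fuseblk filesystem type, asserting along the way that the ceph-fuse
+    process has not already exited.
+    """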
+ while True:
+ proc = remote.run(
+ args=[
+ 'stat',
+ '--file-system',
+ '--printf=%T\n',
+ '--',
+ mountpoint,
+ ],
+ stdout=StringIO(),
+ )
+ fstype = proc.stdout.getvalue().rstrip('\n')
+ if fstype == 'fuseblk':
+ break
+ log.debug('ceph-fuse not yet mounted, got fs type {fstype!r}'.format(fstype=fstype))
+
+ # it shouldn't have exited yet; exposes some trivial problems
+ assert not fuse.poll()
+
+ time.sleep(5)
+ log.info('ceph-fuse is mounted on %s', mountpoint)
--- /dev/null
+"""
+ceph manager -- Thrasher and CephManager objects
+"""
+from cStringIO import StringIO
+import random
+import time
+import gevent
+import json
+import logging
+import threading
+import os
+from teuthology import misc as teuthology
+from tasks.scrub import Scrubber
+from teuthology.orchestra.remote import Remote
+
+log = logging.getLogger(__name__)
+
+def make_admin_daemon_dir(ctx, remote):
+ """
+ Create /var/run/ceph directory on remote site.
+
+ :param ctx: Context
+ :param remote: Remote site
+ """
+ remote.run(
+ args=[
+ 'sudo',
+ 'install', '-d', '-m0777', '--', '/var/run/ceph',
+ ],
+ )
+
+
+def mount_osd_data(ctx, remote, osd):
+ """
+ Mount a remote OSD
+
+ :param ctx: Context
+ :param remote: Remote site
+    :param osd: Osd name
+ """
+ log.debug('Mounting data for osd.{o} on {r}'.format(o=osd, r=remote))
+ if remote in ctx.disk_config.remote_to_roles_to_dev and osd in ctx.disk_config.remote_to_roles_to_dev[remote]:
+ dev = ctx.disk_config.remote_to_roles_to_dev[remote][osd]
+ mount_options = ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][osd]
+ fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][osd]
+ mnt = os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd))
+
+ log.info('Mounting osd.{o}: dev: {n}, mountpoint: {p}, type: {t}, options: {v}'.format(
+ o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options))
+
+ remote.run(
+ args=[
+ 'sudo',
+ 'mount',
+ '-t', fstype,
+ '-o', ','.join(mount_options),
+ dev,
+ mnt,
+ ]
+ )
+
+
+class Thrasher:
+ """
+ Object used to thrash Ceph
+ """
+ def __init__(self, manager, config, logger=None):
+ self.ceph_manager = manager
+ self.ceph_manager.wait_for_clean()
+ osd_status = self.ceph_manager.get_osd_status()
+ self.in_osds = osd_status['in']
+ self.live_osds = osd_status['live']
+ self.out_osds = osd_status['out']
+ self.dead_osds = osd_status['dead']
+ self.stopping = False
+ self.logger = logger
+ self.config = config
+ self.revive_timeout = self.config.get("revive_timeout", 150)
+ if self.config.get('powercycle'):
+ self.revive_timeout += 120
+ self.clean_wait = self.config.get('clean_wait', 0)
+ self.minin = self.config.get("min_in", 3)
+ self.chance_move_pg = self.config.get('chance_move_pg', 1.0)
+
+        num_osds = len(self.in_osds) + len(self.out_osds)
+ self.max_pgs = self.config.get("max_pgs_per_pool_osd", 1200) * num_osds
+ if self.logger is not None:
+ self.log = lambda x: self.logger.info(x)
+ else:
+ def tmp(x):
+ """
+ Implement log behavior
+ """
+ print x
+ self.log = tmp
+ if self.config is None:
+ self.config = dict()
+ # prevent monitor from auto-marking things out while thrasher runs
+ # try both old and new tell syntax, in case we are testing old code
+ try:
+ manager.raw_cluster_cmd('--', 'tell', 'mon.*', 'injectargs',
+ '--mon-osd-down-out-interval 0')
+ except Exception:
+ manager.raw_cluster_cmd('--', 'mon', 'tell', '*', 'injectargs',
+ '--mon-osd-down-out-interval 0')
+ self.thread = gevent.spawn(self.do_thrash)
+ if self.config.get('powercycle') or not self.cmd_exists_on_osds("ceph-objectstore-tool"):
+ self.ceph_objectstore_tool = False
+ self.test_rm_past_intervals = False
+ if self.config.get('powercycle'):
+ self.log("Unable to test ceph-objectstore-tool, "
+ "powercycle testing")
+ else:
+ self.log("Unable to test ceph-objectstore-tool, "
+ "not available on all OSD nodes")
+ else:
+ self.ceph_objectstore_tool = \
+ self.config.get('ceph_objectstore_tool', True)
+ self.test_rm_past_intervals = \
+ self.config.get('test_rm_past_intervals', True)
+
+ def cmd_exists_on_osds(self, cmd):
+ allremotes = self.ceph_manager.ctx.cluster.only(\
+ teuthology.is_type('osd')).remotes.keys()
+ allremotes = list(set(allremotes))
+ for remote in allremotes:
+ proc = remote.run(args=['type', cmd], wait=True,
+ check_status=False, stdout=StringIO(),
+ stderr=StringIO())
+ if proc.exitstatus != 0:
+                return False
+        return True
+
+ def kill_osd(self, osd=None, mark_down=False, mark_out=False):
+ """
+ :param osd: Osd to be killed.
+        :param mark_down: Mark down if true.
+        :param mark_out: Mark out if true.
+ """
+ if osd is None:
+ osd = random.choice(self.live_osds)
+ self.log("Killing osd %s, live_osds are %s" % (str(osd), str(self.live_osds)))
+ self.live_osds.remove(osd)
+ self.dead_osds.append(osd)
+ self.ceph_manager.kill_osd(osd)
+ if mark_down:
+ self.ceph_manager.mark_down_osd(osd)
+ if mark_out and osd in self.in_osds:
+ self.out_osd(osd)
+ if self.ceph_objectstore_tool:
+ self.log("Testing ceph-objectstore-tool on down osd")
+ (remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
+ FSPATH = self.ceph_manager.get_filepath()
+ JPATH = os.path.join(FSPATH, "journal")
+ exp_osd = imp_osd = osd
+ exp_remote = imp_remote = remote
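+            # exercise ceph-objectstore-tool on the downed osd: export a
+            # random pg, remove it, then re-import it (possibly onto a
+            # different down osd, moving the pg between hosts)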
+ # If an older osd is available we'll move a pg from there
+ if len(self.dead_osds) > 1 and random.random() < self.chance_move_pg:
+ exp_osd = random.choice(self.dead_osds[:-1])
+ (exp_remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=exp_osd)).remotes.iterkeys()
+ if 'keyvaluestore_backend' in self.ceph_manager.ctx.ceph.conf['osd']:
+ prefix = "sudo ceph-objectstore-tool --data-path {fpath} --journal-path {jpath} --type keyvaluestore-dev --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH)
+ else:
+ prefix = "sudo ceph-objectstore-tool --data-path {fpath} --journal-path {jpath} --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH)
+ cmd = (prefix + "--op list-pgs").format(id=exp_osd)
+ proc = exp_remote.run(args=cmd, wait=True,
+ check_status=False, stdout=StringIO())
+ if proc.exitstatus:
+ raise Exception("ceph-objectstore-tool: exp list-pgs failure with status {ret}".format(ret=proc.exitstatus))
+ pgs = proc.stdout.getvalue().split('\n')[:-1]
+ if len(pgs) == 0:
+ self.log("No PGs found for osd.{osd}".format(osd=exp_osd))
+ return
+ pg = random.choice(pgs)
+ exp_path = os.path.join(os.path.join(teuthology.get_testdir(self.ceph_manager.ctx), "data"), "exp.{pg}.{id}".format(pg=pg, id=exp_osd))
+ # export
+ cmd = (prefix + "--op export --pgid {pg} --file {file}").format(id=exp_osd, pg=pg, file=exp_path)
+ proc = exp_remote.run(args=cmd)
+ if proc.exitstatus:
+ raise Exception("ceph-objectstore-tool: export failure with status {ret}".format(ret=proc.exitstatus))
+ # remove
+ cmd = (prefix + "--op remove --pgid {pg}").format(id=exp_osd, pg=pg)
+ proc = exp_remote.run(args=cmd)
+ if proc.exitstatus:
+ raise Exception("ceph-objectstore-tool: remove failure with status {ret}".format(ret=proc.exitstatus))
+ # If there are at least 2 dead osds we might move the pg
+ if exp_osd != imp_osd:
+ # If pg isn't already on this osd, then we will move it there
+ cmd = (prefix + "--op list-pgs").format(id=imp_osd)
+ proc = imp_remote.run(args=cmd, wait=True,
+ check_status=False, stdout=StringIO())
+ if proc.exitstatus:
+ raise Exception("ceph-objectstore-tool: imp list-pgs failure with status {ret}".format(ret=proc.exitstatus))
+ pgs = proc.stdout.getvalue().split('\n')[:-1]
+ if pg not in pgs:
+ self.log("Moving pg {pg} from osd.{fosd} to osd.{tosd}".format(pg=pg, fosd=exp_osd, tosd=imp_osd))
+ if imp_remote != exp_remote:
+ # Copy export file to the other machine
+ self.log("Transfer export file from {srem} to {trem}".format(srem=exp_remote, trem=imp_remote))
+ tmpexport = Remote.get_file(exp_remote, exp_path)
+ Remote.put_file(imp_remote, tmpexport, exp_path)
+ os.remove(tmpexport)
+ else:
+ # Can't move the pg after all
+ imp_osd = exp_osd
+ imp_remote = exp_remote
+ # import
+ cmd = (prefix + "--op import --file {file}")
+ cmd = cmd.format(id=imp_osd, file=exp_path)
+ proc = imp_remote.run(args=cmd, wait=True, check_status=False)
+ if proc.exitstatus == 10:
+ self.log("Pool went away before processing an import"
+ "...ignored")
+ elif proc.exitstatus == 11:
+ self.log("Attempt to import an incompatible export"
+ "...ignored")
+ elif proc.exitstatus:
+ raise Exception("ceph-objectstore-tool: "
+ "import failure with status {ret}".
+ format(ret=proc.exitstatus))
+ cmd = "rm -f {file}".format(file=exp_path)
+ exp_remote.run(args=cmd)
+ if imp_remote != exp_remote:
+ imp_remote.run(args=cmd)
+
+ def rm_past_intervals(self, osd=None):
+ """
+ :param osd: Osd to find pg to remove past intervals
+ """
+ if self.test_rm_past_intervals:
+ if osd is None:
+ osd = random.choice(self.dead_osds)
+ self.log("Use ceph_objectstore_tool to remove past intervals")
+ (remote,) = self.ceph_manager.ctx.\
+ cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
+ FSPATH = self.ceph_manager.get_filepath()
+ JPATH = os.path.join(FSPATH, "journal")
+ if ('keyvaluestore_backend' in
+ self.ceph_manager.ctx.ceph.conf['osd']):
+ prefix = ("sudo ceph-objectstore-tool "
+ "--data-path {fpath} --journal-path {jpath} "
+ "--type keyvaluestore-dev "
+ "--log-file="
+ "/var/log/ceph/objectstore_tool.\\$pid.log ".
+ format(fpath=FSPATH, jpath=JPATH))
+ else:
+ prefix = ("sudo ceph-objectstore-tool "
+ "--data-path {fpath} --journal-path {jpath} "
+ "--log-file="
+ "/var/log/ceph/objectstore_tool.\\$pid.log ".
+ format(fpath=FSPATH, jpath=JPATH))
+ cmd = (prefix + "--op list-pgs").format(id=osd)
+ proc = remote.run(args=cmd, wait=True,
+ check_status=False, stdout=StringIO())
+ if proc.exitstatus:
+ raise Exception("ceph_objectstore_tool: "
+ "exp list-pgs failure with status {ret}".
+ format(ret=proc.exitstatus))
+ pgs = proc.stdout.getvalue().split('\n')[:-1]
+ if len(pgs) == 0:
+ self.log("No PGs found for osd.{osd}".format(osd=osd))
+ return
+ pg = random.choice(pgs)
+ cmd = (prefix + "--op rm-past-intervals --pgid {pg}").\
+ format(id=osd, pg=pg)
+ proc = remote.run(args=cmd)
+ if proc.exitstatus:
+ raise Exception("ceph_objectstore_tool: "
+ "rm-past-intervals failure with status {ret}".
+ format(ret=proc.exitstatus))
+
+ def blackhole_kill_osd(self, osd=None):
+ """
+ If all else fails, kill the osd.
+ :param osd: Osd to be killed.
+ """
+ if osd is None:
+ osd = random.choice(self.live_osds)
+ self.log("Blackholing and then killing osd %s, live_osds are %s" % (str(osd), str(self.live_osds)))
+ self.live_osds.remove(osd)
+ self.dead_osds.append(osd)
+ self.ceph_manager.blackhole_kill_osd(osd)
+
+ def revive_osd(self, osd=None):
+ """
+ Revive the osd.
+ :param osd: Osd to be revived.
+ """
+ if osd is None:
+ osd = random.choice(self.dead_osds)
+ self.log("Reviving osd %s" % (str(osd),))
+ self.live_osds.append(osd)
+ self.dead_osds.remove(osd)
+ self.ceph_manager.revive_osd(osd, self.revive_timeout)
+
+ def out_osd(self, osd=None):
+ """
+ Mark the osd out
+ :param osd: Osd to be marked.
+ """
+ if osd is None:
+ osd = random.choice(self.in_osds)
+ self.log("Removing osd %s, in_osds are: %s" % (str(osd), str(self.in_osds)))
+ self.ceph_manager.mark_out_osd(osd)
+ self.in_osds.remove(osd)
+ self.out_osds.append(osd)
+
+ def in_osd(self, osd=None):
+ """
+        Mark the osd in
+ :param osd: Osd to be marked.
+ """
+ if osd is None:
+ osd = random.choice(self.out_osds)
+ if osd in self.dead_osds:
+ return self.revive_osd(osd)
+ self.log("Adding osd %s" % (str(osd),))
+ self.out_osds.remove(osd)
+ self.in_osds.append(osd)
+ self.ceph_manager.mark_in_osd(osd)
+ self.log("Added osd %s"%(str(osd),))
+
+ def reweight_osd(self, osd=None):
+ """
+ Reweight an osd that is in
+ :param osd: Osd to be marked.
+ """
+ if osd is None:
+ osd = random.choice(self.in_osds)
+ val = random.uniform(.1, 1.0)
+ self.log("Reweighting osd %s to %s" % (str(osd), str(val)))
+ self.ceph_manager.raw_cluster_cmd('osd', 'reweight', str(osd), str(val))
+
+ def primary_affinity(self, osd=None):
+ if osd is None:
+ osd = random.choice(self.in_osds)
+ if random.random() >= .5:
+ pa = random.random()
+ elif random.random() >= .5:
+ pa = 1
+ else:
+ pa = 0
+ self.log('Setting osd %s primary_affinity to %f' % (str(osd), pa))
+ self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity', str(osd), str(pa))
+
+ def all_up(self):
+ """
+ Make sure all osds are up and not out.
+ """
+ while len(self.dead_osds) > 0:
+ self.log("reviving osd")
+ self.revive_osd()
+ while len(self.out_osds) > 0:
+ self.log("inning osd")
+ self.in_osd()
+
+ def do_join(self):
+ """
+        Stop the thrashing loop and wait for the thrasher thread to finish.
+ """
+ self.stopping = True
+ self.thread.get()
+
+ def grow_pool(self):
+ """
+ Increase the size of the pool
+ """
+ pool = self.ceph_manager.get_pool()
+ self.log("Growing pool %s"%(pool,))
+ self.ceph_manager.expand_pool(pool, self.config.get('pool_grow_by', 10), self.max_pgs)
+
+ def fix_pgp_num(self):
+ """
+ Fix number of pgs in pool.
+ """
+ pool = self.ceph_manager.get_pool()
+ self.log("fixing pg num pool %s"%(pool,))
+ self.ceph_manager.set_pool_pgpnum(pool)
+
+ def test_pool_min_size(self):
+ """
+ Kill and revive all osds except one.
+ """
+ self.log("test_pool_min_size")
+ self.all_up()
+ self.ceph_manager.wait_for_recovery(
+ timeout=self.config.get('timeout')
+ )
+ the_one = random.choice(self.in_osds)
+ self.log("Killing everyone but %s", the_one)
+ to_kill = filter(lambda x: x != the_one, self.in_osds)
+ [self.kill_osd(i) for i in to_kill]
+ [self.out_osd(i) for i in to_kill]
+ time.sleep(self.config.get("test_pool_min_size_time", 10))
+ self.log("Killing %s" % (the_one,))
+ self.kill_osd(the_one)
+ self.out_osd(the_one)
+ self.log("Reviving everyone but %s" % (the_one,))
+ [self.revive_osd(i) for i in to_kill]
+ [self.in_osd(i) for i in to_kill]
+ self.log("Revived everyone but %s" % (the_one,))
+ self.log("Waiting for clean")
+ self.ceph_manager.wait_for_recovery(
+ timeout=self.config.get('timeout')
+ )
+
+ def inject_pause(self, conf_key, duration, check_after, should_be_down):
+ """
+ Pause injection testing. Check for osd being down when finished.
+ """
+ the_one = random.choice(self.live_osds)
+ self.log("inject_pause on {osd}".format(osd = the_one))
+ self.log(
+ "Testing {key} pause injection for duration {duration}".format(
+ key = conf_key,
+ duration = duration
+ ))
+ self.log(
+ "Checking after {after}, should_be_down={shouldbedown}".format(
+ after = check_after,
+ shouldbedown = should_be_down
+ ))
+ self.ceph_manager.set_config(the_one, **{conf_key:duration})
+ if not should_be_down:
+ return
+ time.sleep(check_after)
+ status = self.ceph_manager.get_osd_status()
+ assert the_one in status['down']
+ time.sleep(duration - check_after + 20)
+ status = self.ceph_manager.get_osd_status()
+        assert the_one not in status['down']
+
+ def test_backfill_full(self):
+ """
+ Test backfills stopping when the replica fills up.
+
+ First, use osd_backfill_full_ratio to simulate a now full
+ osd by setting it to 0 on all of the OSDs.
+
+ Second, on a random subset, set
+ osd_debug_skip_full_check_in_backfill_reservation to force
+ the more complicated check in do_scan to be exercised.
+
+ Then, verify that all backfills stop.
+ """
+ self.log("injecting osd_backfill_full_ratio = 0")
+ for i in self.live_osds:
+ self.ceph_manager.set_config(
+ i,
+ osd_debug_skip_full_check_in_backfill_reservation = random.choice(
+ ['false', 'true']),
+ osd_backfill_full_ratio = 0)
+ for i in range(30):
+ status = self.ceph_manager.compile_pg_status()
+ if 'backfill' not in status.keys():
+ break
+ self.log(
+ "waiting for {still_going} backfills".format(
+ still_going=status.get('backfill')))
+ time.sleep(1)
+ assert('backfill' not in self.ceph_manager.compile_pg_status().keys())
+ for i in self.live_osds:
+ self.ceph_manager.set_config(
+ i,
+ osd_debug_skip_full_check_in_backfill_reservation = \
+ 'false',
+ osd_backfill_full_ratio = 0.85)
+
+ def test_map_discontinuity(self):
+ """
+ 1) Allows the osds to recover
+ 2) kills an osd
+ 3) allows the remaining osds to recover
+ 4) waits for some time
+ 5) revives the osd
+ This sequence should cause the revived osd to have to handle
+        a map gap, since the mons will have trimmed old osdmaps in the meantime.
+ """
+ while len(self.in_osds) < (self.minin + 1):
+ self.in_osd()
+ self.log("Waiting for recovery")
+ self.ceph_manager.wait_for_all_up(
+ timeout=self.config.get('timeout')
+ )
+ # now we wait 20s for the pg status to change, if it takes longer,
+ # the test *should* fail!
+ time.sleep(20)
+ self.ceph_manager.wait_for_clean(
+ timeout=self.config.get('timeout')
+ )
+
+ # now we wait 20s for the backfill replicas to hear about the clean
+ time.sleep(20)
+ self.log("Recovered, killing an osd")
+ self.kill_osd(mark_down=True, mark_out=True)
+ self.log("Waiting for clean again")
+ self.ceph_manager.wait_for_clean(
+ timeout=self.config.get('timeout')
+ )
+ self.log("Waiting for trim")
+ time.sleep(int(self.config.get("map_discontinuity_sleep_time", 40)))
+ self.revive_osd()
+
+ def choose_action(self):
+ """
+ Random action selector.
+ """
+ chance_down = self.config.get('chance_down', 0.4)
+ chance_test_min_size = self.config.get('chance_test_min_size', 0)
+ chance_test_backfill_full = self.config.get('chance_test_backfill_full', 0)
+ if isinstance(chance_down, int):
+ chance_down = float(chance_down) / 100
+ minin = self.minin
+ minout = self.config.get("min_out", 0)
+ minlive = self.config.get("min_live", 2)
+ mindead = self.config.get("min_dead", 0)
+
+ self.log('choose_action: min_in %d min_out %d min_live %d min_dead %d' %
+ (minin, minout, minlive, mindead))
+ actions = []
+ if len(self.in_osds) > minin:
+ actions.append((self.out_osd, 1.0,))
+ if len(self.live_osds) > minlive and chance_down > 0:
+ actions.append((self.kill_osd, chance_down,))
+ if len(self.dead_osds) > 1:
+ actions.append((self.rm_past_intervals, 1.0,))
+ if len(self.out_osds) > minout:
+ actions.append((self.in_osd, 1.7,))
+ if len(self.dead_osds) > mindead:
+ actions.append((self.revive_osd, 1.0,))
+ if self.config.get('thrash_primary_affinity', True):
+ actions.append((self.primary_affinity, 1.0,))
+ actions.append((self.reweight_osd, self.config.get('reweight_osd',.5),))
+ actions.append((self.grow_pool, self.config.get('chance_pgnum_grow', 0),))
+ actions.append((self.fix_pgp_num, self.config.get('chance_pgpnum_fix', 0),))
+ actions.append((self.test_pool_min_size, chance_test_min_size,))
+ actions.append((self.test_backfill_full, chance_test_backfill_full,))
+ for key in ['heartbeat_inject_failure', 'filestore_inject_stall']:
+ for scenario in [
+                (lambda key=key: self.inject_pause(key,
+ self.config.get('pause_short', 3),
+ 0,
+ False),
+ self.config.get('chance_inject_pause_short', 1),),
+                (lambda key=key: self.inject_pause(key,
+ self.config.get('pause_long', 80),
+ self.config.get('pause_check_after', 70),
+ True),
+ self.config.get('chance_inject_pause_long', 0),)]:
+ actions.append(scenario)
+
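+        # weighted random selection: draw a value in [0, total) and walk the
+        # action list, subtracting each weight until the draw falls within an
+        # action's share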
+ total = sum([y for (x, y) in actions])
+ val = random.uniform(0, total)
+ for (action, prob) in actions:
+ if val < prob:
+ return action
+ val -= prob
+ return None
+
+ def do_thrash(self):
+ """
+ Loop to select random actions to thrash ceph manager with.
+ """
+ cleanint = self.config.get("clean_interval", 60)
+ scrubint = self.config.get("scrub_interval", -1)
+ maxdead = self.config.get("max_dead", 0)
+ delay = self.config.get("op_delay", 5)
+ self.log("starting do_thrash")
+ while not self.stopping:
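+            # each pass: occasionally (roughly once per clean_interval) revive
+            # and reweight everything and wait for recovery (or exercise a map
+            # discontinuity), optionally kick off a scrub, then run one
+            # weighted random action and sleep for op_delay seconds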
+ self.log(" ".join([str(x) for x in ["in_osds: ", self.in_osds, " out_osds: ", self.out_osds,
+ "dead_osds: ", self.dead_osds, "live_osds: ",
+ self.live_osds]]))
+ if random.uniform(0, 1) < (float(delay) / cleanint):
+ while len(self.dead_osds) > maxdead:
+ self.revive_osd()
+ for osd in self.in_osds:
+ self.ceph_manager.raw_cluster_cmd('osd', 'reweight',
+ str(osd), str(1))
+ if random.uniform(0, 1) < float(
+ self.config.get('chance_test_map_discontinuity', 0)):
+ self.test_map_discontinuity()
+ else:
+ self.ceph_manager.wait_for_recovery(
+ timeout=self.config.get('timeout')
+ )
+ time.sleep(self.clean_wait)
+ if scrubint > 0:
+ if random.uniform(0, 1) < (float(delay) / scrubint):
+ self.log('Scrubbing while thrashing being performed')
+ Scrubber(self.ceph_manager, self.config)
+ self.choose_action()()
+ time.sleep(delay)
+ self.all_up()
+
+class CephManager:
+ """
+ Ceph manager object.
+ Contains several local functions that form a bulk of this module.
+ """
+
+ REPLICATED_POOL = 1
+ ERASURE_CODED_POOL = 3
+
+ def __init__(self, controller, ctx=None, config=None, logger=None):
+ self.lock = threading.RLock()
+ self.ctx = ctx
+ self.config = config
+ self.controller = controller
+ self.next_pool_id = 0
+ self.created_erasure_pool = False
+ if (logger):
+ self.log = lambda x: logger.info(x)
+ else:
+ def tmp(x):
+ """
+ implement log behavior.
+ """
+ print x
+ self.log = tmp
+ if self.config is None:
+ self.config = dict()
+ pools = self.list_pools()
+ self.pools = {}
+ for pool in pools:
+ self.pools[pool] = self.get_pool_property(pool, 'pg_num')
+
+ def raw_cluster_cmd(self, *args):
+ """
+        Run a 'ceph' command on the cluster and return its stdout.
+ """
+ testdir = teuthology.get_testdir(self.ctx)
+ ceph_args = [
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'ceph',
+ ]
+ ceph_args.extend(args)
+ proc = self.controller.run(
+ args=ceph_args,
+ stdout=StringIO(),
+ )
+ return proc.stdout.getvalue()
+
+ def raw_cluster_cmd_result(self, *args):
+ """
+        Run a 'ceph' command on the cluster and return its exit status.
+ """
+ testdir = teuthology.get_testdir(self.ctx)
+ ceph_args = [
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'ceph',
+ ]
+ ceph_args.extend(args)
+ proc = self.controller.run(
+ args=ceph_args,
+ check_status=False,
+ )
+ return proc.exitstatus
+
+ def do_rados(self, remote, cmd):
+ """
+ Execute a remote rados command.
+ """
+ testdir = teuthology.get_testdir(self.ctx)
+ pre = [
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'rados',
+ ]
+ pre.extend(cmd)
+ proc = remote.run(
+ args=pre,
+ wait=True,
+ )
+ return proc
+
+ def rados_write_objects(
+ self, pool, num_objects, size, timelimit, threads, cleanup=False):
+ """
+ Write rados objects
+ Threads not used yet.
+ """
+ args = [
+ '-p', pool,
+ '--num-objects', num_objects,
+ '-b', size,
+ 'bench', timelimit,
+ 'write'
+ ]
+ if not cleanup: args.append('--no-cleanup')
+ return self.do_rados(self.controller, map(str, args))
+
+ def do_put(self, pool, obj, fname):
+ """
+ Implement rados put operation
+ """
+ return self.do_rados(
+ self.controller,
+ [
+ '-p',
+ pool,
+ 'put',
+ obj,
+ fname
+ ]
+ )
+
+ def do_get(self, pool, obj, fname='/dev/null'):
+ """
+        Implement rados get operation (currently issued as 'rados stat' on the object)
+ """
+ return self.do_rados(
+ self.controller,
+ [
+ '-p',
+ pool,
+ 'stat',
+ obj,
+ fname
+ ]
+ )
+
+ def osd_admin_socket(self, osdnum, command, check_status=True):
+ """
+ Remotely start up ceph specifying the admin socket
+        :param command: a list of words to use as the command to the admin socket
+ """
+ testdir = teuthology.get_testdir(self.ctx)
+ remote = None
+ for _remote, roles_for_host in self.ctx.cluster.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
+ if int(id_) == int(osdnum):
+ remote = _remote
+ assert remote is not None
+ args = [
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'ceph',
+ '--admin-daemon',
+ '/var/run/ceph/ceph-osd.{id}.asok'.format(id=osdnum),
+ ]
+ args.extend(command)
+ return remote.run(
+ args=args,
+ stdout=StringIO(),
+ wait=True,
+ check_status=check_status
+ )
+
+ def get_pgid(self, pool, pgnum):
+ """
+ :param pool: pool name
+ :param pgnum: pg number
+ :returns: a string representing this pg.
+ """
+ poolnum = self.get_pool_num(pool)
+ pg_str = "{poolnum}.{pgnum}".format(
+ poolnum=poolnum,
+ pgnum=pgnum)
+ return pg_str
+
+ def get_pg_replica(self, pool, pgnum):
+ """
+        get replica for pool, pgnum (e.g. (data, 0) -> 0)
+ """
+ output = self.raw_cluster_cmd("pg", "dump", '--format=json')
+ j = json.loads('\n'.join(output.split('\n')[1:]))
+ pg_str = self.get_pgid(pool, pgnum)
+ for pg in j['pg_stats']:
+ if pg['pgid'] == pg_str:
+ return int(pg['acting'][-1])
+ assert False
+
+ def get_pg_primary(self, pool, pgnum):
+ """
+        get primary for pool, pgnum (e.g. (data, 0) -> 0)
+ """
+ output = self.raw_cluster_cmd("pg", "dump", '--format=json')
+ j = json.loads('\n'.join(output.split('\n')[1:]))
+ pg_str = self.get_pgid(pool, pgnum)
+ for pg in j['pg_stats']:
+ if pg['pgid'] == pg_str:
+ return int(pg['acting'][0])
+ assert False
+
+ def get_pool_num(self, pool):
+ """
+ get number for pool (e.g., data -> 2)
+ """
+ out = self.raw_cluster_cmd('osd', 'dump', '--format=json')
+ j = json.loads('\n'.join(out.split('\n')[1:]))
+ for i in j['pools']:
+ if i['pool_name'] == pool:
+ return int(i['pool'])
+ assert False
+
+ def list_pools(self):
+ """
+ list all pool names
+ """
+ out = self.raw_cluster_cmd('osd', 'dump', '--format=json')
+ j = json.loads('\n'.join(out.split('\n')[1:]))
+ self.log(j['pools'])
+ return [str(i['pool_name']) for i in j['pools']]
+
+ def clear_pools(self):
+ """
+ remove all pools
+ """
+ [self.remove_pool(i) for i in self.list_pools()]
+
+ def kick_recovery_wq(self, osdnum):
+ """
+ Run kick_recovery_wq on cluster.
+ """
+ return self.raw_cluster_cmd(
+ 'tell', "osd.%d" % (int(osdnum),),
+ 'debug',
+ 'kick_recovery_wq',
+ '0')
+
+ def wait_run_admin_socket(self, osdnum, args=['version'], timeout=300):
+ """
+        If the osd_admin_socket call succeeds, return. Otherwise wait
+ five seconds and try again.
+ """
+ tries = 0
+ while True:
+ proc = self.osd_admin_socket(
+ osdnum, args,
+ check_status=False)
+            if proc.exitstatus == 0:
+ break
+ else:
+ tries += 1
+ if (tries * 5) > timeout:
+ raise Exception('timed out waiting for admin_socket to appear after osd.{o} restart'.format(o=osdnum))
+ self.log(
+ "waiting on admin_socket for {osdnum}, {command}".format(
+ osdnum=osdnum,
+ command=args))
+ time.sleep(5)
+
+ def get_pool_dump(self, pool):
+ """
+ get the osd dump part of a pool
+ """
+ osd_dump = self.get_osd_dump_json()
+ for i in osd_dump['pools']:
+ if i['pool_name'] == pool:
+ return i
+ assert False
+
+ def set_config(self, osdnum, **argdict):
+ """
+ :param osdnum: osd number
+ :param argdict: dictionary containing values to set.
+ """
+ for k, v in argdict.iteritems():
+ self.wait_run_admin_socket(
+ osdnum,
+ ['config', 'set', str(k), str(v)])
+
+ def raw_cluster_status(self):
+ """
+ Get status from cluster
+ """
+ status = self.raw_cluster_cmd('status', '--format=json-pretty')
+ return json.loads(status)
+
+ def raw_osd_status(self):
+ """
+ Get osd status from cluster
+ """
+ return self.raw_cluster_cmd('osd', 'dump')
+
+ def get_osd_status(self):
+ """
+ Get osd statuses sorted by states that the osds are in.
+ """
+ osd_lines = filter(
+ lambda x: x.startswith('osd.') and (("up" in x) or ("down" in x)),
+ self.raw_osd_status().split('\n'))
+ self.log(osd_lines)
+ in_osds = [int(i[4:].split()[0]) for i in filter(
+ lambda x: " in " in x,
+ osd_lines)]
+ out_osds = [int(i[4:].split()[0]) for i in filter(
+ lambda x: " out " in x,
+ osd_lines)]
+ up_osds = [int(i[4:].split()[0]) for i in filter(
+ lambda x: " up " in x,
+ osd_lines)]
+ down_osds = [int(i[4:].split()[0]) for i in filter(
+ lambda x: " down " in x,
+ osd_lines)]
+ dead_osds = [int(x.id_) for x in
+ filter(lambda x: not x.running(), self.ctx.daemons.iter_daemons_of_role('osd'))]
+ live_osds = [int(x.id_) for x in
+ filter(lambda x: x.running(), self.ctx.daemons.iter_daemons_of_role('osd'))]
+ return { 'in' : in_osds, 'out' : out_osds, 'up' : up_osds,
+ 'down' : down_osds, 'dead' : dead_osds, 'live' : live_osds,
+ 'raw' : osd_lines}
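+
+ # Sketch of the plain-text `ceph osd dump` lines that get_osd_status() above
+ # parses (an assumption about the usual format; exact fields vary by release):
+ #   osd.0 up   in  weight 1 up_from 4 up_thru 9 ...
+ # so i[4:].split()[0] picks out the numeric id that follows the "osd." prefix.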
+
+ def get_num_pgs(self):
+ """
+ Check cluster status for the number of pgs
+ """
+ status = self.raw_cluster_status()
+ self.log(status)
+ return status['pgmap']['num_pgs']
+
+ def create_pool_with_unique_name(self, pg_num=16, ec_pool=False, ec_m=1, ec_k=2):
+ """
+ Create a pool named unique_pool_X where X is unique.
+ """
+ name = ""
+ with self.lock:
+ name = "unique_pool_%s" % (str(self.next_pool_id),)
+ self.next_pool_id += 1
+ self.create_pool(
+ name,
+ pg_num,
+ ec_pool=ec_pool,
+ ec_m=ec_m,
+ ec_k=ec_k)
+ return name
+
+ def create_pool(self, pool_name, pg_num=16, ec_pool=False, ec_m=1, ec_k=2):
+ """
+ Create a pool named from the pool_name parameter.
+ :param pool_name: name of the pool being created.
+ :param pg_num: initial number of pgs.
+ """
+ with self.lock:
+ assert isinstance(pool_name, str)
+ assert isinstance(pg_num, int)
+ assert pool_name not in self.pools
+ self.log("creating pool_name %s"%(pool_name,))
+ if ec_pool and not self.created_erasure_pool:
+ self.created_erasure_pool = True
+ self.raw_cluster_cmd('osd', 'erasure-code-profile', 'set', 'teuthologyprofile', 'ruleset-failure-domain=osd', 'm='+str(ec_m), 'k='+str(ec_k))
+
+ if ec_pool:
+ self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num), str(pg_num), 'erasure', 'teuthologyprofile')
+ else:
+ self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num))
+ self.pools[pool_name] = pg_num
+
+ def remove_pool(self, pool_name):
+ """
+ Remove the indicated pool
+ :param pool_name: Pool to be removed
+ """
+ with self.lock:
+ assert isinstance(pool_name, str)
+ assert pool_name in self.pools
+ self.log("removing pool_name %s" % (pool_name,))
+ del self.pools[pool_name]
+ self.do_rados(
+ self.controller,
+ ['rmpool', pool_name, pool_name, "--yes-i-really-really-mean-it"]
+ )
+
+ def get_pool(self):
+ """
+ Pick a random pool
+ """
+ with self.lock:
+ return random.choice(self.pools.keys())
+
+ def get_pool_pg_num(self, pool_name):
+ """
+ Return the number of pgs in the pool specified.
+ """
+ with self.lock:
+ assert isinstance(pool_name, str)
+ if pool_name in self.pools:
+ return self.pools[pool_name]
+ return 0
+
+ def get_pool_property(self, pool_name, prop):
+ """
+ :param pool_name: pool
+ :param prop: property to be checked.
+ :returns: property as an int value.
+ """
+ with self.lock:
+ assert isinstance(pool_name, str)
+ assert isinstance(prop, str)
+ output = self.raw_cluster_cmd(
+ 'osd',
+ 'pool',
+ 'get',
+ pool_name,
+ prop)
+ return int(output.split()[1])
+
+ def set_pool_property(self, pool_name, prop, val):
+ """
+ :param pool_name: pool
+ :param prop: property to be set.
+ :param val: value to set.
+
+ This routine retries if set operation fails.
+ """
+ with self.lock:
+ assert isinstance(pool_name, str)
+ assert isinstance(prop, str)
+ assert isinstance(val, int)
+ tries = 0
+ while True:
+ r = self.raw_cluster_cmd_result(
+ 'osd',
+ 'pool',
+ 'set',
+ pool_name,
+ prop,
+ str(val))
+ if r != 11: # EAGAIN
+ break
+ tries += 1
+ if tries > 50:
+ raise Exception('still getting EAGAIN after %d tries while setting pool property %s %s = %s' % (tries, pool_name, prop, val))
+ self.log('got EAGAIN setting pool property, waiting a few seconds...')
+ time.sleep(2)
+
+ def expand_pool(self, pool_name, by, max_pgs):
+ """
+ Increase the number of pgs in a pool
+ """
+ with self.lock:
+ assert isinstance(pool_name, str)
+ assert isinstance(by, int)
+ assert pool_name in self.pools
+ if self.get_num_creating() > 0:
+ return
+ if (self.pools[pool_name] + by) > max_pgs:
+ return
+ self.log("increase pool size by %d"%(by,))
+ new_pg_num = self.pools[pool_name] + by
+ self.set_pool_property(pool_name, "pg_num", new_pg_num)
+ self.pools[pool_name] = new_pg_num
+
+ def set_pool_pgpnum(self, pool_name):
+ """
+ Set the pgp_num property of pool_name to match its current pg_num.
+ """
+ with self.lock:
+ assert isinstance(pool_name, str)
+ assert pool_name in self.pools
+ if self.get_num_creating() > 0:
+ return
+ self.set_pool_property(pool_name, 'pgp_num', self.pools[pool_name])
+
+ def list_pg_missing(self, pgid):
+ """
+ Return the list of missing objects for the pg with the id specified.
+ """
+ r = None
+ offset = {}
+ while True:
+ out = self.raw_cluster_cmd('--', 'pg', pgid, 'list_missing',
+ json.dumps(offset))
+ j = json.loads(out)
+ if r is None:
+ r = j
+ else:
+ r['objects'].extend(j['objects'])
+ if 'more' not in j:
+ break
+ if j['more'] == 0:
+ break
+ offset = j['objects'][-1]['oid']
+ if 'more' in r:
+ del r['more']
+ return r
+
+ def get_pg_stats(self):
+ """
+ Dump the cluster and get pg stats
+ """
+ out = self.raw_cluster_cmd('pg', 'dump', '--format=json')
+ j = json.loads('\n'.join(out.split('\n')[1:]))
+ return j['pg_stats']
+
+ def compile_pg_status(self):
+ """
+ Return a histogram of pg state values
+ """
+ ret = {}
+ j = self.get_pg_stats()
+ for pg in j:
+ for status in pg['state'].split('+'):
+ if status not in ret:
+ ret[status] = 0
+ ret[status] += 1
+ return ret
+
+ def pg_scrubbing(self, pool, pgnum):
+ """
+ pg scrubbing wrapper
+ """
+ pgstr = self.get_pgid(pool, pgnum)
+ stats = self.get_single_pg_stats(pgstr)
+ return 'scrub' in stats['state']
+
+ def pg_repairing(self, pool, pgnum):
+ """
+ pg repairing wrapper
+ """
+ pgstr = self.get_pgid(pool, pgnum)
+ stats = self.get_single_pg_stats(pgstr)
+ return 'repair' in stats['state']
+
+ def pg_inconsistent(self, pool, pgnum):
+ """
+ pg inconsistent wrapper
+ """
+ pgstr = self.get_pgid(pool, pgnum)
+ stats = self.get_single_pg_stats(pgstr)
+ return 'inconsistent' in stats['state']
+
+ def get_last_scrub_stamp(self, pool, pgnum):
+ """
+ Get the timestamp of the last scrub.
+ """
+ stats = self.get_single_pg_stats(self.get_pgid(pool, pgnum))
+ return stats["last_scrub_stamp"]
+
+ def do_pg_scrub(self, pool, pgnum, stype):
+ """
+ Scrub pg and wait for scrubbing to finish
+ """
+ init = self.get_last_scrub_stamp(pool, pgnum)
+ self.raw_cluster_cmd('pg', stype, self.get_pgid(pool, pgnum))
+ while init == self.get_last_scrub_stamp(pool, pgnum):
+ self.log("waiting for scrub type %s"%(stype,))
+ time.sleep(10)
+
+ def get_single_pg_stats(self, pgid):
+ """
+ Return the pg stats entry for the pgid specified, or None if not found.
+ """
+ all_stats = self.get_pg_stats()
+
+ for pg in all_stats:
+ if pg['pgid'] == pgid:
+ return pg
+
+ return None
+
+ def get_osd_dump_json(self):
+ """
+ osd dump --format=json converted to a python object
+ :returns: the python object
+ """
+ out = self.raw_cluster_cmd('osd', 'dump', '--format=json')
+ return json.loads('\n'.join(out.split('\n')[1:]))
+
+ def get_osd_dump(self):
+ """
+ Dump osds
+ :returns: all osds
+ """
+ out = self.raw_cluster_cmd('osd', 'dump', '--format=json')
+ j = json.loads('\n'.join(out.split('\n')[1:]))
+ return j['osds']
+
+ def get_stuck_pgs(self, type_, threshold):
+ """
+ :returns: stuck pg information from the cluster
+ """
+ out = self.raw_cluster_cmd('pg', 'dump_stuck', type_, str(threshold),
+ '--format=json')
+ return json.loads(out)
+
+ def get_num_unfound_objects(self):
+ """
+ Check cluster status to get the number of unfound objects
+ """
+ status = self.raw_cluster_status()
+ self.log(status)
+ return status['pgmap'].get('unfound_objects', 0)
+
+ def get_num_creating(self):
+ """
+ Find the number of pgs in creating mode.
+ """
+ pgs = self.get_pg_stats()
+ num = 0
+ for pg in pgs:
+ if 'creating' in pg['state']:
+ num += 1
+ return num
+
+ def get_num_active_clean(self):
+ """
+ Find the number of active and clean pgs.
+ """
+ pgs = self.get_pg_stats()
+ num = 0
+ for pg in pgs:
+ if pg['state'].count('active') and pg['state'].count('clean') and not pg['state'].count('stale'):
+ num += 1
+ return num
+
+ def get_num_active_recovered(self):
+ """
+ Find the number of active and recovered pgs.
+ """
+ pgs = self.get_pg_stats()
+ num = 0
+ for pg in pgs:
+ if pg['state'].count('active') and not pg['state'].count('recover') and not pg['state'].count('backfill') and not pg['state'].count('stale'):
+ num += 1
+ return num
+
+ def get_is_making_recovery_progress(self):
+ """
+ Return whether there is recovery progress discernible in the
+ raw cluster status
+ """
+ status = self.raw_cluster_status()
+ kps = status['pgmap'].get('recovering_keys_per_sec', 0)
+ bps = status['pgmap'].get('recovering_bytes_per_sec', 0)
+ ops = status['pgmap'].get('recovering_objects_per_sec', 0)
+ return kps > 0 or bps > 0 or ops > 0
+
+ def get_num_active(self):
+ """
+ Find the number of active pgs.
+ """
+ pgs = self.get_pg_stats()
+ num = 0
+ for pg in pgs:
+ if pg['state'].count('active') and not pg['state'].count('stale'):
+ num += 1
+ return num
+
+ def get_num_down(self):
+ """
+ Find the number of pgs that are down.
+ """
+ pgs = self.get_pg_stats()
+ num = 0
+ for pg in pgs:
+ if (pg['state'].count('down') and not pg['state'].count('stale')) or \
+ (pg['state'].count('incomplete') and not pg['state'].count('stale')):
+ num += 1
+ return num
+
+ def get_num_active_down(self):
+ """
+ Find the number of pgs that are either active or down.
+ """
+ pgs = self.get_pg_stats()
+ num = 0
+ for pg in pgs:
+ if (pg['state'].count('active') and not pg['state'].count('stale')) or \
+ (pg['state'].count('down') and not pg['state'].count('stale')) or \
+ (pg['state'].count('incomplete') and not pg['state'].count('stale')):
+ num += 1
+ return num
+
+ def is_clean(self):
+ """
+ True if all pgs are clean
+ """
+ return self.get_num_active_clean() == self.get_num_pgs()
+
+ def is_recovered(self):
+ """
+ True if all pgs have recovered
+ """
+ return self.get_num_active_recovered() == self.get_num_pgs()
+
+ def is_active_or_down(self):
+ """
+ True if all pgs are active or down
+ """
+ return self.get_num_active_down() == self.get_num_pgs()
+
+ def wait_for_clean(self, timeout=None):
+ """
+ Wait until all pgs are active+clean.
+ """
+ self.log("waiting for clean")
+ start = time.time()
+ num_active_clean = self.get_num_active_clean()
+ while not self.is_clean():
+ if timeout is not None:
+ if self.get_is_making_recovery_progress():
+ self.log("making progress, resetting timeout")
+ start = time.time()
+ else:
+ self.log("no progress seen, keeping timeout for now")
+ assert time.time() - start < timeout, \
+ 'failed to become clean before timeout expired'
+ cur_active_clean = self.get_num_active_clean()
+ if cur_active_clean != num_active_clean:
+ start = time.time()
+ num_active_clean = cur_active_clean
+ time.sleep(3)
+ self.log("clean!")
+
+ def are_all_osds_up(self):
+ """
+ Returns true if all osds are up.
+ """
+ x = self.get_osd_dump()
+ return len(x) == sum([(y['up'] > 0) for y in x])
+
+ def wait_for_all_up(self, timeout=None):
+ """
+ Wait until all osds are up; an assertion fires if the timeout
+ expires first.
+ """
+ self.log("waiting for all up")
+ start = time.time()
+ while not self.are_all_osds_up():
+ if timeout is not None:
+ assert time.time() - start < timeout, \
+ 'timeout expired in wait_for_all_up'
+ time.sleep(3)
+ self.log("all up!")
+
+ def wait_for_recovery(self, timeout=None):
+ """
+ Wait for recovery. When this exits, all pgs have recovered.
+ """
+ self.log("waiting for recovery to complete")
+ start = time.time()
+ num_active_recovered = self.get_num_active_recovered()
+ while not self.is_recovered():
+ if timeout is not None:
+ if self.get_is_making_recovery_progress():
+ self.log("making progress, resetting timeout")
+ start = time.time()
+ else:
+ self.log("no progress seen, keeping timeout for now")
+ assert time.time() - start < timeout, \
+ 'failed to recover before timeout expired'
+ cur_active_recovered = self.get_num_active_recovered()
+ if cur_active_recovered != num_active_recovered:
+ start = time.time()
+ num_active_recovered = cur_active_recovered
+ time.sleep(3)
+ self.log("recovered!")
+
+ def wait_for_active(self, timeout=None):
+ """
+ Check peering. When this exits, all pgs are active.
+ """
+ self.log("waiting for peering to complete")
+ start = time.time()
+ num_active = self.get_num_active()
+ while not self.is_active():
+ if timeout is not None:
+ assert time.time() - start < timeout, \
+ 'failed to recover before timeout expired'
+ cur_active = self.get_num_active()
+ if cur_active != num_active:
+ start = time.time()
+ num_active = cur_active
+ time.sleep(3)
+ self.log("active!")
+
+ def wait_for_active_or_down(self, timeout=None):
+ """
+ Check peering. When this exits, every pg is either active
+ or down.
+ """
+ self.log("waiting for peering to complete or become blocked")
+ start = time.time()
+ num_active_down = self.get_num_active_down()
+ while not self.is_active_or_down():
+ if timeout is not None:
+ assert time.time() - start < timeout, \
+ 'failed to recover before timeout expired'
+ cur_active_down = self.get_num_active_down()
+ if cur_active_down != num_active_down:
+ start = time.time()
+ num_active_down = cur_active_down
+ time.sleep(3)
+ self.log("active or down!")
+
+ def osd_is_up(self, osd):
+ """
+ Wrapper for osd check
+ """
+ osds = self.get_osd_dump()
+ return osds[osd]['up'] > 0
+
+ def wait_till_osd_is_up(self, osd, timeout=None):
+ """
+ Loop until the given osd is marked up.
+ """
+ self.log('waiting for osd.%d to be up' % osd)
+ start = time.time()
+ while not self.osd_is_up(osd):
+ if timeout is not None:
+ assert time.time() - start < timeout, \
+ 'osd.%d failed to come up before timeout expired' % osd
+ time.sleep(3)
+ self.log('osd.%d is up' % osd)
+
+ def is_active(self):
+ """
+ Wrapper to check if active
+ """
+ return self.get_num_active() == self.get_num_pgs()
+
+ def wait_till_active(self, timeout=None):
+ """
+ Wait until all pgs are active.
+ """
+ self.log("waiting till active")
+ start = time.time()
+ while not self.is_active():
+ if timeout is not None:
+ assert time.time() - start < timeout, \
+ 'failed to become active before timeout expired'
+ time.sleep(3)
+ self.log("active!")
+
+ def mark_out_osd(self, osd):
+ """
+ Wrapper to mark osd out.
+ """
+ self.raw_cluster_cmd('osd', 'out', str(osd))
+
+ def kill_osd(self, osd):
+ """
+ Kill osds by either power cycling (if indicated by the config)
+ or by stopping.
+ """
+ if self.config.get('powercycle'):
+ (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
+ self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name))
+ assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
+ remote.console.power_off()
+ else:
+ self.ctx.daemons.get_daemon('osd', osd).stop()
+
+ def blackhole_kill_osd(self, osd):
+ """
+ Stop an osd after injecting filestore-blackhole, so in-flight
+ writes are dropped before the daemon stops.
+ """
+ self.raw_cluster_cmd('--', 'tell', 'osd.%d' % osd,
+ 'injectargs', '--filestore-blackhole')
+ time.sleep(2)
+ self.ctx.daemons.get_daemon('osd', osd).stop()
+
+ def revive_osd(self, osd, timeout=150):
+ """
+ Revive osds by either power cycling (if indicated by the config)
+ or by restarting.
+ """
+ if self.config.get('powercycle'):
+ (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
+ self.log('revive_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name))
+ assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
+ remote.console.power_on()
+ if not remote.console.check_status(300):
+ raise Exception('Failed to revive osd.{o} via ipmi'.format(o=osd))
+ teuthology.reconnect(self.ctx, 60, [remote])
+ mount_osd_data(self.ctx, remote, str(osd))
+ make_admin_daemon_dir(self.ctx, remote)
+ self.ctx.daemons.get_daemon('osd', osd).reset()
+ self.ctx.daemons.get_daemon('osd', osd).restart()
+ # wait for dump_ops_in_flight; this command doesn't appear
+ # until after the signal handler is installed and it is safe
+ # to stop the osd again without making valgrind leak checks
+ # unhappy. see #5924.
+ self.wait_run_admin_socket(osd,
+ args=['dump_ops_in_flight'],
+ timeout=timeout)
+
+ def mark_down_osd(self, osd):
+ """
+ Cluster command wrapper
+ """
+ self.raw_cluster_cmd('osd', 'down', str(osd))
+
+ def mark_in_osd(self, osd):
+ """
+ Cluster command wrapper
+ """
+ self.raw_cluster_cmd('osd', 'in', str(osd))
+
+
+ ## monitors
+
+ def signal_mon(self, mon, sig):
+ """
+ Wrapper around the local get_daemon call
+ """
+ self.ctx.daemons.get_daemon('mon', mon).signal(sig)
+
+ def kill_mon(self, mon):
+ """
+ Kill the monitor by either power cycling (if the config says so),
+ or by doing a stop.
+ """
+ if self.config.get('powercycle'):
+ (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys()
+ self.log('kill_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name))
+ assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
+ remote.console.power_off()
+ else:
+ self.ctx.daemons.get_daemon('mon', mon).stop()
+
+ def revive_mon(self, mon):
+ """
+ Restart by either power cycling (if the config says so),
+ or by doing a normal restart.
+ """
+ if self.config.get('powercycle'):
+ (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys()
+ self.log('revive_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name))
+ assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
+ remote.console.power_on()
+ make_admin_daemon_dir(self.ctx, remote)
+ self.ctx.daemons.get_daemon('mon', mon).restart()
+
+ def get_mon_status(self, mon):
+ """
+ Extract all the monitor status information from the cluster
+ """
+ addr = self.ctx.ceph.conf['mon.%s' % mon]['mon addr']
+ out = self.raw_cluster_cmd('-m', addr, 'mon_status')
+ return json.loads(out)
+
+ def get_mon_quorum(self):
+ """
+ Extract monitor quorum information from the cluster
+ """
+ out = self.raw_cluster_cmd('quorum_status')
+ j = json.loads(out)
+ self.log('quorum_status is %s' % out)
+ return j['quorum']
+
+ def wait_for_mon_quorum_size(self, size, timeout=300):
+ """
+ Loop until quorum size is reached.
+ """
+ self.log('waiting for quorum size %d' % size)
+ start = time.time()
+ while len(self.get_mon_quorum()) != size:
+ if timeout is not None:
+ assert time.time() - start < timeout, \
+ 'failed to reach quorum size %d before timeout expired' % size
+ time.sleep(3)
+ self.log("quorum is size %d" % size)
+
+ def get_mon_health(self, debug=False):
+ """
+ Extract all the monitor health information.
+ """
+ out = self.raw_cluster_cmd('health', '--format=json')
+ if debug:
+ self.log('health:\n{h}'.format(h=out))
+ return json.loads(out)
+
+ ## metadata servers
+
+ def kill_mds(self, mds):
+ """
+ Powercycle if set in config, otherwise just stop.
+ """
+ if self.config.get('powercycle'):
+ (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys()
+ self.log('kill_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name))
+ assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
+ remote.console.power_off()
+ else:
+ self.ctx.daemons.get_daemon('mds', mds).stop()
+
+ def kill_mds_by_rank(self, rank):
+ """
+ kill_mds wrapper to kill based on rank passed.
+ """
+ status = self.get_mds_status_by_rank(rank)
+ self.kill_mds(status['name'])
+
+ def revive_mds(self, mds, standby_for_rank=None):
+ """
+ Revive mds -- do an IPMI powercycle (if indicated by the config)
+ and then restart (using --hot-standby if specified).
+ """
+ if self.config.get('powercycle'):
+ (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys()
+ self.log('revive_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name))
+ assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
+ remote.console.power_on()
+ make_admin_daemon_dir(self.ctx, remote)
+ args = []
+ if standby_for_rank:
+ args.extend(['--hot-standby', standby_for_rank])
+ self.ctx.daemons.get_daemon('mds', mds).restart(*args)
+
+ def revive_mds_by_rank(self, rank, standby_for_rank=None):
+ """
+ revive_mds wrapper to revive based on rank passed.
+ """
+ status = self.get_mds_status_by_rank(rank)
+ self.revive_mds(status['name'], standby_for_rank)
+
+ def get_mds_status(self, mds):
+ """
+ Run cluster commands for the mds in order to get mds information
+ """
+ out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
+ j = json.loads(' '.join(out.splitlines()[1:]))
+ # collate; for dup ids, larger gid wins.
+ for info in j['info'].itervalues():
+ if info['name'] == mds:
+ return info
+ return None
+
+ def get_mds_status_by_rank(self, rank):
+ """
+ Run cluster commands for the mds in order to get mds information
+ check rank.
+ """
+ out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
+ j = json.loads(' '.join(out.splitlines()[1:]))
+ # collate; for dup ids, larger gid wins.
+ for info in j['info'].itervalues():
+ if info['rank'] == rank:
+ return info
+ return None
+
+ def get_mds_status_all(self):
+ """
+ Run cluster command to extract all the mds status.
+ """
+ out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
+ j = json.loads(' '.join(out.splitlines()[1:]))
+ return j
+
+ def get_filepath(self):
+ """
+ Return path to osd data with {id} needing to be replaced
+ """
+ return "/var/lib/ceph/osd/ceph-{id}"
+
+def utility_task(name):
+ """
+ Generate ceph_manager subtask corresponding to ceph_manager
+ method name
+ """
+ def task(ctx, config):
+ if config is None:
+ config = {}
+ args = config.get('args', [])
+ kwargs = config.get('kwargs', {})
+ fn = getattr(ctx.manager, name)
+ fn(*args, **kwargs)
+ return task
+
+revive_osd = utility_task("revive_osd")
+kill_osd = utility_task("kill_osd")
+create_pool = utility_task("create_pool")
+remove_pool = utility_task("remove_pool")
+wait_for_clean = utility_task("wait_for_clean")
+set_pool_property = utility_task("set_pool_property")
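+
+# Illustrative sketch only: a suite fragment might invoke one of the generated
+# utility tasks roughly as below. The exact dotted task names depend on how
+# teuthology resolves this module, so treat this as a hypothetical example
+# rather than a copy of an existing suite:
+#
+#   tasks:
+#   - ceph:
+#   - ceph_manager.create_pool:
+#       kwargs:
+#         pool_name: example_pool
+#         pg_num: 32
+#   - ceph_manager.wait_for_clean:
+#   - ceph_manager.remove_pool:
+#       kwargs:
+#         pool_name: example_pool
+#
+# Each entry feeds its 'args'/'kwargs' into the matching CephManager method on
+# ctx.manager via utility_task() above.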
--- /dev/null
+"""
+ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility
+"""
+from cStringIO import StringIO
+import contextlib
+import logging
+import ceph_manager
+from teuthology import misc as teuthology
+import time
+import os
+import string
+from teuthology.orchestra import run
+import sys
+import tempfile
+import json
+from util.rados import (rados, create_replicated_pool, create_ec_pool)
+# from util.rados import (rados, create_ec_pool,
+# create_replicated_pool,
+# create_cache_pool)
+
+log = logging.getLogger(__name__)
+
+# Should get cluster name "ceph" from somewhere
+# and normal path from osd_data and osd_journal in conf
+FSPATH = "/var/lib/ceph/osd/ceph-{id}"
+JPATH = "/var/lib/ceph/osd/ceph-{id}/journal"
+
+
+def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR,
+ BASE_NAME, DATALINECOUNT):
+ objects = range(1, NUM_OBJECTS + 1)
+ for i in objects:
+ NAME = BASE_NAME + "{num}".format(num=i)
+ LOCALNAME = os.path.join(DATADIR, NAME)
+
+ dataline = range(DATALINECOUNT)
+ data = "This is the data for " + NAME + "\n"
+ with open(LOCALNAME, "w") as fd:
+ for _ in dataline:
+ fd.write(data)
+
+
+def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
+ BASE_NAME, DATALINECOUNT):
+
+ objects = range(1, NUM_OBJECTS + 1)
+ for i in objects:
+ NAME = BASE_NAME + "{num}".format(num=i)
+ DDNAME = os.path.join(DATADIR, NAME)
+
+ remote.run(args=['rm', '-f', DDNAME])
+
+ dataline = range(DATALINECOUNT)
+ data = "This is the data for " + NAME + "\n"
+ DATA = ""
+ for _ in dataline:
+ DATA += data
+ teuthology.write_file(remote, DDNAME, DATA)
+
+
+def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR,
+ BASE_NAME, DATALINECOUNT, POOL, db, ec):
+ ERRORS = 0
+ log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS))
+
+ objects = range(1, NUM_OBJECTS + 1)
+ for i in objects:
+ NAME = BASE_NAME + "{num}".format(num=i)
+ DDNAME = os.path.join(DATADIR, NAME)
+
+ proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME],
+ wait=False)
+ # proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME])
+ ret = proc.wait()
+ if ret != 0:
+ log.critical("Rados put failed with status {ret}".
+ format(ret=proc.exitstatus))
+ sys.exit(1)
+
+ db[NAME] = {}
+
+ keys = range(i)
+ db[NAME]["xattr"] = {}
+ for k in keys:
+ if k == 0:
+ continue
+ mykey = "key{i}-{k}".format(i=i, k=k)
+ myval = "val{i}-{k}".format(i=i, k=k)
+ proc = remote.run(args=['rados', '-p', POOL, 'setxattr',
+ NAME, mykey, myval])
+ ret = proc.wait()
+ if ret != 0:
+ log.error("setxattr failed with {ret}".format(ret=ret))
+ ERRORS += 1
+ db[NAME]["xattr"][mykey] = myval
+
+ # Erasure coded pools don't support omap
+ if ec:
+ continue
+
+ # Create omap header in all objects but REPobject1
+ if i != 1:
+ myhdr = "hdr{i}".format(i=i)
+ proc = remote.run(args=['rados', '-p', POOL, 'setomapheader',
+ NAME, myhdr])
+ ret = proc.wait()
+ if ret != 0:
+ log.critical("setomapheader failed with {ret}".format(ret=ret))
+ ERRORS += 1
+ db[NAME]["omapheader"] = myhdr
+
+ db[NAME]["omap"] = {}
+ for k in keys:
+ if k == 0:
+ continue
+ mykey = "okey{i}-{k}".format(i=i, k=k)
+ myval = "oval{i}-{k}".format(i=i, k=k)
+ proc = remote.run(args=['rados', '-p', POOL, 'setomapval',
+ NAME, mykey, myval])
+ ret = proc.wait()
+ if ret != 0:
+ log.critical("setomapval failed with {ret}".format(ret=ret))
+ db[NAME]["omap"][mykey] = myval
+
+ return ERRORS
+
+
+def get_lines(filename):
+ tmpfd = open(filename, "r")
+ line = True
+ lines = []
+ while line:
+ line = tmpfd.readline().rstrip('\n')
+ if line:
+ lines += [line]
+ tmpfd.close()
+ os.unlink(filename)
+ return lines
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run ceph_objectstore_tool test
+
+ The config should be as follows::
+
+ ceph_objectstore_tool:
+ objects: 20 # <number of objects>
+ pgnum: 12
+ """
+
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'ceph_objectstore_tool task only accepts a dict for configuration'
+
+ log.info('Beginning ceph_objectstore_tool...')
+
+ log.debug(config)
+ log.debug(ctx)
+ clients = ctx.cluster.only(teuthology.is_type('client'))
+ assert len(clients.remotes) > 0, 'Must specify at least 1 client'
+ (cli_remote, _) = clients.remotes.popitem()
+ log.debug(cli_remote)
+
+ # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+ # client = clients.popitem()
+ # log.info(client)
+ osds = ctx.cluster.only(teuthology.is_type('osd'))
+ log.info("OSDS")
+ log.info(osds)
+ log.info(osds.remotes)
+
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ config=config,
+ logger=log.getChild('ceph_manager'),
+ )
+ ctx.manager = manager
+
+ while (len(manager.get_osd_status()['up']) !=
+ len(manager.get_osd_status()['raw'])):
+ time.sleep(10)
+ while (len(manager.get_osd_status()['in']) !=
+ len(manager.get_osd_status()['up'])):
+ time.sleep(10)
+ manager.raw_cluster_cmd('osd', 'set', 'noout')
+ manager.raw_cluster_cmd('osd', 'set', 'nodown')
+
+ PGNUM = config.get('pgnum', 12)
+ log.info("pgnum: {num}".format(num=PGNUM))
+
+ ERRORS = 0
+
+ REP_POOL = "rep_pool"
+ REP_NAME = "REPobject"
+ create_replicated_pool(cli_remote, REP_POOL, PGNUM)
+ ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)
+
+ EC_POOL = "ec_pool"
+ EC_NAME = "ECobject"
+ create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
+ ERRORS += test_objectstore(ctx, config, cli_remote,
+ EC_POOL, EC_NAME, ec=True)
+
+ if ERRORS == 0:
+ log.info("TEST PASSED")
+ else:
+ log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
+
+ assert ERRORS == 0
+
+ try:
+ yield
+ finally:
+ log.info('Ending ceph_objectstore_tool')
+
+
+def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
+ manager = ctx.manager
+
+ osds = ctx.cluster.only(teuthology.is_type('osd'))
+
+ TEUTHDIR = teuthology.get_testdir(ctx)
+ DATADIR = os.path.join(TEUTHDIR, "data")
+ DATALINECOUNT = 10000
+ ERRORS = 0
+ NUM_OBJECTS = config.get('objects', 10)
+ log.info("objects: {num}".format(num=NUM_OBJECTS))
+
+ pool_dump = manager.get_pool_dump(REP_POOL)
+ REPID = pool_dump['pool']
+
+ log.debug("repid={num}".format(num=REPID))
+
+ db = {}
+
+ LOCALDIR = tempfile.mkdtemp("cod")
+
+ cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR,
+ REP_NAME, DATALINECOUNT)
+ allremote = []
+ allremote.append(cli_remote)
+ allremote += osds.remotes.keys()
+ allremote = list(set(allremote))
+ for remote in allremote:
+ cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
+ REP_NAME, DATALINECOUNT)
+
+ ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR,
+ REP_NAME, DATALINECOUNT, REP_POOL, db, ec)
+
+ pgs = {}
+ for stats in manager.get_pg_stats():
+ if stats["pgid"].find(str(REPID) + ".") != 0:
+ continue
+ if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL:
+ for osd in stats["acting"]:
+ pgs.setdefault(osd, []).append(stats["pgid"])
+ elif pool_dump["type"] == ceph_manager.CephManager.ERASURE_CODED_POOL:
+ shard = 0
+ for osd in stats["acting"]:
+ pgs.setdefault(osd, []).append("{pgid}s{shard}".
+ format(pgid=stats["pgid"],
+ shard=shard))
+ shard += 1
+ else:
+ raise Exception("{pool} has an unexpected type {type}".
+ format(pool=REP_POOL, type=pool_dump["type"]))
+
+ log.info(pgs)
+ log.info(db)
+
+ for osd in manager.get_osd_status()['up']:
+ manager.kill_osd(osd)
+ time.sleep(5)
+
+ pgswithobjects = set()
+ objsinpg = {}
+
+ # Test --op list and generate json for all objects
+ log.info("Test --op list by generating json for all objects")
+ prefix = ("sudo ceph-objectstore-tool "
+ "--data-path {fpath} "
+ "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH)
+ for remote in osds.remotes.iterkeys():
+ log.debug(remote)
+ log.debug(osds.remotes[remote])
+ for role in osds.remotes[remote]:
+ if string.find(role, "osd.") != 0:
+ continue
+ osdid = int(role.split('.')[1])
+ log.info("process osd.{id} on {remote}".
+ format(id=osdid, remote=remote))
+ cmd = (prefix + "--op list").format(id=osdid)
+ proc = remote.run(args=cmd.split(), check_status=False,
+ stdout=StringIO())
+ if proc.exitstatus != 0:
+ log.error("Bad exit status {ret} from --op list request".
+ format(ret=proc.exitstatus))
+ ERRORS += 1
+ else:
+ for pgline in proc.stdout.getvalue().splitlines():
+ if not pgline:
+ continue
+ (pg, obj) = json.loads(pgline)
+ name = obj['oid']
+ if name in db:
+ pgswithobjects.add(pg)
+ objsinpg.setdefault(pg, []).append(name)
+ db[name].setdefault("pg2json",
+ {})[pg] = json.dumps(obj)
+
+ log.info(db)
+ log.info(pgswithobjects)
+ log.info(objsinpg)
+
+ if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL:
+ # Test get-bytes
+ log.info("Test get-bytes and set-bytes")
+ for basename in db.keys():
+ file = os.path.join(DATADIR, basename)
+ GETNAME = os.path.join(DATADIR, "get")
+ SETNAME = os.path.join(DATADIR, "set")
+
+ for remote in osds.remotes.iterkeys():
+ for role in osds.remotes[remote]:
+ if string.find(role, "osd.") != 0:
+ continue
+ osdid = int(role.split('.')[1])
+ if osdid not in pgs:
+ continue
+
+ for pg, JSON in db[basename]["pg2json"].iteritems():
+ if pg in pgs[osdid]:
+ cmd = ((prefix + "--pgid {pg}").
+ format(id=osdid, pg=pg).split())
+ cmd.append(run.Raw("'{json}'".format(json=JSON)))
+ cmd += ("get-bytes {fname}".
+ format(fname=GETNAME).split())
+ proc = remote.run(args=cmd, check_status=False)
+ if proc.exitstatus != 0:
+ remote.run(args="rm -f {getfile}".
+ format(getfile=GETNAME).split())
+ log.error("Bad exit status {ret}".
+ format(ret=proc.exitstatus))
+ ERRORS += 1
+ continue
+ cmd = ("diff -q {file} {getfile}".
+ format(file=file, getfile=GETNAME))
+ proc = remote.run(args=cmd.split())
+ if proc.exitstatus != 0:
+ log.error("Data from get-bytes differ")
+ # log.debug("Got:")
+ # cat_file(logging.DEBUG, GETNAME)
+ # log.debug("Expected:")
+ # cat_file(logging.DEBUG, file)
+ ERRORS += 1
+ remote.run(args="rm -f {getfile}".
+ format(getfile=GETNAME).split())
+
+ data = ("put-bytes going into {file}\n".
+ format(file=file))
+ teuthology.write_file(remote, SETNAME, data)
+ cmd = ((prefix + "--pgid {pg}").
+ format(id=osdid, pg=pg).split())
+ cmd.append(run.Raw("'{json}'".format(json=JSON)))
+ cmd += ("set-bytes {fname}".
+ format(fname=SETNAME).split())
+ proc = remote.run(args=cmd, check_status=False)
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.info("set-bytes failed for object {obj} "
+ "in pg {pg} osd.{id} ret={ret}".
+ format(obj=basename, pg=pg,
+ id=osdid, ret=proc.exitstatus))
+ ERRORS += 1
+
+ cmd = ((prefix + "--pgid {pg}").
+ format(id=osdid, pg=pg).split())
+ cmd.append(run.Raw("'{json}'".format(json=JSON)))
+ cmd += "get-bytes -".split()
+ proc = remote.run(args=cmd, check_status=False,
+ stdout=StringIO())
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.error("get-bytes after "
+ "set-bytes ret={ret}".
+ format(ret=proc.exitstatus))
+ ERRORS += 1
+ else:
+ if data != proc.stdout.getvalue():
+ log.error("Data inconsistent after "
+ "set-bytes, got:")
+ log.error(proc.stdout.getvalue())
+ ERRORS += 1
+
+ cmd = ((prefix + "--pgid {pg}").
+ format(id=osdid, pg=pg).split())
+ cmd.append(run.Raw("'{json}'".format(json=JSON)))
+ cmd += ("set-bytes {fname}".
+ format(fname=file).split())
+ proc = remote.run(args=cmd, check_status=False)
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.info("set-bytes failed for object {obj} "
+ "in pg {pg} osd.{id} ret={ret}".
+ format(obj=basename, pg=pg,
+ id=osdid, ret=proc.exitstatus))
+ ERRORS += 1
+
+ log.info("Test list-attrs get-attr")
+ for basename in db.keys():
+ file = os.path.join(DATADIR, basename)
+ GETNAME = os.path.join(DATADIR, "get")
+ SETNAME = os.path.join(DATADIR, "set")
+
+ for remote in osds.remotes.iterkeys():
+ for role in osds.remotes[remote]:
+ if string.find(role, "osd.") != 0:
+ continue
+ osdid = int(role.split('.')[1])
+ if osdid not in pgs:
+ continue
+
+ for pg, JSON in db[basename]["pg2json"].iteritems():
+ if pg in pgs[osdid]:
+ cmd = ((prefix + "--pgid {pg}").
+ format(id=osdid, pg=pg).split())
+ cmd.append(run.Raw("'{json}'".format(json=JSON)))
+ cmd += ["list-attrs"]
+ proc = remote.run(args=cmd, check_status=False,
+ stdout=StringIO(), stderr=StringIO())
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.error("Bad exit status {ret}".
+ format(ret=proc.exitstatus))
+ ERRORS += 1
+ continue
+ keys = proc.stdout.getvalue().split()
+ values = dict(db[basename]["xattr"])
+
+ for key in keys:
+ if (key == "_" or
+ key == "snapset" or
+ key == "hinfo_key"):
+ continue
+ key = key.strip("_")
+ if key not in values:
+ log.error("The key {key} should be present".
+ format(key=key))
+ ERRORS += 1
+ continue
+ exp = values.pop(key)
+ cmd = ((prefix + "--pgid {pg}").
+ format(id=osdid, pg=pg).split())
+ cmd.append(run.Raw("'{json}'".format(json=JSON)))
+ cmd += ("get-attr {key}".
+ format(key="_" + key).split())
+ proc = remote.run(args=cmd, check_status=False,
+ stdout=StringIO())
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.error("get-attr failed with {ret}".
+ format(ret=proc.exitstatus))
+ ERRORS += 1
+ continue
+ val = proc.stdout.getvalue()
+ if exp != val:
+ log.error("For key {key} got value {got} "
+ "instead of {expected}".
+ format(key=key, got=val,
+ expected=exp))
+ ERRORS += 1
+ if "hinfo_key" in keys:
+ cmd_prefix = prefix.format(id=osdid)
+ cmd = """
+ expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64)
+ echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} -
+ test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder
+ echo $expected | base64 --decode | \
+ {prefix} --pgid {pg} '{json}' set-attr {key} -
+ test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected
+ """.format(prefix=cmd_prefix, pg=pg, json=JSON,
+ key="hinfo_key")
+ log.debug(cmd)
+ proc = remote.run(args=['bash', '-e', '-x',
+ '-c', cmd],
+ check_status=False,
+ stdout=StringIO(),
+ stderr=StringIO())
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.error("failed with " +
+ str(proc.exitstatus))
+ log.error(proc.stdout.getvalue() + " " +
+ proc.stderr.getvalue())
+ ERRORS += 1
+
+ if len(values) != 0:
+ log.error("Not all keys found, remaining keys:")
+ log.error(values)
+
+ log.info("Test pg info")
+ for remote in osds.remotes.iterkeys():
+ for role in osds.remotes[remote]:
+ if string.find(role, "osd.") != 0:
+ continue
+ osdid = int(role.split('.')[1])
+ if osdid not in pgs:
+ continue
+
+ for pg in pgs[osdid]:
+ cmd = ((prefix + "--op info --pgid {pg}").
+ format(id=osdid, pg=pg).split())
+ proc = remote.run(args=cmd, check_status=False,
+ stdout=StringIO())
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.error("Failure of --op info command with {ret}".
+ format(ret=proc.exitstatus))
+ ERRORS += 1
+ continue
+ info = proc.stdout.getvalue()
+ if not str(pg) in info:
+ log.error("Bad data from info: {info}".format(info=info))
+ ERRORS += 1
+
+ log.info("Test pg logging")
+ for remote in osds.remotes.iterkeys():
+ for role in osds.remotes[remote]:
+ if string.find(role, "osd.") != 0:
+ continue
+ osdid = int(role.split('.')[1])
+ if osdid not in pgs:
+ continue
+
+ for pg in pgs[osdid]:
+ cmd = ((prefix + "--op log --pgid {pg}").
+ format(id=osdid, pg=pg).split())
+ proc = remote.run(args=cmd, check_status=False,
+ stdout=StringIO())
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.error("Getting log failed for pg {pg} "
+ "from osd.{id} with {ret}".
+ format(pg=pg, id=osdid, ret=proc.exitstatus))
+ ERRORS += 1
+ continue
+ HASOBJ = pg in pgswithobjects
+ MODOBJ = "modify" in proc.stdout.getvalue()
+ if HASOBJ != MODOBJ:
+ log.error("Bad log for pg {pg} from osd.{id}".
+ format(pg=pg, id=osdid))
+ MSG = "" if HASOBJ else "NOT "
+ log.error("Log should {msg}have a modify entry".
+ format(msg=MSG))
+ ERRORS += 1
+
+ log.info("Test pg export")
+ EXP_ERRORS = 0
+ for remote in osds.remotes.iterkeys():
+ for role in osds.remotes[remote]:
+ if string.find(role, "osd.") != 0:
+ continue
+ osdid = int(role.split('.')[1])
+ if osdid not in pgs:
+ continue
+
+ for pg in pgs[osdid]:
+ fpath = os.path.join(DATADIR, "osd{id}.{pg}".
+ format(id=osdid, pg=pg))
+
+ cmd = ((prefix + "--op export --pgid {pg} --file {file}").
+ format(id=osdid, pg=pg, file=fpath))
+ proc = remote.run(args=cmd, check_status=False,
+ stdout=StringIO())
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.error("Exporting failed for pg {pg} "
+ "on osd.{id} with {ret}".
+ format(pg=pg, id=osdid, ret=proc.exitstatus))
+ EXP_ERRORS += 1
+
+ ERRORS += EXP_ERRORS
+
+ log.info("Test pg removal")
+ RM_ERRORS = 0
+ for remote in osds.remotes.iterkeys():
+ for role in osds.remotes[remote]:
+ if string.find(role, "osd.") != 0:
+ continue
+ osdid = int(role.split('.')[1])
+ if osdid not in pgs:
+ continue
+
+ for pg in pgs[osdid]:
+ cmd = ((prefix + "--op remove --pgid {pg}").
+ format(pg=pg, id=osdid))
+ proc = remote.run(args=cmd, check_status=False,
+ stdout=StringIO())
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.error("Removing failed for pg {pg} "
+ "on osd.{id} with {ret}".
+ format(pg=pg, id=osdid, ret=proc.exitstatus))
+ RM_ERRORS += 1
+
+ ERRORS += RM_ERRORS
+
+ IMP_ERRORS = 0
+ if EXP_ERRORS == 0 and RM_ERRORS == 0:
+ log.info("Test pg import")
+
+ for remote in osds.remotes.iterkeys():
+ for role in osds.remotes[remote]:
+ if string.find(role, "osd.") != 0:
+ continue
+ osdid = int(role.split('.')[1])
+ if osdid not in pgs:
+ continue
+
+ for pg in pgs[osdid]:
+ fpath = os.path.join(DATADIR, "osd{id}.{pg}".
+ format(id=osdid, pg=pg))
+
+ cmd = ((prefix + "--op import --file {file}").
+ format(id=osdid, file=fpath))
+ proc = remote.run(args=cmd, check_status=False,
+ stdout=StringIO())
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.error("Import failed from {file} with {ret}".
+ format(file=fpath, ret=proc.exitstatus))
+ IMP_ERRORS += 1
+ else:
+ log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
+
+ ERRORS += IMP_ERRORS
+
+ if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
+ log.info("Restarting OSDs....")
+ # They still look to be up because nodown is set
+ for osd in manager.get_osd_status()['up']:
+ manager.revive_osd(osd)
+ # Wait for health?
+ time.sleep(5)
+ # Let scrub after test runs verify consistency of all copies
+ log.info("Verify replicated import data")
+ objects = range(1, NUM_OBJECTS + 1)
+ for i in objects:
+ NAME = REP_NAME + "{num}".format(num=i)
+ TESTNAME = os.path.join(DATADIR, "gettest")
+ REFNAME = os.path.join(DATADIR, NAME)
+
+ proc = rados(ctx, cli_remote,
+ ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False)
+
+ ret = proc.wait()
+ if ret != 0:
+ log.error("After import, rados get failed with {ret}".
+ format(ret=proc.exitstatus))
+ ERRORS += 1
+ continue
+
+ cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME,
+ ref=REFNAME)
+ proc = cli_remote.run(args=cmd, check_status=False)
+ proc.wait()
+ if proc.exitstatus != 0:
+ log.error("Data comparison failed for {obj}".format(obj=NAME))
+ ERRORS += 1
+
+ return ERRORS
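+
+# Rough summary of the ceph-objectstore-tool invocations exercised by
+# test_objectstore() above -- a sketch for orientation assembled from the
+# FSPATH/JPATH templates and the --op calls in this file, with the object JSON
+# being whatever '--op list' emitted:
+#
+#   ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-<id> \
+#       --journal-path /var/lib/ceph/osd/ceph-<id>/journal --op list
+#   ... --pgid <pg> '<object json>' get-bytes <file> / set-bytes <file>
+#   ... --pgid <pg> '<object json>' list-attrs / get-attr <key> / set-attr <key> -
+#   ... --op info|log|export|remove|import [--pgid <pg>] [--file <path>]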
--- /dev/null
+"""
+Chef-solo task
+"""
+import logging
+
+from teuthology.orchestra import run
+from teuthology import misc
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Run chef-solo on all nodes.
+ """
+ log.info('Running chef-solo...')
+
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'wget',
+# '-q',
+ '-O-',
+# 'https://raw.github.com/ceph/ceph-qa-chef/master/solo/solo-from-scratch',
+ 'http://git.ceph.com/?p=ceph-qa-chef.git;a=blob_plain;f=solo/solo-from-scratch;hb=HEAD',
+ run.Raw('|'),
+ 'sh',
+ '-x',
+ ],
+ wait=False,
+ )
+ )
+
+ log.info('Reconnecting after ceph-qa-chef run')
+ misc.reconnect(ctx, 10)  # Reconnect for ulimit and other ceph-qa-chef changes
+
--- /dev/null
+"""
+Mount cifs clients. Unmount when finished.
+"""
+import contextlib
+import logging
+import os
+
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Mount/unmount a cifs client.
+
+ The config is optional and defaults to mounting on all clients. If
+ a config is given, it is expected to be a list of clients to do
+ this operation on.
+
+ Example that starts smbd and mounts cifs on all nodes::
+
+ tasks:
+ - ceph:
+ - samba:
+ - cifs-mount:
+ - interactive:
+
+ Example that splits smbd and cifs::
+
+ tasks:
+ - ceph:
+ - samba: [samba.0]
+ - cifs-mount: [client.0]
+ - ceph-fuse: [client.1]
+ - interactive:
+
+ Example that specifies the share name::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ - samba:
+ samba.0:
+ cephfuse: "{testdir}/mnt.0"
+ - cifs-mount:
+ client.0:
+ share: cephfuse
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ log.info('Mounting cifs clients...')
+
+ if config is None:
+ config = dict(('client.{id}'.format(id=id_), None)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
+ elif isinstance(config, list):
+ config = dict((name, None) for name in config)
+
+ clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+
+ from .samba import get_sambas
+ samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')]
+ sambas = list(get_sambas(ctx=ctx, roles=samba_roles))
+ (ip, _) = sambas[0][1].ssh.get_transport().getpeername()
+ log.info('samba ip: {ip}'.format(ip=ip))
+
+ for id_, remote in clients:
+ mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
+ log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format(
+ id=id_, remote=remote,mnt=mnt))
+
+ remote.run(
+ args=[
+ 'mkdir',
+ '--',
+ mnt,
+ ],
+ )
+
+ rolestr = 'client.{id_}'.format(id_=id_)
+ unc = "ceph"
+ log.info("config: {c}".format(c=config))
+ if config[rolestr] is not None and 'share' in config[rolestr]:
+ unc = config[rolestr]['share']
+
+ remote.run(
+ args=[
+ 'sudo',
+ 'mount',
+ '-t',
+ 'cifs',
+ '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc),
+ '-o',
+ 'username=ubuntu,password=ubuntu',
+ mnt,
+ ],
+ )
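+
+ # The call above amounts to roughly the following (a sketch; the IP comes
+ # from the first samba role and the share name defaults to "ceph" unless
+ # the config overrides it):
+ #   sudo mount -t cifs //<samba ip>/<share> \
+ #       -o username=ubuntu,password=ubuntu <testdir>/mnt.<id>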
+
+ remote.run(
+ args=[
+ 'sudo',
+ 'chown',
+ 'ubuntu:ubuntu',
+ '{m}/'.format(m=mnt),
+ ],
+ )
+
+ try:
+ yield
+ finally:
+ log.info('Unmounting cifs clients...')
+ for id_, remote in clients:
+ remote.run(
+ args=[
+ 'sudo',
+ 'umount',
+ mnt,
+ ],
+ )
+ for id_, remote in clients:
+ while True:
+ try:
+ remote.run(
+ args=[
+ 'rmdir', '--', mnt,
+ run.Raw('2>&1'),
+ run.Raw('|'),
+ 'grep', 'Device or resource busy',
+ ],
+ )
+ import time
+ time.sleep(1)
+ except Exception:
+ break
--- /dev/null
+"""
+Cram tests
+"""
+import logging
+import os
+
+from teuthology import misc as teuthology
+from teuthology.parallel import parallel
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Run all cram tests from the specified urls on the specified
+ clients. Each client runs tests in parallel.
+
+ Limitations:
+ Tests must have a .t suffix. Tests with duplicate names will
+ overwrite each other, so only the last one will run.
+
+ For example::
+
+ tasks:
+ - ceph:
+ - cram:
+ clients:
+ client.0:
+ - http://ceph.com/qa/test.t
+ - http://ceph.com/qa/test2.t
+ client.1: [http://ceph.com/qa/test.t]
+
+ You can also run a list of cram tests on all clients::
+
+ tasks:
+ - ceph:
+ - cram:
+ clients:
+ all: [http://ceph.com/qa/test.t]
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ assert isinstance(config, dict)
+ assert 'clients' in config and isinstance(config['clients'], dict), \
+ 'configuration must contain a dictionary of clients'
+
+ clients = teuthology.replace_all_with_clients(ctx.cluster,
+ config['clients'])
+ testdir = teuthology.get_testdir(ctx)
+
+ try:
+ for client, tests in clients.iteritems():
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
+ remote.run(
+ args=[
+ 'mkdir', '--', client_dir,
+ run.Raw('&&'),
+ 'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
+ run.Raw('&&'),
+ '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
+ 'install', 'cram',
+ ],
+ )
+ for test in tests:
+ log.info('fetching test %s for %s', test, client)
+ assert test.endswith('.t'), 'tests must end in .t'
+ remote.run(
+ args=[
+ 'wget', '-nc', '-nv', '-P', client_dir, '--', test,
+ ],
+ )
+
+ with parallel() as p:
+ for role in clients.iterkeys():
+ p.spawn(_run_tests, ctx, role)
+ finally:
+ for client, tests in clients.iteritems():
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
+ test_files = set([test.rsplit('/', 1)[1] for test in tests])
+
+ # remove test files unless they failed
+ for test_file in test_files:
+ abs_file = os.path.join(client_dir, test_file)
+ remote.run(
+ args=[
+ 'test', '-f', abs_file + '.err',
+ run.Raw('||'),
+ 'rm', '-f', '--', abs_file,
+ ],
+ )
+
+ # ignore failure since more than one client may
+ # be run on a host, and the client dir should be
+ # non-empty if the test failed
+ remote.run(
+ args=[
+ 'rm', '-rf', '--',
+ '{tdir}/virtualenv'.format(tdir=testdir),
+ run.Raw(';'),
+ 'rmdir', '--ignore-fail-on-non-empty', client_dir,
+ ],
+ )
+
+def _run_tests(ctx, role):
+ """
+ For each role, check to make sure it's a client, then run the cram on that client
+
+ :param ctx: Context
+ :param role: Roles
+ """
+ assert isinstance(role, basestring)
+ PREFIX = 'client.'
+ assert role.startswith(PREFIX)
+ id_ = role[len(PREFIX):]
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ ceph_ref = ctx.summary.get('ceph-sha1', 'master')
+
+ testdir = teuthology.get_testdir(ctx)
+ log.info('Running tests for %s...', role)
+ remote.run(
+ args=[
+ run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
+ run.Raw('CEPH_ID="{id}"'.format(id=id_)),
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/virtualenv/bin/cram'.format(tdir=testdir),
+ '-v', '--',
+ run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)),
+ ],
+ logger=log.getChild(role),
+ )
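+
+# For reference, a fetched cram test is a plain ".t" file in cram's own format:
+# lines indented and starting with "$ " are commands, and the indented lines
+# after them are the expected output. A minimal, hypothetical example (not
+# shipped with this repo) might look like:
+#
+#     $ ceph health
+#     HEALTH_OK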
--- /dev/null
+#!/usr/bin/env python
+import contextlib
+import logging
+from cStringIO import StringIO
+import textwrap
+from ConfigParser import ConfigParser  # Python 2 stdlib module, consistent with the Python 2 idioms used throughout this file
+import time
+
+from teuthology.orchestra import run
+from teuthology import misc
+from teuthology.contextutil import nested
+
+log = logging.getLogger(__name__)
+
+DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git'
+DS_STABLE_BRANCHES = ("havana", "grizzly")
+
+is_devstack_node = lambda role: role.startswith('devstack')
+is_osd_node = lambda role: role.startswith('osd')
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ if config is None:
+ config = {}
+ if not isinstance(config, dict):
+ raise TypeError("config must be a dict")
+ with nested(lambda: install(ctx=ctx, config=config),
+ lambda: smoke(ctx=ctx, config=config),
+ ):
+ yield
+
+
+@contextlib.contextmanager
+def install(ctx, config):
+ """
+ Install OpenStack DevStack and configure it to use a Ceph cluster for
+ Glance and Cinder.
+
+ Requires one node with a role 'devstack'
+
+ Since devstack runs rampant on the system it's used on, typically you will
+ want to reprovision that machine after using devstack on it.
+
+ Also, the default 2GB of RAM that is given to vps nodes is insufficient. I
+ recommend 4GB. Downburst can be instructed to give 4GB to a vps node by
+ adding this to the yaml:
+
+ downburst:
+ ram: 4G
+
+ This was created using documentation found here:
+ https://github.com/openstack-dev/devstack/blob/master/README.md
+ http://ceph.com/docs/master/rbd/rbd-openstack/
+ """
+ if config is None:
+ config = {}
+ if not isinstance(config, dict):
+ raise TypeError("config must be a dict")
+
+ devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
+ an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
+
+ devstack_branch = config.get("branch", "master")
+ install_devstack(devstack_node, devstack_branch)
+ try:
+ configure_devstack_and_ceph(ctx, config, devstack_node, an_osd_node)
+ yield
+ finally:
+ pass
+
+
+def install_devstack(devstack_node, branch="master"):
+ log.info("Cloning DevStack repo...")
+
+ args = ['git', 'clone', DEVSTACK_GIT_REPO]
+ devstack_node.run(args=args)
+
+ if branch != "master":
+ if branch in DS_STABLE_BRANCHES and not branch.startswith("stable"):
+ branch = "stable/" + branch
+ log.info("Checking out {branch} branch...".format(branch=branch))
+ cmd = "cd devstack && git checkout " + branch
+ devstack_node.run(args=cmd)
+
+ log.info("Installing DevStack...")
+ args = ['cd', 'devstack', run.Raw('&&'), './stack.sh']
+ devstack_node.run(args=args)
+
+
+def configure_devstack_and_ceph(ctx, config, devstack_node, ceph_node):
+ pool_size = config.get('pool_size', '128')
+ create_pools(ceph_node, pool_size)
+ distribute_ceph_conf(devstack_node, ceph_node)
+ # This is where we would install python-ceph and ceph-common but it appears
+ # the ceph task does that for us.
+ generate_ceph_keys(ceph_node)
+ distribute_ceph_keys(devstack_node, ceph_node)
+ secret_uuid = set_libvirt_secret(devstack_node, ceph_node)
+ update_devstack_config_files(devstack_node, secret_uuid)
+ set_apache_servername(devstack_node)
+ # Rebooting is the most-often-used method of restarting devstack services
+ misc.reboot(devstack_node)
+ start_devstack(devstack_node)
+ restart_apache(devstack_node)
+
+
+def create_pools(ceph_node, pool_size):
+ log.info("Creating pools on Ceph cluster...")
+
+ for pool_name in ['volumes', 'images', 'backups']:
+ args = ['ceph', 'osd', 'pool', 'create', pool_name, pool_size]
+ ceph_node.run(args=args)
+
+
+def distribute_ceph_conf(devstack_node, ceph_node):
+ log.info("Copying ceph.conf to DevStack node...")
+
+ ceph_conf_path = '/etc/ceph/ceph.conf'
+ ceph_conf = misc.get_file(ceph_node, ceph_conf_path, sudo=True)
+ misc.sudo_write_file(devstack_node, ceph_conf_path, ceph_conf)
+
+
+def generate_ceph_keys(ceph_node):
+ log.info("Generating Ceph keys...")
+
+ ceph_auth_cmds = [
+ ['ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
+ 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'], # noqa
+ ['ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
+ 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'], # noqa
+ ['ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
+ 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'], # noqa
+ ]
+ for cmd in ceph_auth_cmds:
+ ceph_node.run(args=cmd)
+
+
+def distribute_ceph_keys(devstack_node, ceph_node):
+ log.info("Copying Ceph keys to DevStack node...")
+
+ def copy_key(from_remote, key_name, to_remote, dest_path, owner):
+ key_stringio = StringIO()
+ from_remote.run(
+ args=['ceph', 'auth', 'get-or-create', key_name],
+ stdout=key_stringio)
+ key_stringio.seek(0)
+ misc.sudo_write_file(to_remote, dest_path,
+ key_stringio, owner=owner)
+ keys = [
+ dict(name='client.glance',
+ path='/etc/ceph/ceph.client.glance.keyring',
+ # devstack appears to just want root:root
+ #owner='glance:glance',
+ ),
+ dict(name='client.cinder',
+ path='/etc/ceph/ceph.client.cinder.keyring',
+ # devstack appears to just want root:root
+ #owner='cinder:cinder',
+ ),
+ dict(name='client.cinder-backup',
+ path='/etc/ceph/ceph.client.cinder-backup.keyring',
+ # devstack appears to just want root:root
+ #owner='cinder:cinder',
+ ),
+ ]
+ for key_dict in keys:
+ copy_key(ceph_node, key_dict['name'], devstack_node,
+ key_dict['path'], key_dict.get('owner'))
+
+
+def set_libvirt_secret(devstack_node, ceph_node):
+ log.info("Setting libvirt secret...")
+
+ cinder_key_stringio = StringIO()
+ ceph_node.run(args=['ceph', 'auth', 'get-key', 'client.cinder'],
+ stdout=cinder_key_stringio)
+ cinder_key = cinder_key_stringio.getvalue().strip()
+
+ uuid_stringio = StringIO()
+ devstack_node.run(args=['uuidgen'], stdout=uuid_stringio)
+ uuid = uuid_stringio.getvalue().strip()
+
+ secret_path = '/tmp/secret.xml'
+ secret_template = textwrap.dedent("""
+ <secret ephemeral='no' private='no'>
+ <uuid>{uuid}</uuid>
+ <usage type='ceph'>
+ <name>client.cinder secret</name>
+ </usage>
+ </secret>""")
+ misc.sudo_write_file(devstack_node, secret_path,
+ secret_template.format(uuid=uuid))
+ devstack_node.run(args=['sudo', 'virsh', 'secret-define', '--file',
+ secret_path])
+ devstack_node.run(args=['sudo', 'virsh', 'secret-set-value', '--secret',
+ uuid, '--base64', cinder_key])
+ return uuid
+
+
+def update_devstack_config_files(devstack_node, secret_uuid):
+ log.info("Updating DevStack config files to use Ceph...")
+
+ def backup_config(node, file_name, backup_ext='.orig.teuth'):
+ node.run(args=['cp', '-f', file_name, file_name + backup_ext])
+
+ def update_config(config_name, config_stream, update_dict,
+ section='DEFAULT'):
+ parser = ConfigParser()
+ parser.readfp(config_stream)
+ for (key, value) in update_dict.items():
+ parser.set(section, key, value)
+ out_stream = StringIO()
+ parser.write(out_stream)
+ out_stream.seek(0)
+ return out_stream
+
+ updates = [
+ dict(name='/etc/glance/glance-api.conf', options=dict(
+ default_store='rbd',
+ rbd_store_user='glance',
+ rbd_store_pool='images',
+ show_image_direct_url='True',)),
+ dict(name='/etc/cinder/cinder.conf', options=dict(
+ volume_driver='cinder.volume.drivers.rbd.RBDDriver',
+ rbd_pool='volumes',
+ rbd_ceph_conf='/etc/ceph/ceph.conf',
+ rbd_flatten_volume_from_snapshot='false',
+ rbd_max_clone_depth='5',
+ glance_api_version='2',
+ rbd_user='cinder',
+ rbd_secret_uuid=secret_uuid,
+ backup_driver='cinder.backup.drivers.ceph',
+ backup_ceph_conf='/etc/ceph/ceph.conf',
+ backup_ceph_user='cinder-backup',
+ backup_ceph_chunk_size='134217728',
+ backup_ceph_pool='backups',
+ backup_ceph_stripe_unit='0',
+ backup_ceph_stripe_count='0',
+ restore_discard_excess_bytes='true',
+ )),
+ dict(name='/etc/nova/nova.conf', options=dict(
+ libvirt_images_type='rbd',
+ libvirt_images_rbd_pool='volumes',
+ libvirt_images_rbd_ceph_conf='/etc/ceph/ceph.conf',
+ rbd_user='cinder',
+ rbd_secret_uuid=secret_uuid,
+ libvirt_inject_password='false',
+ libvirt_inject_key='false',
+ libvirt_inject_partition='-2',
+ )),
+ ]
+
+ for update in updates:
+ file_name = update['name']
+ options = update['options']
+ config_str = misc.get_file(devstack_node, file_name, sudo=True)
+ config_stream = StringIO(config_str)
+ backup_config(devstack_node, file_name)
+ new_config_stream = update_config(file_name, config_stream, options)
+ misc.sudo_write_file(devstack_node, file_name, new_config_stream)
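+
+# For orientation: after the loop above, the [DEFAULT] section of
+# /etc/cinder/cinder.conf should end up with entries along these lines (a
+# sketch derived from the options dict; exact ordering is up to ConfigParser):
+#
+#   [DEFAULT]
+#   volume_driver = cinder.volume.drivers.rbd.RBDDriver
+#   rbd_pool = volumes
+#   rbd_user = cinder
+#   rbd_secret_uuid = <uuid from set_libvirt_secret>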
+
+
+def set_apache_servername(node):
+ # Apache complains: "Could not reliably determine the server's fully
+ # qualified domain name, using 127.0.0.1 for ServerName"
+ # So, let's make sure it knows its name.
+ log.info("Setting Apache ServerName...")
+
+ hostname = node.hostname
+ config_file = '/etc/apache2/conf.d/servername'
+ misc.sudo_write_file(node, config_file,
+ "ServerName {name}".format(name=hostname))
+
+
+def start_devstack(devstack_node):
+ log.info("Patching devstack start script...")
+ # This causes screen to start headless - otherwise rejoin-stack.sh fails
+ # because there is no terminal attached.
+ cmd = "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh"
+ devstack_node.run(args=cmd)
+
+ log.info("Starting devstack...")
+ cmd = "cd devstack && ./rejoin-stack.sh"
+ devstack_node.run(args=cmd)
+
+ # This was added because I was getting timeouts on Cinder requests - which
+ # were trying to access Keystone on port 5000. A more robust way to handle
+ # this would be to introduce a wait-loop on devstack_node that checks to
+ # see if a service is listening on port 5000.
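+    # A possible sketch of such a wait loop (illustrative only, not enabled
+    # here); it assumes `nc` is installed on the DevStack node:
+    #
+    #   for _ in range(30):
+    #       if devstack_node.run(args=['nc', '-z', 'localhost', '5000'],
+    #                            check_status=False).exitstatus == 0:
+    #           break
+    #       time.sleep(1)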
+ log.info("Waiting 30s for devstack to start...")
+ time.sleep(30)
+
+
+def restart_apache(node):
+ node.run(args=['sudo', '/etc/init.d/apache2', 'restart'], wait=True)
+
+
+@contextlib.contextmanager
+def exercise(ctx, config):
+ log.info("Running devstack exercises...")
+
+ if config is None:
+ config = {}
+ if not isinstance(config, dict):
+ raise TypeError("config must be a dict")
+
+ devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
+
+ # TODO: save the log *and* preserve failures
+ #devstack_archive_dir = create_devstack_archive(ctx, devstack_node)
+
+ try:
+ #cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format( # noqa
+ # dir=devstack_archive_dir)
+ cmd = "cd devstack && ./exercise.sh"
+ devstack_node.run(args=cmd, wait=True)
+ yield
+ finally:
+ pass
+
+
+def create_devstack_archive(ctx, devstack_node):
+ test_dir = misc.get_testdir(ctx)
+ devstack_archive_dir = "{test_dir}/archive/devstack".format(
+ test_dir=test_dir)
+ devstack_node.run(args="mkdir -p " + devstack_archive_dir)
+ return devstack_archive_dir
+
+
+@contextlib.contextmanager
+def smoke(ctx, config):
+ log.info("Running a basic smoketest...")
+
+ devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
+ an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
+
+ try:
+ create_volume(devstack_node, an_osd_node, 'smoke0', 1)
+ yield
+ finally:
+ pass
+
+
+def create_volume(devstack_node, ceph_node, vol_name, size):
+ """
+ :param size: The size of the volume, in GB
+ """
+ size = str(size)
+ log.info("Creating a {size}GB volume named {name}...".format(
+ name=vol_name,
+ size=size))
+ args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create',
+ '--display-name', vol_name, size]
+ out_stream = StringIO()
+ devstack_node.run(args=args, stdout=out_stream, wait=True)
+ vol_info = parse_os_table(out_stream.getvalue())
+ log.debug("Volume info: %s", str(vol_info))
+
+ out_stream = StringIO()
+ try:
+ ceph_node.run(args="rbd --id cinder ls -l volumes", stdout=out_stream,
+ wait=True)
+ except run.CommandFailedError:
+ log.debug("Original rbd call failed; retrying without '--id cinder'")
+ ceph_node.run(args="rbd ls -l volumes", stdout=out_stream,
+ wait=True)
+
+ assert vol_info['id'] in out_stream.getvalue(), \
+ "Volume not found on Ceph cluster"
+ assert vol_info['size'] == size, \
+ "Volume size on Ceph cluster is different than specified"
+ return vol_info['id']
+
+
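+# parse_os_table() turns the '|'-delimited ASCII table printed by the
+# OpenStack CLI (e.g. `cinder create` above) into a property -> value dict.
+# Illustrative input (layout assumed from the parsing below, not captured
+# from a real run):
+#
+#   +--------------+--------------------------------------+
+#   |   Property   |                Value                 |
+#   +--------------+--------------------------------------+
+#   |      id      | 2f0e5e8e-...                         |
+#   |     size     | 1                                    |
+#   +--------------+--------------------------------------+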
+def parse_os_table(table_str):
+ out_dict = dict()
+ for line in table_str.split('\n'):
+ if line.startswith('|'):
+ items = line.split()
+ out_dict[items[1]] = items[3]
+ return out_dict
--- /dev/null
+"""
+Raise exceptions on osd coredumps or test err directories
+"""
+import contextlib
+import logging
+import time
+from teuthology.orchestra import run
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Die if {testdir}/err exists or if an OSD dumps core
+ """
+ if config is None:
+ config = {}
+
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+ log.info('num_osds is %s' % num_osds)
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ while len(manager.get_osd_status()['up']) < num_osds:
+ time.sleep(10)
+
+ testdir = teuthology.get_testdir(ctx)
+
+ while True:
+ for i in range(num_osds):
+ (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys()
+ p = osd_remote.run(
+ args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ],
+ wait=True,
+ check_status=False,
+ )
+ exit_status = p.exitstatus
+
+ if exit_status == 0:
+ log.info("osd %d has an error" % i)
+ raise Exception("osd %d error" % i)
+
+ log_path = '/var/log/ceph/osd.%d.log' % (i)
+
+ p = osd_remote.run(
+ args = [
+ 'tail', '-1', log_path,
+ run.Raw('|'),
+ 'grep', '-q', 'end dump'
+ ],
+ wait=True,
+ check_status=False,
+ )
+ exit_status = p.exitstatus
+
+ if exit_status == 0:
+ log.info("osd %d dumped core" % i)
+ raise Exception("osd %d dumped core" % i)
+
+ time.sleep(5)
--- /dev/null
+"""
+Special case divergence test
+"""
+import logging
+import time
+
+import ceph_manager
+from teuthology import misc as teuthology
+from util.rados import rados
+
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Test handling of divergent entries with prior_version
+ prior to log_tail
+
+ config: none
+
+ Requires 3 osds.
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'divergent_priors task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+ ctx.manager = manager
+
+ while len(manager.get_osd_status()['up']) < 3:
+ time.sleep(10)
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.raw_cluster_cmd('osd', 'set', 'noout')
+ manager.raw_cluster_cmd('osd', 'set', 'noin')
+ manager.raw_cluster_cmd('osd', 'set', 'nodown')
+ manager.wait_for_clean()
+
+ # something that is always there
+ dummyfile = '/etc/fstab'
+ dummyfile2 = '/etc/resolv.conf'
+
+ # create 1 pg pool
+ log.info('creating foo')
+ manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1')
+
+ osds = [0, 1, 2]
+ for i in osds:
+ manager.set_config(i, osd_min_pg_log_entries=1)
+
+ # determine primary
+ divergent = manager.get_pg_primary('foo', 0)
+ log.info("primary and soon to be divergent is %d", divergent)
+ non_divergent = [0,1,2]
+ non_divergent.remove(divergent)
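+    # The PG's primary is the OSD we will later make divergent: the other two
+    # OSDs get blackholed below while the primary keeps accepting a write,
+    # giving it log entries the others never see.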
+
+ log.info('writing initial objects')
+ # write 1000 objects
+ for i in range(1000):
+ rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
+
+ manager.wait_for_clean()
+
+ # blackhole non_divergent
+ log.info("blackholing osds %s", str(non_divergent))
+ for i in non_divergent:
+ manager.set_config(i, filestore_blackhole='')
+
+ # write 1 (divergent) object
+ log.info('writing divergent object existing_0')
+ rados(
+ ctx, mon, ['-p', 'foo', 'put', 'existing_0', dummyfile2],
+ wait=False)
+ time.sleep(10)
+ mon.run(
+ args=['killall', '-9', 'rados'],
+ wait=True,
+ check_status=False)
+
+ # kill all the osds
+ log.info('killing all the osds')
+ for i in osds:
+ manager.kill_osd(i)
+ for i in osds:
+ manager.mark_down_osd(i)
+ for i in osds:
+ manager.mark_out_osd(i)
+
+ # bring up non-divergent
+ log.info("bringing up non_divergent %s", str(non_divergent))
+ for i in non_divergent:
+ manager.revive_osd(i)
+ for i in non_divergent:
+ manager.mark_in_osd(i)
+
+ log.info('making log long to prevent backfill')
+ for i in non_divergent:
+ manager.set_config(i, osd_min_pg_log_entries=100000)
+
+ # write 1 non-divergent object (ensure that old divergent one is divergent)
+ log.info('writing non-divergent object existing_1')
+ rados(ctx, mon, ['-p', 'foo', 'put', 'existing_1', dummyfile2])
+
+ manager.wait_for_recovery()
+
+ # ensure no recovery
+ log.info('delay recovery')
+ for i in non_divergent:
+ manager.set_config(i, osd_recovery_delay_start=100000)
+
+ # bring in our divergent friend
+ log.info("revive divergent %d", divergent)
+ manager.revive_osd(divergent)
+
+ while len(manager.get_osd_status()['up']) < 3:
+ time.sleep(10)
+
+ log.info('delay recovery divergent')
+ manager.set_config(divergent, osd_recovery_delay_start=100000)
+ log.info('mark divergent in')
+ manager.mark_in_osd(divergent)
+
+ log.info('wait for peering')
+ rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
+
+ log.info("killing divergent %d", divergent)
+ manager.kill_osd(divergent)
+ log.info("reviving divergent %d", divergent)
+ manager.revive_osd(divergent)
+
+ log.info('allowing recovery')
+ for i in non_divergent:
+ manager.set_config(i, osd_recovery_delay_start=0)
+
+ log.info('reading existing_0')
+ exit_status = rados(ctx, mon,
+ ['-p', 'foo', 'get', 'existing_0',
+ '-o', '/tmp/existing'])
+    assert exit_status == 0
+ log.info("success")
--- /dev/null
+"""
+Dump_stuck command
+"""
+import logging
+import re
+import time
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+def check_stuck(manager, num_inactive, num_unclean, num_stale, timeout=10):
+ """
+    Do checks. Make sure get_stuck_pgs returns the right amount of information, then
+    extract health information from the raw_cluster_cmd and compare the results with
+    values passed in. This passes if all asserts pass.
+
+    :param manager: Ceph manager
+    :param num_inactive: number of inactive PGs that are stuck
+    :param num_unclean: number of unclean PGs that are stuck
+    :param num_stale: number of stale PGs that are stuck
+ :param timeout: timeout value for get_stuck_pgs calls
+ """
+ inactive = manager.get_stuck_pgs('inactive', timeout)
+ assert len(inactive) == num_inactive
+ unclean = manager.get_stuck_pgs('unclean', timeout)
+ assert len(unclean) == num_unclean
+ stale = manager.get_stuck_pgs('stale', timeout)
+ assert len(stale) == num_stale
+
+ # check health output as well
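+    # An illustrative health string the regexes below match against (wording
+    # assumed, not captured from a real cluster):
+    #   HEALTH_WARN 24 pgs stuck inactive; 24 pgs stuck unclean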
+ health = manager.raw_cluster_cmd('health')
+ log.debug('ceph health is: %s', health)
+ if num_inactive > 0:
+ m = re.search('(\d+) pgs stuck inactive', health)
+ assert int(m.group(1)) == num_inactive
+ if num_unclean > 0:
+ m = re.search('(\d+) pgs stuck unclean', health)
+ assert int(m.group(1)) == num_unclean
+ if num_stale > 0:
+ m = re.search('(\d+) pgs stuck stale', health)
+ assert int(m.group(1)) == num_stale
+
+def task(ctx, config):
+ """
+ Test the dump_stuck command.
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ assert config is None, \
+ 'dump_stuck requires no configuration'
+ assert teuthology.num_instances_of_type(ctx.cluster, 'osd') == 2, \
+ 'dump_stuck requires exactly 2 osds'
+
+ timeout = 60
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.wait_for_clean(timeout)
+
+ manager.raw_cluster_cmd('tell', 'mon.0', 'injectargs', '--',
+# '--mon-osd-report-timeout 90',
+ '--mon-pg-stuck-threshold 10')
+
+ check_stuck(
+ manager,
+ num_inactive=0,
+ num_unclean=0,
+ num_stale=0,
+ )
+ num_pgs = manager.get_num_pgs()
+
+ manager.mark_out_osd(0)
+ time.sleep(timeout)
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.wait_for_recovery(timeout)
+
+ check_stuck(
+ manager,
+ num_inactive=0,
+ num_unclean=num_pgs,
+ num_stale=0,
+ )
+
+ manager.mark_in_osd(0)
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.wait_for_clean(timeout)
+
+ check_stuck(
+ manager,
+ num_inactive=0,
+ num_unclean=0,
+ num_stale=0,
+ )
+
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'):
+ manager.kill_osd(id_)
+ manager.mark_down_osd(id_)
+
+ starttime = time.time()
+ done = False
+ while not done:
+ try:
+ check_stuck(
+ manager,
+ num_inactive=0,
+ num_unclean=0,
+ num_stale=num_pgs,
+ )
+ done = True
+ except AssertionError:
+ # wait up to 15 minutes to become stale
+ if time.time() - starttime > 900:
+ raise
+
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'):
+ manager.revive_osd(id_)
+ manager.mark_in_osd(id_)
+ while True:
+ try:
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ break
+ except Exception:
+ log.exception('osds must not be started yet, waiting...')
+ time.sleep(1)
+ manager.wait_for_clean(timeout)
+
+ check_stuck(
+ manager,
+ num_inactive=0,
+ num_unclean=0,
+ num_stale=0,
+ )
--- /dev/null
+"""
+Lost_unfound
+"""
+import logging
+import ceph_manager
+from teuthology import misc as teuthology
+from util.rados import rados
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Test handling of lost objects on an ec pool.
+
+    A pretty rigid cluster is brought up and tested by this task
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'lost_unfound task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+ manager.wait_for_clean()
+
+
+ pool = manager.create_pool_with_unique_name(
+ ec_pool=True,
+ ec_m=2,
+ ec_k=2)
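+    # With k=2 data chunks and m=2 coding chunks every object is striped
+    # across four shards, so the pool can tolerate losing up to two of them.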
+
+ # something that is always there
+ dummyfile = '/etc/fstab'
+
+ # kludge to make sure they get a map
+ rados(ctx, mon, ['-p', pool, 'put', 'dummy', dummyfile])
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.wait_for_recovery()
+
+ # create old objects
+ for f in range(1, 10):
+ rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', pool, 'rm', 'existed_%d' % f])
+
+ # delay recovery, and make the pg log very long (to prevent backfill)
+ manager.raw_cluster_cmd(
+ 'tell', 'osd.1',
+ 'injectargs',
+ '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
+ )
+
+ manager.kill_osd(0)
+ manager.mark_down_osd(0)
+ manager.kill_osd(3)
+ manager.mark_down_osd(3)
+
+ for f in range(1, 10):
+ rados(ctx, mon, ['-p', pool, 'put', 'new_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile])
+
+ # take out osd.1 and a necessary shard of those objects.
+ manager.kill_osd(1)
+ manager.mark_down_osd(1)
+ manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
+ manager.revive_osd(0)
+ manager.wait_till_osd_is_up(0)
+ manager.revive_osd(3)
+ manager.wait_till_osd_is_up(3)
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+ manager.wait_till_active()
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+
+ # verify that there are unfound objects
+ unfound = manager.get_num_unfound_objects()
+ log.info("there are %d unfound objects" % unfound)
+ assert unfound
+
+ # mark stuff lost
+ pgs = manager.get_pg_stats()
+ for pg in pgs:
+ if pg['stat_sum']['num_objects_unfound'] > 0:
+ # verify that i can list them direct from the osd
+ log.info('listing missing/lost in %s state %s', pg['pgid'],
+ pg['state']);
+ m = manager.list_pg_missing(pg['pgid'])
+ log.info('%s' % m)
+ assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
+
+ log.info("reverting unfound in %s", pg['pgid'])
+ manager.raw_cluster_cmd('pg', pg['pgid'],
+ 'mark_unfound_lost', 'delete')
+ else:
+ log.info("no unfound in %s", pg['pgid'])
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
+ manager.raw_cluster_cmd('tell', 'osd.3', 'debug', 'kick_recovery_wq', '5')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+ manager.wait_for_recovery()
+
+ # verify result
+ for f in range(1, 10):
+ err = rados(ctx, mon, ['-p', pool, 'get', 'new_%d' % f, '-'])
+ assert err
+ err = rados(ctx, mon, ['-p', pool, 'get', 'existed_%d' % f, '-'])
+ assert err
+ err = rados(ctx, mon, ['-p', pool, 'get', 'existing_%d' % f, '-'])
+ assert err
+
+ # see if osd.1 can cope
+ manager.revive_osd(1)
+ manager.wait_till_osd_is_up(1)
+ manager.wait_for_clean()
--- /dev/null
+"""
+Filestore/filejournal handler
+"""
+import logging
+from teuthology.orchestra import run
+import random
+
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Test filestore/filejournal handling of non-idempotent events.
+
+    Currently this is a kludge; we require the ceph task precedes us just
+ so that we get the tarball installed to run the test binary.
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+ "task only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ # just use the first client...
+    client = clients[0]
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+
+ testdir = teuthology.get_testdir(ctx)
+
+ dir = '%s/data/test.%s' % (testdir, client)
+
+ seed = str(int(random.uniform(1,100)))
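+    # random starting seed (1-99) handed to run_seed_to_range.sh below; a
+    # fresh value each run presumably varies the transaction sequences the
+    # script replays.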
+
+ try:
+ log.info('creating a working dir')
+ remote.run(args=['mkdir', dir])
+ remote.run(
+ args=[
+ 'cd', dir,
+ run.Raw('&&'),
+ 'wget','-q', '-Orun_seed_to.sh',
+ 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to.sh;hb=HEAD',
+ run.Raw('&&'),
+ 'wget','-q', '-Orun_seed_to_range.sh',
+ 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to_range.sh;hb=HEAD',
+ run.Raw('&&'),
+ 'chmod', '+x', 'run_seed_to.sh', 'run_seed_to_range.sh',
+ ]);
+
+ log.info('running a series of tests')
+ proc = remote.run(
+ args=[
+ 'cd', dir,
+ run.Raw('&&'),
+ './run_seed_to_range.sh', seed, '50', '300',
+ ],
+ wait=False,
+ check_status=False)
+        result = proc.wait()
+
+ if result != 0:
+ remote.run(
+ args=[
+ 'cp', '-a', dir, '{tdir}/archive/idempotent_failure'.format(tdir=testdir),
+ ])
+ raise Exception("./run_seed_to_range.sh errored out")
+
+ finally:
+ remote.run(args=[
+ 'rm', '-rf', '--', dir
+ ])
+
--- /dev/null
+"""
+Mount/unmount a ``kernel`` client.
+"""
+import contextlib
+import logging
+import os
+
+from teuthology import misc as teuthology
+from util.kclient import write_secret_file
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Mount/unmount a ``kernel`` client.
+
+ The config is optional and defaults to mounting on all clients. If
+ a config is given, it is expected to be a list of clients to do
+ this operation on. This lets you e.g. set up one client with
+ ``ceph-fuse`` and another with ``kclient``.
+
+ Example that mounts all clients::
+
+ tasks:
+ - ceph:
+ - kclient:
+ - interactive:
+
+    Example that uses both ``kclient`` and ``ceph-fuse``::
+
+ tasks:
+ - ceph:
+ - ceph-fuse: [client.0]
+ - kclient: [client.1]
+ - interactive:
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ log.info('Mounting kernel clients...')
+ assert config is None or isinstance(config, list), \
+ "task kclient got invalid config"
+
+ if config is None:
+ config = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ clients = list(teuthology.get_clients(ctx=ctx, roles=config))
+
+ testdir = teuthology.get_testdir(ctx)
+
+ for id_, remote in clients:
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
+ id=id_, remote=remote, mnt=mnt))
+
+ # figure mon ips
+ remotes_and_roles = ctx.cluster.remotes.items()
+ roles = [roles for (remote_, roles) in remotes_and_roles]
+ ips = [host for (host, port) in (remote_.ssh.get_transport().getpeername() for (remote_, roles) in remotes_and_roles)]
+ mons = teuthology.get_mons(roles, ips).values()
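+        # These addresses are joined below into the mount.ceph device string,
+        # e.g. "ip1:port,ip2:port,ip3:port:/" (shape taken from the run()
+        # call further down).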
+
+ keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+ secret = '{tdir}/data/client.{id}.secret'.format(tdir=testdir, id=id_)
+ write_secret_file(ctx, remote, 'client.{id}'.format(id=id_),
+ keyring, secret)
+
+ remote.run(
+ args=[
+ 'mkdir',
+ '--',
+ mnt,
+ ],
+ )
+
+ remote.run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '/sbin/mount.ceph',
+ '{mons}:/'.format(mons=','.join(mons)),
+ mnt,
+ '-v',
+ '-o',
+ 'name={id},secretfile={secret}'.format(id=id_,
+ secret=secret),
+ ],
+ )
+
+ try:
+ yield
+ finally:
+ log.info('Unmounting kernel clients...')
+ for id_, remote in clients:
+ log.debug('Unmounting client client.{id}...'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ remote.run(
+ args=[
+ 'sudo',
+ 'umount',
+ mnt,
+ ],
+ )
+ remote.run(
+ args=[
+ 'rmdir',
+ '--',
+ mnt,
+ ],
+ )
--- /dev/null
+"""
+locktests
+"""
+import logging
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Run locktests, from the xfstests suite, on the given
+ clients. Whether the clients are ceph-fuse or kernel does not
+ matter, and the two clients can refer to the same mount.
+
+ The config is a list of two clients to run the locktest on. The
+ first client will be the host.
+
+ For example:
+ tasks:
+ - ceph:
+ - ceph-fuse: [client.0, client.1]
+ - locktest:
+ [client.0, client.1]
+
+ This task does not yield; there would be little point.
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+
+ assert isinstance(config, list)
+ log.info('fetching and building locktests...')
+ (host,) = ctx.cluster.only(config[0]).remotes
+ (client,) = ctx.cluster.only(config[1]).remotes
+ ( _, _, host_id) = config[0].partition('.')
+ ( _, _, client_id) = config[1].partition('.')
+ testdir = teuthology.get_testdir(ctx)
+ hostmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=host_id)
+ clientmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=client_id)
+
+ try:
+ for client_name in config:
+ log.info('building on {client_}'.format(client_=client_name))
+ ctx.cluster.only(client_name).run(
+ args=[
+                    # explicitly does not support multiple locktest tasks
+                    # in a single run; the result archival would conflict
+ 'mkdir', '{tdir}/archive/locktest'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'mkdir', '{tdir}/locktest'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'wget',
+ '-nv',
+ 'https://raw.github.com/gregsfortytwo/xfstests-ceph/master/src/locktest.c',
+ '-O', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'g++', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
+ '-o', '{tdir}/locktest/locktest'.format(tdir=testdir)
+ ],
+ logger=log.getChild('locktest_client.{id}'.format(id=client_name)),
+ )
+
+ log.info('built locktest on each client')
+
+ host.run(args=['sudo', 'touch',
+ '{mnt}/locktestfile'.format(mnt=hostmnt),
+ run.Raw('&&'),
+ 'sudo', 'chown', 'ubuntu.ubuntu',
+ '{mnt}/locktestfile'.format(mnt=hostmnt)
+ ]
+ )
+
+ log.info('starting on host')
+ hostproc = host.run(
+ args=[
+ '{tdir}/locktest/locktest'.format(tdir=testdir),
+ '-p', '6788',
+ '-d',
+ '{mnt}/locktestfile'.format(mnt=hostmnt),
+ ],
+ wait=False,
+ logger=log.getChild('locktest.host'),
+ )
+ log.info('starting on client')
+ (_,_,hostaddr) = host.name.partition('@')
+ clientproc = client.run(
+ args=[
+ '{tdir}/locktest/locktest'.format(tdir=testdir),
+ '-p', '6788',
+ '-d',
+ '-h', hostaddr,
+ '{mnt}/locktestfile'.format(mnt=clientmnt),
+ ],
+ logger=log.getChild('locktest.client'),
+ wait=False
+ )
+
+ hostresult = hostproc.wait()
+ clientresult = clientproc.wait()
+ if (hostresult != 0) or (clientresult != 0):
+ raise Exception("Did not pass locking test!")
+ log.info('finished locktest executable with results {r} and {s}'. \
+ format(r=hostresult, s=clientresult))
+
+ finally:
+ log.info('cleaning up host dir')
+ host.run(
+ args=[
+ 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir),
+ run.Raw('&&'),
+                'rmdir', '{tdir}/locktest'.format(tdir=testdir)
+ ],
+ logger=log.getChild('.{id}'.format(id=config[0])),
+ )
+ log.info('cleaning up client dir')
+ client.run(
+ args=[
+ 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'rmdir', '{tdir}/locktest'.format(tdir=testdir)
+ ],
+ logger=log.getChild('.{id}'.format(\
+ id=config[1])),
+ )
--- /dev/null
+"""
+Lost_unfound
+"""
+import logging
+import time
+import ceph_manager
+from teuthology import misc as teuthology
+from util.rados import rados
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Test handling of lost objects.
+
+    A pretty rigid cluster is brought up and tested by this task
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'lost_unfound task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ while len(manager.get_osd_status()['up']) < 3:
+ time.sleep(10)
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_clean()
+
+ # something that is always there
+ dummyfile = '/etc/fstab'
+
+ # take an osd out until the very end
+ manager.kill_osd(2)
+ manager.mark_down_osd(2)
+ manager.mark_out_osd(2)
+
+ # kludge to make sure they get a map
+ rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.wait_for_recovery()
+
+ # create old objects
+ for f in range(1, 10):
+ rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'rm', 'existed_%d' % f])
+
+ # delay recovery, and make the pg log very long (to prevent backfill)
+ manager.raw_cluster_cmd(
+ 'tell', 'osd.1',
+ 'injectargs',
+ '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
+ )
+
+ manager.kill_osd(0)
+ manager.mark_down_osd(0)
+
+ for f in range(1, 10):
+ rados(ctx, mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
+
+ # bring osd.0 back up, let it peer, but don't replicate the new
+ # objects...
+ log.info('osd.0 command_args is %s' % 'foo')
+ log.info(ctx.daemons.get_daemon('osd', 0).command_args)
+ ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([
+ '--osd-recovery-delay-start', '1000'
+ ])
+ manager.revive_osd(0)
+ manager.mark_in_osd(0)
+ manager.wait_till_osd_is_up(0)
+
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.wait_till_active()
+
+ # take out osd.1 and the only copy of those objects.
+ manager.kill_osd(1)
+ manager.mark_down_osd(1)
+ manager.mark_out_osd(1)
+ manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
+
+    # bring up osd.2 so that things would otherwise, in theory, recover fully
+ manager.revive_osd(2)
+ manager.mark_in_osd(2)
+ manager.wait_till_osd_is_up(2)
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_till_active()
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+
+ # verify that there are unfound objects
+ unfound = manager.get_num_unfound_objects()
+ log.info("there are %d unfound objects" % unfound)
+ assert unfound
+
+ # mark stuff lost
+ pgs = manager.get_pg_stats()
+ for pg in pgs:
+ if pg['stat_sum']['num_objects_unfound'] > 0:
+ primary = 'osd.%d' % pg['acting'][0]
+
+ # verify that i can list them direct from the osd
+ log.info('listing missing/lost in %s state %s', pg['pgid'],
+ pg['state']);
+ m = manager.list_pg_missing(pg['pgid'])
+ #log.info('%s' % m)
+ assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
+ num_unfound=0
+ for o in m['objects']:
+ if len(o['locations']) == 0:
+ num_unfound += 1
+ assert m['num_unfound'] == num_unfound
+
+ log.info("reverting unfound in %s on %s", pg['pgid'], primary)
+ manager.raw_cluster_cmd('pg', pg['pgid'],
+ 'mark_unfound_lost', 'revert')
+ else:
+ log.info("no unfound in %s", pg['pgid'])
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_recovery()
+
+ # verify result
+ for f in range(1, 10):
+ err = rados(ctx, mon, ['-p', 'data', 'get', 'new_%d' % f, '-'])
+ assert err
+ err = rados(ctx, mon, ['-p', 'data', 'get', 'existed_%d' % f, '-'])
+ assert err
+ err = rados(ctx, mon, ['-p', 'data', 'get', 'existing_%d' % f, '-'])
+ assert not err
+
+ # see if osd.1 can cope
+ manager.revive_osd(1)
+ manager.mark_in_osd(1)
+ manager.wait_till_osd_is_up(1)
+ manager.wait_for_clean()
--- /dev/null
+"""
+Force pg creation on all osds
+"""
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+import logging
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Create the specified number of pools and write 16 objects to them (thereby forcing
+ the PG creation on each OSD). This task creates pools from all the clients,
+ in parallel. It is easy to add other daemon types which have the appropriate
+ permissions, but I don't think anything else does.
+ The config is just the number of pools to create. I recommend setting
+ "mon create pg interval" to a very low value in your ceph config to speed
+ this up.
+
+ You probably want to do this to look at memory consumption, and
+ maybe to test how performance changes with the number of PGs. For example:
+
+ tasks:
+ - ceph:
+ config:
+ mon:
+ mon create pg interval: 1
+ - manypools: 3000
+ - radosbench:
+ clients: [client.0]
+ time: 360
+ """
+
+ log.info('creating {n} pools'.format(n=config))
+
+ poolnum = int(config)
+ creator_remotes = []
+ client_roles = teuthology.all_roles_of_type(ctx.cluster, 'client')
+ log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles))
+ for role in client_roles:
+ log.info('role={role_}'.format(role_=role))
+ (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys()
+ creator_remotes.append((creator_remote, 'client.{id}'.format(id=role)))
+
+ remaining_pools = poolnum
+ poolprocs=dict()
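+    # Each pass of this loop hands out one pool per client remote: `rados
+    # mkpool` creates the pool and `rados bench ... write -t 16 --block-size 1`
+    # writes 16 tiny objects into it (per the task docstring), which is what
+    # forces PG creation on the OSDs.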
+ while (remaining_pools > 0):
+ log.info('{n} pools remaining to create'.format(n=remaining_pools))
+ for remote, role_ in creator_remotes:
+ poolnum = remaining_pools
+ remaining_pools -= 1
+ if remaining_pools < 0:
+ continue
+ log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
+ proc = remote.run(
+ args=[
+ 'rados',
+ '--name', role_,
+ 'mkpool', 'pool{num}'.format(num=poolnum), '-1',
+ run.Raw('&&'),
+ 'rados',
+ '--name', role_,
+ '--pool', 'pool{num}'.format(num=poolnum),
+ 'bench', '0', 'write', '-t', '16', '--block-size', '1'
+ ],
+ wait = False
+ )
+ log.info('waiting for pool and object creates')
+ poolprocs[remote] = proc
+
+ run.wait(poolprocs.itervalues())
+
+    log.info('created all {n} pools and wrote 16 objects to each'.format(n=config))
--- /dev/null
+
+import logging
+import contextlib
+import time
+import ceph_manager
+from teuthology import misc
+from teuthology.orchestra.run import CommandFailedError, Raw
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Go through filesystem creation with a synthetic failure in an MDS
+ in its 'up:creating' state, to exercise the retry behaviour.
+ """
+ # Grab handles to the teuthology objects of interest
+ mdslist = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
+ if len(mdslist) != 1:
+ # Require exactly one MDS, the code path for creation failure when
+ # a standby is available is different
+ raise RuntimeError("This task requires exactly one MDS")
+
+ mds_id = mdslist[0]
+ (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys()
+ manager = ceph_manager.CephManager(
+ mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'),
+ )
+
+ # Stop the MDS and reset the filesystem so that next start will go into CREATING
+ mds = ctx.daemons.get_daemon('mds', mds_id)
+ mds.stop()
+ data_pool_id = manager.get_pool_num("data")
+ md_pool_id = manager.get_pool_num("metadata")
+    manager.raw_cluster_cmd_result('mds', 'newfs', str(md_pool_id), str(data_pool_id),
+ '--yes-i-really-mean-it')
+
+ # Start the MDS with mds_kill_create_at set, it will crash during creation
+ mds.restart_with_args(["--mds_kill_create_at=1"])
+ try:
+ mds.wait_for_exit()
+ except CommandFailedError as e:
+ if e.exitstatus == 1:
+ log.info("MDS creation killed as expected")
+ else:
+ log.error("Unexpected status code %s" % e.exitstatus)
+ raise
+
+ # Since I have intentionally caused a crash, I will clean up the resulting core
+ # file to avoid task.internal.coredump seeing it as a failure.
+ log.info("Removing core file from synthetic MDS failure")
+ mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))])
+
+ # It should have left the MDS map state still in CREATING
+ status = manager.get_mds_status(mds_id)
+ assert status['state'] == 'up:creating'
+
+ # Start the MDS again without the kill flag set, it should proceed with creation successfully
+ mds.restart()
+
+ # Wait for state ACTIVE
+ t = 0
+ create_timeout = 120
+ while True:
+ status = manager.get_mds_status(mds_id)
+ if status['state'] == 'up:active':
+ log.info("MDS creation completed successfully")
+ break
+ elif status['state'] == 'up:creating':
+ log.info("MDS still in creating state")
+ if t > create_timeout:
+ log.error("Creating did not complete within %ss" % create_timeout)
+ raise RuntimeError("Creating did not complete within %ss" % create_timeout)
+ t += 1
+ time.sleep(1)
+ else:
+ log.error("Unexpected MDS state: %s" % status['state'])
+ assert(status['state'] in ['up:active', 'up:creating'])
+
+ # The system should be back up in a happy healthy state, go ahead and run any further tasks
+ # inside this context.
+ yield
--- /dev/null
+"""
+Thrash mds by simulating failures
+"""
+import logging
+import contextlib
+import ceph_manager
+import random
+import time
+from gevent.greenlet import Greenlet
+from gevent.event import Event
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+
+class MDSThrasher(Greenlet):
+ """
+ MDSThrasher::
+
+ The MDSThrasher thrashes MDSs during execution of other tasks (workunits, etc).
+
+    The config is optional. Many of the config parameters are a maximum value
+    to use when selecting a random value from a range. To always use the maximum
+    value, set randomize to false. The config is a dict containing some or all of:
+
+ seed: [no default] seed the random number generator
+
+    randomize: [default: true] enables randomization of the delays; when false, the max values are used
+
+ max_thrash: [default: 1] the maximum number of MDSs that will be thrashed at
+ any given time.
+
+ max_thrash_delay: [default: 30] maximum number of seconds to delay before
+ thrashing again.
+
+ max_revive_delay: [default: 10] maximum number of seconds to delay before
+ bringing back a thrashed MDS
+
+ thrash_in_replay: [default: 0.0] likelihood that the MDS will be thrashed
+ during replay. Value should be between 0.0 and 1.0
+
+ max_replay_thrash_delay: [default: 4] maximum number of seconds to delay while in
+ the replay state before thrashing
+
+ thrash_weights: allows specific MDSs to be thrashed more/less frequently. This option
+ overrides anything specified by max_thrash. This option is a dict containing
+ mds.x: weight pairs. For example, [mds.a: 0.7, mds.b: 0.3, mds.c: 0.0]. Each weight
+ is a value from 0.0 to 1.0. Any MDSs not specified will be automatically
+      given a weight of 0.0. For a given MDS, by default the thrasher delays for up
+      to max_thrash_delay, thrashes, waits for the MDS to recover, and iterates. If a non-zero
+ weight is specified for an MDS, for each iteration the thrasher chooses whether to thrash
+ during that iteration based on a random value [0-1] not exceeding the weight of that MDS.
+
+ Examples::
+
+
+ The following example sets the likelihood that mds.a will be thrashed
+ to 80%, mds.b to 20%, and other MDSs will not be thrashed. It also sets the
+ likelihood that an MDS will be thrashed in replay to 40%.
+ Thrash weights do not have to sum to 1.
+
+ tasks:
+ - ceph:
+ - mds_thrash:
+ thrash_weights:
+ - mds.a: 0.8
+ - mds.b: 0.2
+ thrash_in_replay: 0.4
+ - ceph-fuse:
+ - workunit:
+ clients:
+ all: [suites/fsx.sh]
+
+ The following example disables randomization, and uses the max delay values:
+
+ tasks:
+ - ceph:
+ - mds_thrash:
+ max_thrash_delay: 10
+ max_revive_delay: 1
+ max_replay_thrash_delay: 4
+
+ """
+
+ def __init__(self, ctx, manager, config, logger, failure_group, weight):
+ super(MDSThrasher, self).__init__()
+
+ self.ctx = ctx
+ self.manager = manager
+ assert self.manager.is_clean()
+
+ self.stopping = Event()
+ self.logger = logger
+ self.config = config
+
+ self.randomize = bool(self.config.get('randomize', True))
+ self.max_thrash_delay = float(self.config.get('thrash_delay', 30.0))
+ self.thrash_in_replay = float(self.config.get('thrash_in_replay', False))
+ assert self.thrash_in_replay >= 0.0 and self.thrash_in_replay <= 1.0, 'thrash_in_replay ({v}) must be between [0.0, 1.0]'.format(
+ v=self.thrash_in_replay)
+
+ self.max_replay_thrash_delay = float(self.config.get('max_replay_thrash_delay', 4.0))
+
+ self.max_revive_delay = float(self.config.get('max_revive_delay', 10.0))
+
+ self.failure_group = failure_group
+ self.weight = weight
+
+ def _run(self):
+ try:
+ self.do_thrash()
+ except:
+ # Log exceptions here so we get the full backtrace (it's lost
+ # by the time someone does a .get() on this greenlet)
+ self.logger.exception("Exception in do_thrash:")
+ raise
+
+ def log(self, x):
+ """Write data to logger assigned to this MDThrasher"""
+ self.logger.info(x)
+
+ def stop(self):
+ self.stopping.set()
+
+ def do_thrash(self):
+ """
+ Perform the random thrashing action
+ """
+ self.log('starting mds_do_thrash for failure group: ' + ', '.join(
+ ['mds.{_id}'.format(_id=_f) for _f in self.failure_group]))
+ while not self.stopping.is_set():
+ delay = self.max_thrash_delay
+ if self.randomize:
+ delay = random.randrange(0.0, self.max_thrash_delay)
+
+ if delay > 0.0:
+ self.log('waiting for {delay} secs before thrashing'.format(delay=delay))
+ self.stopping.wait(delay)
+ if self.stopping.is_set():
+ continue
+
+            skip = random.uniform(0.0, 1.0)
+ if self.weight < 1.0 and skip > self.weight:
+ self.log('skipping thrash iteration with skip ({skip}) > weight ({weight})'.format(skip=skip,
+ weight=self.weight))
+ continue
+
+ # find the active mds in the failure group
+ statuses = [self.manager.get_mds_status(m) for m in self.failure_group]
+ actives = filter(lambda s: s and s['state'] == 'up:active', statuses)
+ assert len(actives) == 1, 'Can only have one active in a failure group'
+
+ active_mds = actives[0]['name']
+ active_rank = actives[0]['rank']
+
+ self.log('kill mds.{id} (rank={r})'.format(id=active_mds, r=active_rank))
+ self.manager.kill_mds_by_rank(active_rank)
+
+ # wait for mon to report killed mds as crashed
+ last_laggy_since = None
+ itercount = 0
+ while True:
+ failed = self.manager.get_mds_status_all()['failed']
+ status = self.manager.get_mds_status(active_mds)
+ if not status:
+ break
+ if 'laggy_since' in status:
+ last_laggy_since = status['laggy_since']
+ break
+ if any([(f == active_mds) for f in failed]):
+ break
+ self.log(
+ 'waiting till mds map indicates mds.{_id} is laggy/crashed, in failed state, or mds.{_id} is removed from mdsmap'.format(
+ _id=active_mds))
+ itercount = itercount + 1
+ if itercount > 10:
+ self.log('mds map: {status}'.format(status=self.manager.get_mds_status_all()))
+ time.sleep(2)
+ if last_laggy_since:
+ self.log(
+ 'mds.{_id} reported laggy/crashed since: {since}'.format(_id=active_mds, since=last_laggy_since))
+ else:
+                self.log('mds.{_id} down, removed from mdsmap'.format(_id=active_mds))
+
+ # wait for a standby mds to takeover and become active
+ takeover_mds = None
+ takeover_rank = None
+ itercount = 0
+ while True:
+ statuses = [self.manager.get_mds_status(m) for m in self.failure_group]
+ actives = filter(lambda s: s and s['state'] == 'up:active', statuses)
+ if len(actives) > 0:
+ assert len(actives) == 1, 'Can only have one active in failure group'
+ takeover_mds = actives[0]['name']
+ takeover_rank = actives[0]['rank']
+ break
+ itercount = itercount + 1
+ if itercount > 10:
+ self.log('mds map: {status}'.format(status=self.manager.get_mds_status_all()))
+
+ self.log('New active mds is mds.{_id}'.format(_id=takeover_mds))
+
+ # wait for a while before restarting old active to become new
+ # standby
+ delay = self.max_revive_delay
+ if self.randomize:
+ delay = random.randrange(0.0, self.max_revive_delay)
+
+ self.log('waiting for {delay} secs before reviving mds.{id}'.format(
+ delay=delay, id=active_mds))
+ time.sleep(delay)
+
+ self.log('reviving mds.{id}'.format(id=active_mds))
+ self.manager.revive_mds(active_mds, standby_for_rank=takeover_rank)
+
+ status = {}
+ while True:
+ status = self.manager.get_mds_status(active_mds)
+ if status and (status['state'] == 'up:standby' or status['state'] == 'up:standby-replay'):
+ break
+ self.log(
+ 'waiting till mds map indicates mds.{_id} is in standby or standby-replay'.format(_id=active_mds))
+ time.sleep(2)
+ self.log('mds.{_id} reported in {state} state'.format(_id=active_mds, state=status['state']))
+
+ # don't do replay thrashing right now
+ continue
+ # this might race with replay -> active transition...
+ if status['state'] == 'up:replay' and random.randrange(0.0, 1.0) < self.thrash_in_replay:
+
+ delay = self.max_replay_thrash_delay
+ if self.randomize:
+ delay = random.randrange(0.0, self.max_replay_thrash_delay)
+ time.sleep(delay)
+ self.log('kill replaying mds.{id}'.format(id=self.to_kill))
+ self.manager.kill_mds(self.to_kill)
+
+ delay = self.max_revive_delay
+ if self.randomize:
+ delay = random.randrange(0.0, self.max_revive_delay)
+
+ self.log('waiting for {delay} secs before reviving mds.{id}'.format(
+ delay=delay, id=self.to_kill))
+ time.sleep(delay)
+
+ self.log('revive mds.{id}'.format(id=self.to_kill))
+ self.manager.revive_mds(self.to_kill)
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Stress test the mds by thrashing while another task/workunit
+ is running.
+
+ Please refer to MDSThrasher class for further information on the
+ available options.
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'mds_thrash task only accepts a dict for configuration'
+ mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
+ assert len(mdslist) > 1, \
+ 'mds_thrash task requires at least 2 metadata servers'
+
+ # choose random seed
+ seed = None
+ if 'seed' in config:
+ seed = int(config['seed'])
+ else:
+ seed = int(time.time())
+ log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
+ random.seed(seed)
+
+ max_thrashers = config.get('max_thrash', 1)
+ thrashers = {}
+
+ (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys()
+ manager = ceph_manager.CephManager(
+ first, ctx=ctx, logger=log.getChild('ceph_manager'),
+ )
+
+ # make sure everyone is in active, standby, or standby-replay
+ log.info('Wait for all MDSs to reach steady state...')
+ statuses = None
+ statuses_by_rank = None
+ while True:
+ statuses = {m: manager.get_mds_status(m) for m in mdslist}
+ statuses_by_rank = {}
+ for _, s in statuses.iteritems():
+ if isinstance(s, dict):
+ statuses_by_rank[s['rank']] = s
+
+ ready = filter(lambda (_, s): s is not None and (s['state'] == 'up:active'
+ or s['state'] == 'up:standby'
+ or s['state'] == 'up:standby-replay'),
+ statuses.items())
+ if len(ready) == len(statuses):
+ break
+ time.sleep(2)
+ log.info('Ready to start thrashing')
+
+ # setup failure groups
+ failure_groups = {}
+ actives = {s['name']: s for (_, s) in statuses.iteritems() if s['state'] == 'up:active'}
+ log.info('Actives is: {d}'.format(d=actives))
+ log.info('Statuses is: {d}'.format(d=statuses_by_rank))
+ for active in actives:
+ for (r, s) in statuses.iteritems():
+ if s['standby_for_name'] == active:
+ if not active in failure_groups:
+ failure_groups[active] = []
+ log.info('Assigning mds rank {r} to failure group {g}'.format(r=r, g=active))
+ failure_groups[active].append(r)
+
+ manager.wait_for_clean()
+ for (active, standbys) in failure_groups.iteritems():
+ weight = 1.0
+ if 'thrash_weights' in config:
+            weight = float(config['thrash_weights'].get('mds.{_id}'.format(_id=active), '0.0'))
+
+ failure_group = [active]
+ failure_group.extend(standbys)
+
+ thrasher = MDSThrasher(
+ ctx, manager, config,
+ logger=log.getChild('mds_thrasher.failure_group.[{a}, {sbs}]'.format(
+ a=active,
+ sbs=', '.join(standbys)
+ )
+ ),
+ failure_group=failure_group,
+ weight=weight)
+ thrasher.start()
+ thrashers[active] = thrasher
+
+ # if thrash_weights isn't specified and we've reached max_thrash,
+ # we're done
+ if not 'thrash_weights' in config and len(thrashers) == max_thrashers:
+ break
+
+ try:
+ log.debug('Yielding')
+ yield
+ finally:
+ log.info('joining mds_thrashers')
+ for t in thrashers:
+ log.info('join thrasher for failure group [{fg}]'.format(fg=', '.join(failure_group)))
+ thrashers[t].stop()
+ thrashers[t].join()
+ log.info('done joining')
--- /dev/null
+instance-id: test
+local-hostname: test
--- /dev/null
+"""
+Handle clock skews in monitors.
+"""
+import logging
+import contextlib
+import ceph_manager
+import time
+import gevent
+from StringIO import StringIO
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+class ClockSkewCheck:
+ """
+ Periodically check if there are any clock skews among the monitors in the
+ quorum. By default, assume no skews are supposed to exist; that can be
+    changed using the 'expect-skew' option. If 'never-fail' is set to true,
+    then we will always succeed and only report skews if any are found.
+
+ This class does not spawn a thread. It assumes that, if that is indeed
+ wanted, it should be done by a third party (for instance, the task using
+ this class). We intend it as such in order to reuse this class if need be.
+
+ This task accepts the following options:
+
+ interval amount of seconds to wait in-between checks. (default: 30.0)
+ max-skew maximum skew, in seconds, that is considered tolerable before
+ issuing a warning. (default: 0.05)
+ expect-skew 'true' or 'false', to indicate whether to expect a skew during
+ the run or not. If 'true', the test will fail if no skew is
+ found, and succeed if a skew is indeed found; if 'false', it's
+ the other way around. (default: false)
+ never-fail Don't fail the run if a skew is detected and we weren't
+ expecting it, or if no skew is detected and we were expecting
+ it. (default: False)
+
+ at-least-once Runs at least once, even if we are told to stop.
+ (default: True)
+ at-least-once-timeout If we were told to stop but we are attempting to
+ run at least once, timeout after this many seconds.
+ (default: 600)
+
+ Example:
+ Expect a skew higher than 0.05 seconds, but only report it without
+ failing the teuthology run.
+
+ - mon_clock_skew_check:
+ interval: 30
+ max-skew: 0.05
+        expect-skew: true
+ never-fail: true
+ """
+
+ def __init__(self, ctx, manager, config, logger):
+ self.ctx = ctx
+ self.manager = manager
+
+ self.stopping = False
+ self.logger = logger
+ self.config = config
+
+ if self.config is None:
+ self.config = dict()
+
+ self.check_interval = float(self.config.get('interval', 30.0))
+
+ first_mon = teuthology.get_first_mon(ctx, config)
+ remote = ctx.cluster.only(first_mon).remotes.keys()[0]
+ proc = remote.run(
+ args=[
+ 'sudo',
+ 'ceph-mon',
+ '-i', first_mon[4:],
+ '--show-config-value', 'mon_clock_drift_allowed'
+ ], stdout=StringIO(), wait=True
+ )
+ self.max_skew = self.config.get('max-skew', float(proc.stdout.getvalue()))
+
+ self.expect_skew = self.config.get('expect-skew', False)
+ self.never_fail = self.config.get('never-fail', False)
+ self.at_least_once = self.config.get('at-least-once', True)
+ self.at_least_once_timeout = self.config.get('at-least-once-timeout', 600.0)
+
+ def info(self, x):
+ """
+ locally define logger for info messages
+ """
+ self.logger.info(x)
+
+ def warn(self, x):
+ """
+ locally define logger for warnings
+ """
+ self.logger.warn(x)
+
+ def debug(self, x):
+ """
+ locally define logger for debug messages
+ """
+ self.logger.info(x)
+ self.logger.debug(x)
+
+ def finish(self):
+ """
+ Break out of the do_check loop.
+ """
+ self.stopping = True
+
+ def sleep_interval(self):
+ """
+ If a sleep interval is set, sleep for that amount of time.
+ """
+ if self.check_interval > 0.0:
+ self.debug('sleeping for {s} seconds'.format(
+ s=self.check_interval))
+ time.sleep(self.check_interval)
+
+ def print_skews(self, skews):
+ """
+ Display skew values.
+ """
+ total = len(skews)
+ if total > 0:
+ self.info('---------- found {n} skews ----------'.format(n=total))
+ for mon_id, values in skews.iteritems():
+ self.info('mon.{id}: {v}'.format(id=mon_id, v=values))
+ self.info('-------------------------------------')
+ else:
+ self.info('---------- no skews were found ----------')
+
+ def do_check(self):
+ """
+ Clock skew checker. Loops until finish() is called.
+ """
+ self.info('start checking for clock skews')
+ skews = dict()
+ ran_once = False
+
+ started_on = None
+
+ while not self.stopping or (self.at_least_once and not ran_once):
+
+ if self.at_least_once and not ran_once and self.stopping:
+ if started_on is None:
+ self.info('kicking-off timeout (if any)')
+ started_on = time.time()
+ elif self.at_least_once_timeout > 0.0:
+ assert time.time() - started_on < self.at_least_once_timeout, \
+ 'failed to obtain a timecheck before timeout expired'
+
+ quorum_size = len(teuthology.get_mon_names(self.ctx))
+ self.manager.wait_for_mon_quorum_size(quorum_size)
+
+ health = self.manager.get_mon_health(True)
+ timechecks = health['timechecks']
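+            # Shape of `timechecks` as consumed below (inferred from this
+            # loop, not from monitor documentation):
+            #   {'round': 2, 'round_status': 'finished' | 'on-going',
+            #    'mons': [{'name': 'a', 'skew': 0.001, 'health': 'HEALTH_OK',
+            #              'details': '...'}, ...]}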
+
+ clean_check = False
+
+ if timechecks['round_status'] == 'finished':
+ assert (timechecks['round'] % 2) == 0, \
+ 'timecheck marked as finished but round ' \
+ 'disagrees (r {r})'.format(
+ r=timechecks['round'])
+ clean_check = True
+ else:
+ assert timechecks['round_status'] == 'on-going', \
+ 'timecheck status expected \'on-going\' ' \
+ 'but found \'{s}\' instead'.format(
+ s=timechecks['round_status'])
+ if 'mons' in timechecks.keys() and len(timechecks['mons']) > 1:
+ self.info('round still on-going, but there are available reports')
+ else:
+ self.info('no timechecks available just yet')
+ self.sleep_interval()
+ continue
+
+ assert len(timechecks['mons']) > 1, \
+ 'there are not enough reported timechecks; ' \
+ 'expected > 1 found {n}'.format(n=len(timechecks['mons']))
+
+ for check in timechecks['mons']:
+ mon_skew = float(check['skew'])
+ mon_health = check['health']
+ mon_id = check['name']
+ if abs(mon_skew) > self.max_skew:
+ assert mon_health == 'HEALTH_WARN', \
+ 'mon.{id} health is \'{health}\' but skew {s} > max {ms}'.format(
+ id=mon_id,health=mon_health,s=abs(mon_skew),ms=self.max_skew)
+
+ log_str = 'mon.{id} with skew {s} > max {ms}'.format(
+ id=mon_id,s=abs(mon_skew),ms=self.max_skew)
+
+ """ add to skew list """
+ details = check['details']
+ skews[mon_id] = {'skew': mon_skew, 'details': details}
+
+ if self.expect_skew:
+ self.info('expected skew: {str}'.format(str=log_str))
+ else:
+ self.warn('unexpected skew: {str}'.format(str=log_str))
+
+ if clean_check or (self.expect_skew and len(skews) > 0):
+ ran_once = True
+ self.print_skews(skews)
+ self.sleep_interval()
+
+ total = len(skews)
+ self.print_skews(skews)
+
+ error_str = ''
+ found_error = False
+
+ if self.expect_skew:
+ if total == 0:
+ error_str = 'We were expecting a skew, but none was found!'
+ found_error = True
+ else:
+ if total > 0:
+ error_str = 'We were not expecting a skew, but we did find it!'
+ found_error = True
+
+ if found_error:
+ self.info(error_str)
+ if not self.never_fail:
+ assert False, error_str
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+    Use the ClockSkewCheck class to check for clock skews on the monitors.
+ This task will spawn a thread running ClockSkewCheck's do_check().
+
+ All the configuration will be directly handled by ClockSkewCheck,
+ so please refer to the class documentation for further information.
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'mon_clock_skew_check task only accepts a dict for configuration'
+ log.info('Beginning mon_clock_skew_check...')
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ skew_check = ClockSkewCheck(ctx,
+ manager, config,
+ logger=log.getChild('mon_clock_skew_check'))
+ skew_check_thread = gevent.spawn(skew_check.do_check)
+ try:
+ yield
+ finally:
+ log.info('joining mon_clock_skew_check')
+ skew_check.finish()
+ skew_check_thread.get()
+
+
--- /dev/null
+"""
+Monitor recovery
+"""
+import logging
+import ceph_manager
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Test monitor recovery.
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ mons = [f.split('.')[1] for f in teuthology.get_mon_names(ctx)]
+ log.info("mon ids = %s" % mons)
+
+ manager.wait_for_mon_quorum_size(len(mons))
+
+ log.info('verifying all monitors are in the quorum')
+ for m in mons:
+ s = manager.get_mon_status(m)
+ assert s['state'] == 'leader' or s['state'] == 'peon'
+ assert len(s['quorum']) == len(mons)
+
+ log.info('restarting each monitor in turn')
+ for m in mons:
+ # stop a monitor
+ manager.kill_mon(m)
+ manager.wait_for_mon_quorum_size(len(mons) - 1)
+
+ # restart
+ manager.revive_mon(m)
+ manager.wait_for_mon_quorum_size(len(mons))
+
+ # in forward and reverse order,
+ rmons = mons
+ rmons.reverse()
+ for mons in mons, rmons:
+ log.info('stopping all monitors')
+ for m in mons:
+ manager.kill_mon(m)
+
+ log.info('forming a minimal quorum for %s, then adding monitors' % mons)
+ qnum = (len(mons) / 2) + 1
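+        # majority quorum size; the integer division is intended here, e.g.
+        # 5 monitors -> qnum of 3, 3 monitors -> 2.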
+ num = 0
+ for m in mons:
+ manager.revive_mon(m)
+ num += 1
+ if num >= qnum:
+ manager.wait_for_mon_quorum_size(num)
+
+ # on both leader and non-leader ranks...
+ for rank in [0, 1]:
+ # take one out
+ log.info('removing mon %s' % mons[rank])
+ manager.kill_mon(mons[rank])
+ manager.wait_for_mon_quorum_size(len(mons) - 1)
+
+ log.info('causing some monitor log activity')
+ m = 30
+ for n in range(1, m):
+ manager.raw_cluster_cmd('log', '%d of %d' % (n, m))
+
+ log.info('adding mon %s back in' % mons[rank])
+ manager.revive_mon(mons[rank])
+ manager.wait_for_mon_quorum_size(len(mons))
--- /dev/null
+"""
+Monitor thrash
+"""
+import logging
+import contextlib
+import ceph_manager
+import random
+import time
+import gevent
+import json
+import math
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def _get_mons(ctx):
+ """
+ Get monitor names from the context value.
+ """
+ mons = [f[len('mon.'):] for f in teuthology.get_mon_names(ctx)]
+ return mons
+
+class MonitorThrasher:
+ """
+ How it works::
+
+ - pick a monitor
+ - kill it
+ - wait for quorum to be formed
+ - sleep for 'revive_delay' seconds
+ - revive monitor
+ - wait for quorum to be formed
+ - sleep for 'thrash_delay' seconds
+
+ Options::
+
+ seed Seed to use on the RNG to reproduce a previous
+ behaviour (default: None; i.e., not set)
+ revive_delay Number of seconds to wait before reviving
+ the monitor (default: 10)
+ thrash_delay Number of seconds to wait in-between
+ test iterations (default: 0)
+ store_thrash Thrash the monitor store before killing the
+ monitor being thrashed (default: False)
+ store_thrash_probability Probability of thrashing a monitor's store
+ (default: 50)
+ thrash_many Thrash multiple monitors instead of just one. If
+ 'maintain_quorum' is set to False, then we will
+ thrash up to as many monitors as there are
+ available. (default: False)
+ maintain_quorum Always maintain quorum, taking care on how many
+ monitors we kill during the thrashing. If we
+ happen to only have one or two monitors configured,
+ if this option is set to True, then we won't run
+ this task as we cannot guarantee maintenance of
+ quorum. Setting it to false however would allow the
+ task to run with as many as just one single monitor.
+ (default: True)
+ freeze_mon_probability: how often to freeze the mon instead of killing it,
+ in % (default: 10)
+ freeze_mon_duration: how many seconds to freeze the mon (default: 15)
+ scrub Scrub after each iteration (default: True)
+
+ Note: if 'store_thrash' is set to True, then 'maintain_quorum' must also
+ be set to True.
+
+ For example::
+
+ tasks:
+ - ceph:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ store_thrash: true
+ store_thrash_probability: 40
+ seed: 31337
+ maintain_quorum: true
+ thrash_many: true
+ - ceph-fuse:
+ - workunit:
+ clients:
+ all:
+ - mon/workloadgen.sh
+ """
+ def __init__(self, ctx, manager, config, logger):
+ self.ctx = ctx
+ self.manager = manager
+ self.manager.wait_for_clean()
+
+ self.stopping = False
+ self.logger = logger
+ self.config = config
+
+ if self.config is None:
+ self.config = dict()
+
+ """ Test reproducibility """
+ self.random_seed = self.config.get('seed', None)
+
+ if self.random_seed is None:
+ self.random_seed = int(time.time())
+
+ self.rng = random.Random()
+ self.rng.seed(int(self.random_seed))
+
+ """ Monitor thrashing """
+ self.revive_delay = float(self.config.get('revive_delay', 10.0))
+ self.thrash_delay = float(self.config.get('thrash_delay', 0.0))
+
+ self.thrash_many = self.config.get('thrash_many', False)
+ self.maintain_quorum = self.config.get('maintain_quorum', True)
+
+ self.scrub = self.config.get('scrub', True)
+
+ self.freeze_mon_probability = float(self.config.get('freeze_mon_probability', 10))
+ self.freeze_mon_duration = float(self.config.get('freeze_mon_duration', 15.0))
+
+ assert self.max_killable() > 0, \
+ 'Unable to kill at least one monitor with the current config.'
+
+ """ Store thrashing """
+ self.store_thrash = self.config.get('store_thrash', False)
+ self.store_thrash_probability = int(
+ self.config.get('store_thrash_probability', 50))
+ if self.store_thrash:
+ assert self.store_thrash_probability > 0, \
+ 'store_thrash is set, probability must be > 0'
+ assert self.maintain_quorum, \
+ 'store_thrash = true must imply maintain_quorum = true'
+
+ self.thread = gevent.spawn(self.do_thrash)
+
+ def log(self, x):
+ """
+ locally log info messages
+ """
+ self.logger.info(x)
+
+ def do_join(self):
+ """
+ Break out of this processes thrashing loop.
+ """
+ self.stopping = True
+ self.thread.get()
+
+ def should_thrash_store(self):
+ """
+ If allowed, indicate that we should thrash a certain percentage of
+ the time as determined by the store_thrash_probability value.
+ """
+ if not self.store_thrash:
+ return False
+ return self.rng.randrange(0, 101) < self.store_thrash_probability
+
+ def thrash_store(self, mon):
+ """
+ Thrash the monitor specified.
+ :param mon: monitor to thrash
+ """
+ addr = self.ctx.ceph.conf['mon.%s' % mon]['mon addr']
+ self.log('thrashing mon.{id}@{addr} store'.format(id=mon, addr=addr))
+ out = self.manager.raw_cluster_cmd('-m', addr, 'sync', 'force')
+ j = json.loads(out)
+ assert j['ret'] == 0, \
+ 'error forcing store sync on mon.{id}:\n{ret}'.format(
+ id=mon,ret=out)
+
+ def should_freeze_mon(self):
+ """
+ Indicate that we should freeze a certain percentage of the time
+ as determined by the freeze_mon_probability value.
+ """
+ return self.rng.randrange(0, 101) < self.freeze_mon_probability
+
+ def freeze_mon(self, mon):
+ """
+ Send STOP signal to freeze the monitor.
+ """
+ log.info('Sending STOP to mon %s', mon)
+ self.manager.signal_mon(mon, 19) # STOP
+
+ def unfreeze_mon(self, mon):
+ """
+ Send CONT signal to unfreeze the monitor.
+ """
+ log.info('Sending CONT to mon %s', mon)
+ self.manager.signal_mon(mon, 18) # CONT
+
+ def kill_mon(self, mon):
+ """
+ Kill the monitor specified
+ """
+ self.log('killing mon.{id}'.format(id=mon))
+ self.manager.kill_mon(mon)
+
+ def revive_mon(self, mon):
+ """
+ Revive the monitor specified
+ """
+ self.log('reviving mon.{id}'.format(id=mon))
+ self.manager.revive_mon(mon)
+
+ def max_killable(self):
+ """
+ Return the maximum number of monitors we can kill.
+ """
+ m = len(_get_mons(self.ctx))
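+ # e.g. with maintain_quorum: 3 mons -> at most 1 killable, 5 mons -> 2;
+ # without it every monitor may be killed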
+ if self.maintain_quorum:
+ return max(math.ceil(m/2.0)-1, 0)
+ else:
+ return m
+
+ def do_thrash(self):
+ """
+ Continuously loop and thrash the monitors.
+ """
+ self.log('start thrashing')
+ self.log('seed: {s}, revive delay: {r}, thrash delay: {t} '\
+ 'thrash many: {tm}, maintain quorum: {mq} '\
+ 'store thrash: {st}, probability: {stp} '\
+ 'freeze mon: prob {fp} duration {fd}'.format(
+ s=self.random_seed,r=self.revive_delay,t=self.thrash_delay,
+ tm=self.thrash_many, mq=self.maintain_quorum,
+ st=self.store_thrash,stp=self.store_thrash_probability,
+ fp=self.freeze_mon_probability,fd=self.freeze_mon_duration,
+ ))
+
+ while not self.stopping:
+ mons = _get_mons(self.ctx)
+ self.manager.wait_for_mon_quorum_size(len(mons))
+ self.log('making sure all monitors are in the quorum')
+ for m in mons:
+ s = self.manager.get_mon_status(m)
+ assert s['state'] == 'leader' or s['state'] == 'peon'
+ assert len(s['quorum']) == len(mons)
+
+ kill_up_to = self.rng.randrange(1, self.max_killable()+1)
+ mons_to_kill = self.rng.sample(mons, kill_up_to)
+ self.log('monitors to thrash: {m}'.format(m=mons_to_kill))
+
+ mons_to_freeze = []
+ for mon in mons:
+ if mon in mons_to_kill:
+ continue
+ if self.should_freeze_mon():
+ mons_to_freeze.append(mon)
+ self.log('monitors to freeze: {m}'.format(m=mons_to_freeze))
+
+ for mon in mons_to_kill:
+ self.log('thrashing mon.{m}'.format(m=mon))
+
+ """ we only thrash stores if we are maintaining quorum """
+ if self.should_thrash_store() and self.maintain_quorum:
+ self.thrash_store(mon)
+
+ self.kill_mon(mon)
+
+ if mons_to_freeze:
+ for mon in mons_to_freeze:
+ self.freeze_mon(mon)
+ self.log('waiting for {delay} secs to unfreeze mons'.format(
+ delay=self.freeze_mon_duration))
+ time.sleep(self.freeze_mon_duration)
+ for mon in mons_to_freeze:
+ self.unfreeze_mon(mon)
+
+ if self.maintain_quorum:
+ self.manager.wait_for_mon_quorum_size(len(mons)-len(mons_to_kill))
+ for m in mons:
+ if m in mons_to_kill:
+ continue
+ s = self.manager.get_mon_status(m)
+ assert s['state'] == 'leader' or s['state'] == 'peon'
+ assert len(s['quorum']) == len(mons)-len(mons_to_kill)
+
+ self.log('waiting for {delay} secs before reviving monitors'.format(
+ delay=self.revive_delay))
+ time.sleep(self.revive_delay)
+
+ for mon in mons_to_kill:
+ self.revive_mon(mon)
+ # do more freezes
+ if mons_to_freeze:
+ for mon in mons_to_freeze:
+ self.freeze_mon(mon)
+ self.log('waiting for {delay} secs to unfreeze mons'.format(
+ delay=self.freeze_mon_duration))
+ time.sleep(self.freeze_mon_duration)
+ for mon in mons_to_freeze:
+ self.unfreeze_mon(mon)
+
+ self.manager.wait_for_mon_quorum_size(len(mons))
+ for m in mons:
+ s = self.manager.get_mon_status(m)
+ assert s['state'] == 'leader' or s['state'] == 'peon'
+ assert len(s['quorum']) == len(mons)
+
+ if self.scrub:
+ self.log('triggering scrub')
+ try:
+ self.manager.raw_cluster_cmd('scrub')
+ except Exception:
+ log.exception("Saw exception while triggering scrub")
+
+ if self.thrash_delay > 0.0:
+ self.log('waiting for {delay} secs before continuing thrashing'.format(
+ delay=self.thrash_delay))
+ time.sleep(self.thrash_delay)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Stress test the monitor by thrashing them while another task/workunit
+ is running.
+
+ Please refer to MonitorThrasher class for further information on the
+ available options.
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'mon_thrash task only accepts a dict for configuration'
+ assert len(_get_mons(ctx)) > 2, \
+ 'mon_thrash task requires at least 3 monitors'
+ log.info('Beginning mon_thrash...')
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+ thrash_proc = MonitorThrasher(ctx,
+ manager, config,
+ logger=log.getChild('mon_thrasher'))
+ try:
+ log.debug('Yielding')
+ yield
+ finally:
+ log.info('joining mon_thrasher')
+ thrash_proc.do_join()
+ mons = _get_mons(ctx)
+ manager.wait_for_mon_quorum_size(len(mons))
--- /dev/null
+"""
+Multibench testing
+"""
+import contextlib
+import logging
+import radosbench
+import time
+import copy
+import gevent
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run multibench
+
+ The config should be as follows:
+
+ multibench:
+ time: <seconds to run total>
+ segments: <number of concurrent benches>
+ radosbench: <config for radosbench>
+
+ example:
+
+ tasks:
+ - ceph:
+ - multibench:
+     time: 360
+     segments: 3
+     radosbench:
+       clients: [client.0]
+       time: 30
+ - interactive:
+ """
+ log.info('Beginning multibench...')
+ assert isinstance(config, dict), \
+ "please list clients to run on"
+
+ def run_one(num):
+ """Run test spawn from gevent"""
+ start = time.time()
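+ # run radosbench repeatedly until the time budget is spent, giving each
+ # iteration its own pool named '<segment>-<iteration>'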
+ benchcontext = copy.copy(config.get('radosbench'))
+ iterations = 0
+ while time.time() - start < int(config.get('time', 600)):
+ log.info("Starting iteration %s of segment %s"%(iterations, num))
+ benchcontext['pool'] = str(num) + "-" + str(iterations)
+ with radosbench.task(ctx, benchcontext):
+ # nothing to do here: radosbench.task waits for the bench on exit
+ pass
+ iterations += 1
+ log.info("Starting %s threads"%(str(config.get('segments', 3)),))
+ segments = [
+ gevent.spawn(run_one, i)
+ for i in range(0, int(config.get('segments', 3)))]
+
+ try:
+ yield
+ finally:
+ [i.get() for i in segments]
--- /dev/null
+"""
+Test Object locations going down
+"""
+import logging
+import ceph_manager
+from teuthology import misc as teuthology
+from util.rados import rados
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Test handling of object location going down
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'lost_unfound task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ while len(manager.get_osd_status()['up']) < 3:
+ manager.sleep(10)
+ manager.wait_for_clean()
+
+ # something that is always there
+ dummyfile = '/etc/fstab'
+
+ # take 0, 1 out
+ manager.mark_out_osd(0)
+ manager.mark_out_osd(1)
+ manager.wait_for_clean()
+
+ # delay recovery, and make the pg log very long (to prevent backfill)
+ for osd_id in range(4):
+     manager.raw_cluster_cmd(
+         'tell', 'osd.%d' % osd_id,
+         'injectargs',
+         '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
+         )
+
+ # kludge to make sure they get a map
+ rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
+
+ # create old objects
+ for f in range(1, 10):
+ rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
+
+ manager.mark_out_osd(3)
+ manager.wait_till_active()
+
+ manager.mark_in_osd(0)
+ manager.wait_till_active()
+
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+
+ manager.mark_out_osd(2)
+ manager.wait_till_active()
+
+ # bring up 1
+ manager.mark_in_osd(1)
+ manager.wait_till_active()
+
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ log.info("Getting unfound objects")
+ unfound = manager.get_num_unfound_objects()
+ assert not unfound
+
+ manager.kill_osd(2)
+ manager.mark_down_osd(2)
+ manager.kill_osd(3)
+ manager.mark_down_osd(3)
+
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ log.info("Getting unfound objects")
+ unfound = manager.get_num_unfound_objects()
+ assert unfound
--- /dev/null
+"""
+Run omapbench executable within teuthology
+"""
+import contextlib
+import logging
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run omapbench
+
+ The config should be as follows::
+
+ omapbench:
+ clients: [client list]
+ threads: <threads at once>
+ objects: <number of objects to write>
+ entries: <number of entries per object map>
+ keysize: <number of characters per object map key>
+ valsize: <number of characters per object map val>
+ increment: <interval to show in histogram (in ms)>
+ omaptype: <how the omaps should be generated>
+
+ example::
+
+ tasks:
+ - ceph:
+ - omapbench:
+ clients: [client.0]
+ threads: 30
+ objects: 1000
+ entries: 10
+ keysize: 10
+ valsize: 100
+ increment: 100
+ omaptype: uniform
+ - interactive:
+ """
+ log.info('Beginning omapbench...')
+ assert isinstance(config, dict), \
+ "please list clients to run on"
+ omapbench = {}
+ testdir = teuthology.get_testdir(ctx)
+ log.debug('increment: %s', config.get('increment', -1))
+ for role in config.get('clients', ['client.0']):
+ assert isinstance(role, basestring)
+ PREFIX = 'client.'
+ assert role.startswith(PREFIX)
+ id_ = role[len(PREFIX):]
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ proc = remote.run(
+ args=[
+ "/bin/sh", "-c",
+ " ".join(['adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage',
+ 'omapbench',
+ '--name', role[len(PREFIX):],
+ '-t', str(config.get('threads', 30)),
+ '-o', str(config.get('objects', 1000)),
+ '--entries', str(config.get('entries',10)),
+ '--keysize', str(config.get('keysize',10)),
+ '--valsize', str(config.get('valsize',1000)),
+ '--inc', str(config.get('increment',10)),
+ '--omaptype', str(config.get('omaptype','uniform'))
+ ]).format(tdir=testdir),
+ ],
+ logger=log.getChild('omapbench.{id}'.format(id=id_)),
+ stdin=run.PIPE,
+ wait=False
+ )
+ omapbench[id_] = proc
+
+ try:
+ yield
+ finally:
+ log.info('joining omapbench')
+ run.wait(omapbench.itervalues())
--- /dev/null
+"""
+Osd backfill test
+"""
+import logging
+import ceph_manager
+import time
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+
+def rados_start(ctx, remote, cmd):
+ """
+ Run a remote rados command (currently used to only write data)
+ """
+ log.info("rados %s" % ' '.join(cmd))
+ testdir = teuthology.get_testdir(ctx)
+ pre = [
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'rados',
+ ];
+ pre.extend(cmd)
+ proc = remote.run(
+ args=pre,
+ wait=False,
+ )
+ return proc
+
+def task(ctx, config):
+ """
+ Test backfill
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'thrashosds task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+ log.info('num_osds is %s' % num_osds)
+ assert num_osds == 3
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ while len(manager.get_osd_status()['up']) < 3:
+ manager.sleep(10)
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_clean()
+
+ # write some data
+ p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
+ '--no-cleanup'])
+ err = p.wait();
+ log.info('err is %d' % err)
+
+ # mark osd.0 out to trigger a rebalance/backfill
+ manager.mark_out_osd(0)
+
+ # also mark it down so it won't be included in pg_temps
+ manager.kill_osd(0)
+ manager.mark_down_osd(0)
+
+ # wait for everything to peer and be happy...
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_recovery()
+
+ # write some new data
+ p = rados_start(ctx, mon, ['-p', 'data', 'bench', '30', 'write', '-b', '4096',
+ '--no-cleanup'])
+
+ time.sleep(15)
+
+ # blackhole + restart osd.1
+ # this triggers a divergent backfill target
+ manager.blackhole_kill_osd(1)
+ time.sleep(2)
+ manager.revive_osd(1)
+
+ # wait for our writes to complete + succeed
+ err = p.wait()
+ log.info('err is %d' % err)
+
+ # cluster must recover
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_recovery()
+
+ # re-add osd.0
+ manager.revive_osd(0)
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_clean()
+
+
--- /dev/null
+"""
+Handle osdfailsafe configuration settings (nearfull ratio and full ratio)
+"""
+from cStringIO import StringIO
+import logging
+import time
+
+import ceph_manager
+from teuthology.orchestra import run
+from util.rados import rados
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Test handling of osd_failsafe_nearfull_ratio and osd_failsafe_full_ratio
+ configuration settings
+
+ In order for the test to pass, the log-whitelist must be used as follows::
+
+ tasks:
+ - chef:
+ - install:
+ - ceph:
+ log-whitelist: ['OSD near full', 'OSD full dropping all updates']
+ - osd_failsafe_enospc:
+
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'osd_failsafe_enospc task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+ ctx.manager = manager
+
+ # Give 2 seconds for injectargs + osd_op_complaint_time (30) + 2 * osd_heartbeat_interval (6) + 6 padding
+ sleep_time = 50
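+ # the checks below walk the failsafe state machine:
+ # NONE -> NEAR -> FULL -> NEAR -> NONE -> FULL -> NONE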
+
+ # something that is always there
+ dummyfile = '/etc/fstab'
+ dummyfile2 = '/etc/resolv.conf'
+
+ # create 1 pg pool with 1 rep which can only be on osd.0
+ osds = manager.get_osd_dump()
+ for osd in osds:
+ if osd['osd'] != 0:
+ manager.mark_out_osd(osd['osd'])
+
+ log.info('creating pool foo')
+ manager.create_pool("foo")
+ manager.raw_cluster_cmd('osd', 'pool', 'set', 'foo', 'size', '1')
+
+ # State NONE -> NEAR
+ log.info('1. Verify warning messages when exceeding nearfull_ratio')
+
+ proc = mon.run(
+ args=[
+ 'daemon-helper',
+ 'kill',
+ 'ceph', '-w'
+ ],
+ stdin=run.PIPE,
+ stdout=StringIO(),
+ wait=False,
+ )
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .00001')
+
+ time.sleep(sleep_time)
+ proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
+ proc.wait()
+
+ lines = proc.stdout.getvalue().split('\n')
+
+ count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
+ assert count == 2, 'Incorrect number of warning messages expected 2 got %d' % count
+ count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+ assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
+
+ # State NEAR -> FULL
+ log.info('2. Verify error messages when exceeding full_ratio')
+
+ proc = mon.run(
+ args=[
+ 'daemon-helper',
+ 'kill',
+ 'ceph', '-w'
+ ],
+ stdin=run.PIPE,
+ stdout=StringIO(),
+ wait=False,
+ )
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001')
+
+ time.sleep(sleep_time)
+ proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
+ proc.wait()
+
+ lines = proc.stdout.getvalue().split('\n')
+
+ count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+ assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count
+
+ log.info('3. Verify write failure when exceeding full_ratio')
+
+ # Write data should fail
+ ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile1', dummyfile])
+ assert ret != 0, 'Expected write failure but it succeeded with exit status 0'
+
+ # Put back default
+ manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97')
+ time.sleep(10)
+
+ # State FULL -> NEAR
+ log.info('4. Verify write success when NOT exceeding full_ratio')
+
+ # Write should succeed
+ ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile2', dummyfile2])
+ assert ret == 0, 'Expected write to succeed, but got exit status %d' % ret
+
+ log.info('5. Verify warning messages again when exceeding nearfull_ratio')
+
+ proc = mon.run(
+ args=[
+ 'daemon-helper',
+ 'kill',
+ 'ceph', '-w'
+ ],
+ stdin=run.PIPE,
+ stdout=StringIO(),
+ wait=False,
+ )
+
+ time.sleep(sleep_time)
+ proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
+ proc.wait()
+
+ lines = proc.stdout.getvalue().split('\n')
+
+ count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
+ assert count == 1 or count == 2, 'Incorrect number of warning messages expected 1 or 2 got %d' % count
+ count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+ assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .90')
+ time.sleep(10)
+
+ # State NONE -> FULL
+ log.info('6. Verify error messages again when exceeding full_ratio')
+
+ proc = mon.run(
+ args=[
+ 'daemon-helper',
+ 'kill',
+ 'ceph', '-w'
+ ],
+ stdin=run.PIPE,
+ stdout=StringIO(),
+ wait=False,
+ )
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001')
+
+ time.sleep(sleep_time)
+ proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
+ proc.wait()
+
+ lines = proc.stdout.getvalue().split('\n')
+
+ count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
+ assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count
+ count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+ assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count
+
+ # State FULL -> NONE
+ log.info('7. Verify no messages when settings are back to default')
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97')
+ time.sleep(10)
+
+ proc = mon.run(
+ args=[
+ 'daemon-helper',
+ 'kill',
+ 'ceph', '-w'
+ ],
+ stdin=run.PIPE,
+ stdout=StringIO(),
+ wait=False,
+ )
+
+ time.sleep(sleep_time)
+ proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
+ proc.wait()
+
+ lines = proc.stdout.getvalue().split('\n')
+
+ count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
+ assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count
+ count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+ assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
+
+ log.info('Test Passed')
+
+ # Bring all OSDs back in
+ manager.remove_pool("foo")
+ for osd in osds:
+ if osd['osd'] != 0:
+ manager.mark_in_osd(osd['osd'])
--- /dev/null
+"""
+osd recovery
+"""
+import logging
+import ceph_manager
+import time
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+
+def rados_start(testdir, remote, cmd):
+ """
+ Run a remote rados command (currently used to only write data)
+ """
+ log.info("rados %s" % ' '.join(cmd))
+ pre = [
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'rados',
+ ];
+ pre.extend(cmd)
+ proc = remote.run(
+ args=pre,
+ wait=False,
+ )
+ return proc
+
+def task(ctx, config):
+ """
+ Test (non-backfill) recovery
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'task only accepts a dict for configuration'
+ testdir = teuthology.get_testdir(ctx)
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+ log.info('num_osds is %s' % num_osds)
+ assert num_osds == 3
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ while len(manager.get_osd_status()['up']) < 3:
+ manager.sleep(10)
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_clean()
+
+ # test some osdmap flags
+ manager.raw_cluster_cmd('osd', 'set', 'noin')
+ manager.raw_cluster_cmd('osd', 'set', 'noout')
+ manager.raw_cluster_cmd('osd', 'set', 'noup')
+ manager.raw_cluster_cmd('osd', 'set', 'nodown')
+ manager.raw_cluster_cmd('osd', 'unset', 'noin')
+ manager.raw_cluster_cmd('osd', 'unset', 'noout')
+ manager.raw_cluster_cmd('osd', 'unset', 'noup')
+ manager.raw_cluster_cmd('osd', 'unset', 'nodown')
+
+ # write some new data
+ p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '60', 'write', '-b', '4096',
+ '--no-cleanup'])
+
+ time.sleep(15)
+
+ # trigger a divergent target:
+ # blackhole + restart osd.1 (shorter log)
+ manager.blackhole_kill_osd(1)
+ # kill osd.2 (longer log... we'll make it divergent below)
+ manager.kill_osd(2)
+ time.sleep(2)
+ manager.revive_osd(1)
+
+ # wait for our writes to complete + succeed
+ err = p.wait()
+ log.info('err is %d' % err)
+
+ # cluster must repeer
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.wait_for_active_or_down()
+
+ # write some more (make sure osd.2 really is divergent)
+ p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096'])
+ p.wait();
+
+ # revive divergent osd
+ manager.revive_osd(2)
+
+ while len(manager.get_osd_status()['up']) < 3:
+ log.info('waiting a bit...')
+ time.sleep(2)
+ log.info('3 are up!')
+
+ # cluster must recover
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_clean()
+
+
+def test_incomplete_pgs(ctx, config):
+ """
+ Test handling of incomplete pgs. Requires 4 osds.
+ """
+ testdir = teuthology.get_testdir(ctx)
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+ log.info('num_osds is %s' % num_osds)
+ assert num_osds == 4
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ while len(manager.get_osd_status()['up']) < 4:
+ time.sleep(10)
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+ manager.wait_for_clean()
+
+ log.info('Testing incomplete pgs...')
+
+ for i in range(4):
+ manager.set_config(
+ i,
+ osd_recovery_delay_start=1000)
+
+ # move data off of osd.0, osd.1
+ manager.raw_cluster_cmd('osd', 'out', '0', '1')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+ manager.wait_for_clean()
+
+ # lots of objects in rbd (no pg log, will backfill)
+ p = rados_start(testdir, mon,
+ ['-p', 'rbd', 'bench', '60', 'write', '-b', '1',
+ '--no-cleanup'])
+ p.wait()
+
+ # few objects in metadata pool (with pg log, normal recovery)
+ for f in range(1, 20):
+ p = rados_start(testdir, mon, ['-p', 'metadata', 'put',
+ 'foo.%d' % f, '/etc/passwd'])
+ p.wait()
+
+ # move it back
+ manager.raw_cluster_cmd('osd', 'in', '0', '1')
+ manager.raw_cluster_cmd('osd', 'out', '2', '3')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
+ manager.wait_for_active()
+
+ assert not manager.is_clean()
+ assert not manager.is_recovered()
+
+ # kill 2 + 3
+ log.info('stopping 2,3')
+ manager.kill_osd(2)
+ manager.kill_osd(3)
+ log.info('...')
+ manager.raw_cluster_cmd('osd', 'down', '2', '3')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.wait_for_active_or_down()
+
+ assert manager.get_num_down() > 0
+
+ # revive 2 + 3
+ manager.revive_osd(2)
+ manager.revive_osd(3)
+ while len(manager.get_osd_status()['up']) < 4:
+ log.info('waiting a bit...')
+ time.sleep(2)
+ log.info('all are up!')
+
+ for i in range(4):
+ manager.kick_recovery_wq(i)
+
+ # cluster must recover
+ manager.wait_for_clean()
--- /dev/null
+"""
+Peer test (Single test, not much configurable here)
+"""
+import logging
+import json
+
+import ceph_manager
+from teuthology import misc as teuthology
+from util.rados import rados
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Test peering.
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'peer task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ while len(manager.get_osd_status()['up']) < 3:
+ manager.sleep(10)
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_clean()
+
+ for i in range(3):
+ manager.set_config(
+ i,
+ osd_recovery_delay_start=120)
+
+ # take one osd down
+ manager.kill_osd(2)
+ manager.mark_down_osd(2)
+
+ # kludge to make sure they get a map
+ rados(ctx, mon, ['-p', 'data', 'get', 'dummy', '-'])
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.wait_for_recovery()
+
+ # kill another and revive 2, so that some pgs can't peer.
+ manager.kill_osd(1)
+ manager.mark_down_osd(1)
+ manager.revive_osd(2)
+ manager.wait_till_osd_is_up(2)
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+
+ manager.wait_for_active_or_down()
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+
+ # look for down pgs
+ num_down_pgs = 0
+ pgs = manager.get_pg_stats()
+ for pg in pgs:
+ out = manager.raw_cluster_cmd('pg', pg['pgid'], 'query')
+ log.debug("out string %s",out)
+ j = json.loads(out)
+ log.info("pg is %s, query json is %s", pg, j)
+
+ if pg['state'].count('down'):
+ num_down_pgs += 1
+ # verify that it is blocked on osd.1
+ rs = j['recovery_state']
+ assert len(rs) > 0
+ assert rs[0]['name'] == 'Started/Primary/Peering/GetInfo'
+ assert rs[1]['name'] == 'Started/Primary/Peering'
+ assert rs[1]['blocked']
+ assert rs[1]['down_osds_we_would_probe'] == [1]
+ assert len(rs[1]['peering_blocked_by']) == 1
+ assert rs[1]['peering_blocked_by'][0]['osd'] == 1
+
+ assert num_down_pgs > 0
+
+ # bring it all back
+ manager.revive_osd(1)
+ manager.wait_till_osd_is_up(1)
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_clean()
--- /dev/null
+"""
+Remotely run peering tests.
+"""
+import logging
+import time
+from teuthology import misc as teuthology
+import ceph_manager
+
+log = logging.getLogger(__name__)
+
+from args import argify
+
+POOLNAME = "POOLNAME"
+ARGS = [
+ ('num_pgs', 'number of pgs to create', 256, int),
+ ('max_time', 'seconds to complete peering', 0, int),
+ ('runs', 'trials to run', 10, int),
+ ('num_objects', 'objects to create', 256 * 1024, int),
+ ('object_size', 'size in bytes for objects', 64, int),
+ ('creation_time_limit', 'time limit for pool population', 60*60, int),
+ ('create_threads', 'concurrent writes for create', 256, int)
+ ]
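+
+ # A usage sketch (values illustrative; any of the ARGS entries above can be
+ # overridden, assuming argify maps them onto same-named yaml keys):
+ #
+ #     tasks:
+ #     - ceph:
+ #     - peering_speed_test:
+ #         num_pgs: 128
+ #         runs: 5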
+
+def setup(ctx, config):
+ """
+ Setup peering test on remotes.
+ """
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ ctx.manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+ ctx.manager.clear_pools()
+ ctx.manager.create_pool(POOLNAME, config.num_pgs)
+ log.info("populating pool")
+ ctx.manager.rados_write_objects(
+ POOLNAME,
+ config.num_objects,
+ config.object_size,
+ config.creation_time_limit,
+ config.create_threads)
+ log.info("done populating pool")
+
+def do_run(ctx, config):
+ """
+ Perform the test.
+ """
+ start = time.time()
+ # mark in osd
+ ctx.manager.mark_in_osd(0)
+ log.info("writing out objects")
+ ctx.manager.rados_write_objects(
+ POOLNAME,
+ config.num_pgs, # write 1 object per pg or so
+ 1,
+ config.creation_time_limit,
+ config.num_pgs, # lots of concurrency
+ cleanup = True)
+ peering_end = time.time()
+
+ log.info("peering done, waiting on recovery")
+ ctx.manager.wait_for_clean()
+
+ log.info("recovery done")
+ recovery_end = time.time()
+ if config.max_time:
+ assert(peering_end - start < config.max_time)
+ ctx.manager.mark_out_osd(0)
+ ctx.manager.wait_for_clean()
+ return {
+ 'time_to_active': peering_end - start,
+ 'time_to_clean': recovery_end - start
+ }
+
+@argify("peering_speed_test", ARGS)
+def task(ctx, config):
+ """
+ Peering speed test
+ """
+ setup(ctx, config)
+ ctx.manager.mark_out_osd(0)
+ ctx.manager.wait_for_clean()
+ ret = []
+ for i in range(config.runs):
+ log.info("Run {i}".format(i = i))
+ ret.append(do_run(ctx, config))
+
+ ctx.manager.mark_in_osd(0)
+ ctx.summary['recovery_times'] = {
+ 'runs': ret
+ }
--- /dev/null
+"""
+Qemu task
+"""
+from cStringIO import StringIO
+
+import contextlib
+import logging
+import os
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from tasks import rbd
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+DEFAULT_NUM_RBD = 1
+DEFAULT_IMAGE_URL = 'http://ceph.com/qa/ubuntu-12.04.qcow2'
+DEFAULT_MEM = 4096 # in megabytes
+
+@contextlib.contextmanager
+def create_dirs(ctx, config):
+ """
+ Handle directory creation and cleanup
+ """
+ testdir = teuthology.get_testdir(ctx)
+ for client, client_config in config.iteritems():
+ assert 'test' in client_config, 'You must specify a test to run'
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote.run(
+ args=[
+ 'install', '-d', '-m0755', '--',
+ '{tdir}/qemu'.format(tdir=testdir),
+ '{tdir}/archive/qemu'.format(tdir=testdir),
+ ]
+ )
+ try:
+ yield
+ finally:
+ for client, client_config in config.iteritems():
+ assert 'test' in client_config, 'You must specify a test to run'
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote.run(
+ args=[
+ 'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true',
+ ]
+ )
+
+@contextlib.contextmanager
+def generate_iso(ctx, config):
+ """Execute system commands to generate iso"""
+ log.info('generating iso...')
+ testdir = teuthology.get_testdir(ctx)
+ for client, client_config in config.iteritems():
+ assert 'test' in client_config, 'You must specify a test to run'
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ src_dir = os.path.dirname(__file__)
+ userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
+ metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)
+
+ with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
+ test_setup = ''.join(f.readlines())
+ # configuring the commands to setup the nfs mount
+ mnt_dir = "/export/{client}".format(client=client)
+ test_setup = test_setup.format(
+ mnt_dir=mnt_dir
+ )
+
+ with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
+ test_teardown = ''.join(f.readlines())
+
+ user_data = test_setup
+ if client_config.get('type', 'filesystem') == 'filesystem':
+ for i in xrange(0, client_config.get('num_rbd', DEFAULT_NUM_RBD)):
+ dev_letter = chr(ord('b') + i)
+ user_data += """
+- |
+ #!/bin/bash
+ mkdir /mnt/test_{dev_letter}
+ mkfs -t xfs /dev/vd{dev_letter}
+ mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
+""".format(dev_letter=dev_letter)
+
+ # this may change later to pass the directories as args to the
+ # script or something. xfstests needs that.
+ user_data += """
+- |
+ #!/bin/bash
+ test -d /mnt/test_b && cd /mnt/test_b
+ /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
+""" + test_teardown
+
+ teuthology.write_file(remote, userdata_path, StringIO(user_data))
+
+ with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
+ teuthology.write_file(remote, metadata_path, f)
+
+ test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)
+ remote.run(
+ args=[
+ 'wget', '-nv', '-O', test_file,
+ client_config['test'],
+ run.Raw('&&'),
+ 'chmod', '755', test_file,
+ ],
+ )
+ remote.run(
+ args=[
+ 'genisoimage', '-quiet', '-input-charset', 'utf-8',
+ '-volid', 'cidata', '-joliet', '-rock',
+ '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
+ '-graft-points',
+ 'user-data={userdata}'.format(userdata=userdata_path),
+ 'meta-data={metadata}'.format(metadata=metadata_path),
+ 'test.sh={file}'.format(file=test_file),
+ ],
+ )
+ try:
+ yield
+ finally:
+ for client in config.iterkeys():
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote.run(
+ args=[
+ 'rm', '-f',
+ '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
+ os.path.join(testdir, 'qemu', 'userdata.' + client),
+ os.path.join(testdir, 'qemu', 'metadata.' + client),
+ '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
+ ],
+ )
+
+@contextlib.contextmanager
+def download_image(ctx, config):
+ """Download the base image; remove the image file when done"""
+ log.info('downloading base image')
+ testdir = teuthology.get_testdir(ctx)
+ for client, client_config in config.iteritems():
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
+ remote.run(
+ args=[
+ 'wget', '-nv', '-O', base_file, DEFAULT_IMAGE_URL,
+ ]
+ )
+ try:
+ yield
+ finally:
+ log.debug('cleaning up base image files')
+ for client in config.iterkeys():
+ base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
+ tdir=testdir,
+ client=client,
+ )
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote.run(
+ args=[
+ 'rm', '-f', base_file,
+ ],
+ )
+
+
+def _setup_nfs_mount(remote, client, mount_dir):
+ """
+ Sets up an nfs mount on the remote that the guest can use to
+ store logs. This nfs mount is also used to touch a file
+ at the end of the test to indicate if the test was successful
+ or not.
+ """
+ export_dir = "/export/{client}".format(client=client)
+ log.info("Creating the nfs export directory...")
+ remote.run(args=[
+ 'sudo', 'mkdir', '-p', export_dir,
+ ])
+ log.info("Mounting the test directory...")
+ remote.run(args=[
+ 'sudo', 'mount', '--bind', mount_dir, export_dir,
+ ])
+ log.info("Adding mount to /etc/exports...")
+ export = "{dir} *(rw,no_root_squash,no_subtree_check,insecure)".format(
+ dir=export_dir
+ )
+ remote.run(args=[
+ 'echo', export, run.Raw("|"),
+ 'sudo', 'tee', '-a', "/etc/exports",
+ ])
+ log.info("Restarting NFS...")
+ if remote.os.package_type == "deb":
+ remote.run(args=['sudo', 'service', 'nfs-kernel-server', 'restart'])
+ else:
+ remote.run(args=['sudo', 'systemctl', 'restart', 'nfs'])
+
+
+def _teardown_nfs_mount(remote, client):
+ """
+ Tears down the nfs mount on the remote used for logging and reporting the
+ status of the tests being run in the guest.
+ """
+ log.info("Tearing down the nfs mount for {remote}".format(remote=remote))
+ export_dir = "/export/{client}".format(client=client)
+ log.info("Stopping NFS...")
+ if remote.os.package_type == "deb":
+ remote.run(args=[
+ 'sudo', 'service', 'nfs-kernel-server', 'stop'
+ ])
+ else:
+ remote.run(args=[
+ 'sudo', 'systemctl', 'stop', 'nfs'
+ ])
+ log.info("Unmounting exported directory...")
+ remote.run(args=[
+ 'sudo', 'umount', export_dir
+ ])
+ log.info("Deleting exported directory...")
+ remote.run(args=[
+ 'sudo', 'rm', '-r', '/export'
+ ])
+ log.info("Deleting export from /etc/exports...")
+ remote.run(args=[
+ 'sudo', 'sed', '-i', '$ d', '/etc/exports'
+ ])
+ log.info("Starting NFS...")
+ if remote.os.package_type == "deb":
+ remote.run(args=[
+ 'sudo', 'service', 'nfs-kernel-server', 'start'
+ ])
+ else:
+ remote.run(args=[
+ 'sudo', 'systemctl', 'start', 'nfs'
+ ])
+
+
+@contextlib.contextmanager
+def run_qemu(ctx, config):
+ """Setup kvm environment and start qemu"""
+ procs = []
+ testdir = teuthology.get_testdir(ctx)
+ for client, client_config in config.iteritems():
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
+ remote.run(
+ args=[
+ 'mkdir', log_dir, run.Raw('&&'),
+ 'sudo', 'modprobe', 'kvm',
+ ]
+ )
+
+ # make an nfs mount to use for logging and to
+ # allow the test to tell teuthology the test outcome
+ _setup_nfs_mount(remote, client, log_dir)
+
+ base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
+ tdir=testdir,
+ client=client
+ )
+ qemu_cmd = 'qemu-system-x86_64'
+ if remote.os.package_type == "rpm":
+ qemu_cmd = "/usr/libexec/qemu-kvm"
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'daemon-helper',
+ 'term',
+ qemu_cmd, '-enable-kvm', '-nographic',
+ '-m', str(client_config.get('memory', DEFAULT_MEM)),
+ # base OS device
+ '-drive',
+ 'file={base},format=qcow2,if=virtio'.format(base=base_file),
+ # cd holding metadata for cloud-init
+ '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
+ ]
+
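+ # choose a qemu cache mode matching the effective rbd cache settings:
+ # no rbd cache -> 'none', cache with dirty data allowed -> 'writeback',
+ # 'rbd cache max dirty' == 0 -> 'writethrough'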
+ cachemode = 'none'
+ ceph_config = ctx.ceph.conf.get('global', {})
+ ceph_config.update(ctx.ceph.conf.get('client', {}))
+ ceph_config.update(ctx.ceph.conf.get(client, {}))
+ if ceph_config.get('rbd cache'):
+ if ceph_config.get('rbd cache max dirty', 1) > 0:
+ cachemode = 'writeback'
+ else:
+ cachemode = 'writethrough'
+
+ for i in xrange(client_config.get('num_rbd', DEFAULT_NUM_RBD)):
+ args.extend([
+ '-drive',
+ 'file=rbd:rbd/{img}:id={id},format=raw,if=virtio,cache={cachemode}'.format(
+ img='{client}.{num}'.format(client=client, num=i),
+ id=client[len('client.'):],
+ cachemode=cachemode,
+ ),
+ ])
+
+ log.info('starting qemu...')
+ procs.append(
+ remote.run(
+ args=args,
+ logger=log.getChild(client),
+ stdin=run.PIPE,
+ wait=False,
+ )
+ )
+
+ try:
+ yield
+ finally:
+ log.info('waiting for qemu tests to finish...')
+ run.wait(procs)
+
+ log.debug('checking that qemu tests succeeded...')
+ for client in config.iterkeys():
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ # teardown nfs mount
+ _teardown_nfs_mount(remote, client)
+ # check for test status
+ remote.run(
+ args=[
+ 'test', '-f',
+ '{tdir}/archive/qemu/{client}/success'.format(
+ tdir=testdir,
+ client=client
+ ),
+ ],
+ )
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run a test inside of QEMU on top of rbd. Only one test
+ is supported per client.
+
+ For example, you can specify which clients to run on::
+
+ tasks:
+ - ceph:
+ - qemu:
+ client.0:
+ test: http://ceph.com/qa/test.sh
+ client.1:
+ test: http://ceph.com/qa/test2.sh
+
+ Or use the same settings on all clients:
+
+ tasks:
+ - ceph:
+ - qemu:
+ all:
+ test: http://ceph.com/qa/test.sh
+
+ For tests that don't need a filesystem, set type to block::
+
+ tasks:
+ - ceph:
+ - qemu:
+ client.0:
+ test: http://ceph.com/qa/test.sh
+ type: block
+
+ The test should be configured to run on /dev/vdb and later
+ devices.
+
+ If you want to run a test that uses more than one rbd image,
+ specify how many images to use::
+
+ tasks:
+ - ceph:
+ - qemu:
+ client.0:
+ test: http://ceph.com/qa/test.sh
+ type: block
+ num_rbd: 2
+
+ You can set the amount of memory the VM has (default is 4096 MB)::
+
+ tasks:
+ - ceph:
+ - qemu:
+ client.0:
+ test: http://ceph.com/qa/test.sh
+ memory: 512 # megabytes
+ """
+ assert isinstance(config, dict), \
+ "task qemu only supports a dictionary for configuration"
+
+ config = teuthology.replace_all_with_clients(ctx.cluster, config)
+
+ managers = []
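+ # collect nested context managers: one rbd image per client/device first,
+ # then directory setup, iso generation, base image download and the qemu run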
+ for client, client_config in config.iteritems():
+ num_rbd = client_config.get('num_rbd', 1)
+ assert num_rbd > 0, 'at least one rbd device must be used'
+ for i in xrange(num_rbd):
+ create_config = {
+ client: {
+ 'image_name':
+ '{client}.{num}'.format(client=client, num=i),
+ }
+ }
+ managers.append(
+ lambda create_config=create_config:
+ rbd.create_image(ctx=ctx, config=create_config)
+ )
+
+ managers.extend([
+ lambda: create_dirs(ctx=ctx, config=config),
+ lambda: generate_iso(ctx=ctx, config=config),
+ lambda: download_image(ctx=ctx, config=config),
+ lambda: run_qemu(ctx=ctx, config=config),
+ ])
+
+ with contextutil.nested(*managers):
+ yield
--- /dev/null
+"""
+ RadosModel-based integration tests
+"""
+import contextlib
+import logging
+import gevent
+from teuthology import misc as teuthology
+
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run RadosModel-based integration tests.
+
+ The config should be as follows::
+
+ rados:
+ clients: [client list]
+ ops: <number of ops>
+ objects: <number of objects to use>
+ max_in_flight: <max number of operations in flight>
+ object_size: <size of objects in bytes>
+ min_stride_size: <minimum write stride size in bytes>
+ max_stride_size: <maximum write stride size in bytes>
+ op_weights: <dictionary mapping operation type to integer weight>
+ runs: <number of times to run> - the pool is remade between runs
+ ec_pool: use an ec pool
+
+ For example::
+
+ tasks:
+ - ceph:
+ - rados:
+ clients: [client.0]
+ ops: 1000
+ max_seconds: 0 # 0 for no limit
+ objects: 25
+ max_in_flight: 16
+ object_size: 4000000
+ min_stride_size: 1024
+ max_stride_size: 4096
+ op_weights:
+ read: 20
+ write: 10
+ delete: 2
+ snap_create: 3
+ rollback: 2
+ snap_remove: 0
+ ec_pool: true
+ runs: 10
+ - interactive:
+
+ Optionally, you can provide the pool name to run against:
+
+ tasks:
+ - ceph:
+ - exec:
+ client.0:
+ - ceph osd pool create foo
+ - rados:
+ clients: [client.0]
+ pools: [foo]
+ ...
+
+ Alternatively, you can provide a pool prefix:
+
+ tasks:
+ - ceph:
+ - exec:
+ client.0:
+ - ceph osd pool create foo.client.0
+ - rados:
+ clients: [client.0]
+ pool_prefix: foo
+ ...
+
+ """
+ log.info('Beginning rados...')
+ assert isinstance(config, dict), \
+ "please list clients to run on"
+
+ object_size = int(config.get('object_size', 4000000))
+ op_weights = config.get('op_weights', {})
+ testdir = teuthology.get_testdir(ctx)
+ args = [
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'ceph_test_rados']
+ if config.get('ec_pool', False):
+ args.extend(['--ec-pool'])
+ args.extend([
+ '--op', 'read', str(op_weights.get('read', 100)),
+ '--op', 'write', str(op_weights.get('write', 100)),
+ '--op', 'delete', str(op_weights.get('delete', 10)),
+ '--max-ops', str(config.get('ops', 10000)),
+ '--objects', str(config.get('objects', 500)),
+ '--max-in-flight', str(config.get('max_in_flight', 16)),
+ '--size', str(object_size),
+ '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
+ '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
+ '--max-seconds', str(config.get('max_seconds', 0))
+ ])
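+ # only pass a weight through when it is explicitly configured, so
+ # ceph_test_rados keeps its built-in default for everything else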
+ for field in [
+ 'copy_from', 'is_dirty', 'undirty', 'cache_flush',
+ 'cache_try_flush', 'cache_evict',
+ 'snap_create', 'snap_remove', 'rollback', 'setattr', 'rmattr',
+ 'watch', 'append',
+ ]:
+ if field in op_weights:
+ args.extend([
+ '--op', field, str(op_weights[field]),
+ ])
+
+ def thread():
+ """Thread spawned by gevent"""
+ clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ log.info('clients are %s' % clients)
+ for i in range(int(config.get('runs', '1'))):
+ log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
+ tests = {}
+ existing_pools = config.get('pools', [])
+ created_pools = []
+ for role in config.get('clients', clients):
+ assert isinstance(role, basestring)
+ PREFIX = 'client.'
+ assert role.startswith(PREFIX)
+ id_ = role[len(PREFIX):]
+
+ pool = config.get('pool', None)
+ if not pool:
+     if existing_pools:
+         pool = existing_pools.pop()
+     else:
+         pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
+         created_pools.append(pool)
+
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ proc = remote.run(
+ args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
+ ["--pool", pool],
+ logger=log.getChild("rados.{id}".format(id=id_)),
+ stdin=run.PIPE,
+ wait=False
+ )
+ tests[id_] = proc
+ run.wait(tests.itervalues())
+
+ for pool in created_pools:
+ ctx.manager.remove_pool(pool)
+
+ running = gevent.spawn(thread)
+
+ try:
+ yield
+ finally:
+ log.info('joining rados')
+ running.get()
--- /dev/null
+"""
+Rados benchmarking
+"""
+import contextlib
+import logging
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run radosbench
+
+ The config should be as follows:
+
+ radosbench:
+ clients: [client list]
+ time: <seconds to run>
+ pool: <pool to use>
+ size: write size to use
+ unique_pool: use a unique pool, defaults to False
+ ec_pool: create ec pool, defaults to False
+ create_pool: create pool, defaults to True
+
+ example:
+
+ tasks:
+ - ceph:
+ - radosbench:
+ clients: [client.0]
+ time: 360
+ - interactive:
+ """
+ log.info('Beginning radosbench...')
+ assert isinstance(config, dict), \
+ "please list clients to run on"
+ radosbench = {}
+
+ testdir = teuthology.get_testdir(ctx)
+
+ for role in config.get('clients', ['client.0']):
+ assert isinstance(role, basestring)
+ PREFIX = 'client.'
+ assert role.startswith(PREFIX)
+ id_ = role[len(PREFIX):]
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+
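+ # default to the pre-existing 'data' pool; with create_pool, either create
+ # the configured pool or a uniquely named one (optionally erasure coded)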
+ pool = 'data'
+ if config.get('create_pool', True):
+ if config.get('pool'):
+ pool = config.get('pool')
+ if pool != 'data':
+ ctx.manager.create_pool(pool, ec_pool=config.get('ec_pool', False))
+ else:
+ pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
+
+ proc = remote.run(
+ args=[
+ "/bin/sh", "-c",
+ " ".join(['adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage',
+ 'rados',
+ '--name', role,
+ '-b', str(config.get('size', 4<<20)),
+ '-p' , pool,
+ 'bench', str(config.get('time', 360)), 'write',
+ ]).format(tdir=testdir),
+ ],
+ logger=log.getChild('radosbench.{id}'.format(id=id_)),
+ stdin=run.PIPE,
+ wait=False
+ )
+ radosbench[id_] = proc
+
+ try:
+ yield
+ finally:
+ timeout = config.get('time', 360) * 5
+ log.info('joining radosbench (timing out after %ss)', timeout)
+ run.wait(radosbench.itervalues(), timeout=timeout)
+
+ if pool != 'data':
+ ctx.manager.remove_pool(pool)
--- /dev/null
+"""
+Rgw admin testing against a running instance
+"""
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
+
+import copy
+import json
+import logging
+import time
+
+from cStringIO import StringIO
+
+import boto.exception
+import boto.s3.connection
+import boto.s3.acl
+
+import util.rgw as rgw_utils
+
+from teuthology import misc as teuthology
+from util.rgw import rgwadmin, get_user_summary, get_user_successful_ops
+
+log = logging.getLogger(__name__)
+
+def get_acl(key):
+ """
+ Helper function to get the xml acl from a key, ensuring that the xml
+ version tag is removed from the acl response
+ """
+ raw_acl = key.get_xml_acl()
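+ # e.g. '<?xml version="1.0" encoding="UTF-8"?><AccessControlPolicy>...'
+ # is reduced to just '<AccessControlPolicy>...'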
+
+ def remove_version(string):
+ return string.split(
+ '<?xml version="1.0" encoding="UTF-8"?>'
+ )[-1]
+
+ def remove_newlines(string):
+ return string.strip('\n')
+
+ return remove_version(
+ remove_newlines(raw_acl)
+ )
+
+
+def task(ctx, config):
+ """
+ Test radosgw-admin functionality against a running rgw instance.
+ """
+ global log
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+ "task radosgw-admin only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ multi_region_run = rgw_utils.multi_region_enabled(ctx)
+
+ client = clients[0]; # default choice, multi-region code may overwrite this
+ if multi_region_run:
+ client = rgw_utils.get_master_client(ctx, clients)
+
+ # once the client is chosen, pull the host name and assigned port out of
+ # the role_endpoints that were assigned by the rgw task
+ (remote_host, remote_port) = ctx.rgw.role_endpoints[client]
+
+ ##
+ user1='foo'
+ user2='fud'
+ subuser1='foo:foo1'
+ subuser2='foo:foo2'
+ display_name1='Foo'
+ display_name2='Fud'
+ email='foo@foo.com'
+ email2='bar@bar.com'
+ access_key='9te6NH5mcdcq0Tc5i8i1'
+ secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
+ access_key2='p5YnriCv1nAtykxBrupQ'
+ secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
+ swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
+ swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
+
+ bucket_name='myfoo'
+ bucket_name2='mybar'
+
+ # connect to rgw
+ connection = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=remote_port,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+ connection2 = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key2,
+ aws_secret_access_key=secret_key2,
+ is_secure=False,
+ port=remote_port,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+
+ # legend (test cases can be easily grep-ed out)
+ # TESTCASE 'testname','object','method','operation','assertion'
+ # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert err
+
+ # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user1,
+ '--display-name', display_name1,
+ '--email', email,
+ '--access-key', access_key,
+ '--secret', secret_key,
+ '--max-buckets', '4'
+ ],
+ check_status=True)
+
+ # TESTCASE 'duplicate email','user','create','existing user email','fails'
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user2,
+ '--display-name', display_name2,
+ '--email', email,
+ ])
+ assert err
+
+ # TESTCASE 'info-existing','user','info','existing user','returns correct info'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+ assert out['user_id'] == user1
+ assert out['email'] == email
+ assert out['display_name'] == display_name1
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+ assert not out['suspended']
+
+ # this whole block should only be run if regions have been configured
+ if multi_region_run:
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ # post-sync, validate that user1 exists on the sync destination host
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, dest_client, ['metadata', 'list', 'user'])
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
+ assert out['user_id'] == user1
+ assert out['email'] == email
+ assert out['display_name'] == display_name1
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+ assert not out['suspended']
+
+ # compare the metadata between different regions, make sure it matches
+ log.debug('compare the metadata between different regions, make sure it matches')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client,
+ ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
+ assert out1 == out2
+
+ # suspend a user on the master, then check the status on the destination
+ log.debug('suspend a user on the master, then check the status on the destination')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, source_client, ['user', 'suspend', '--uid', user1])
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
+ assert out['suspended']
+
+ # delete a user on the master, then check that it's gone on the destination
+ log.debug('delete a user on the master, then check that it\'s gone on the destination')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, source_client, ['user', 'rm', '--uid', user1], check_status=True)
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user1])
+ assert out is None
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
+ assert out is None
+
+ # then recreate it so later tests pass
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user1,
+ '--display-name', display_name1,
+ '--email', email,
+ '--access-key', access_key,
+ '--secret', secret_key,
+ '--max-buckets', '4'
+ ],
+ check_status=True)
+
+ # now do the multi-region bucket tests
+ log.debug('now do the multi-region bucket tests')
+
+ # Create a second user for the following tests
+ log.debug('Create a second user for the following tests')
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user2,
+ '--display-name', display_name2,
+ '--email', email2,
+ '--access-key', access_key2,
+ '--secret', secret_key2,
+ '--max-buckets', '4'
+ ],
+ check_status=True)
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user2], check_status=True)
+ assert out is not None
+
+ # create a bucket and do a sync
+ log.debug('create a bucket and do a sync')
+ bucket = connection.create_bucket(bucket_name2)
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # compare the metadata for the bucket between different regions, make sure it matches
+ log.debug('compare the metadata for the bucket between different regions, make sure it matches')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ assert out1 == out2
+
+ # get the bucket.instance info and compare that
+ src_bucket_id = out1['data']['bucket']['bucket_id']
+ dest_bucket_id = out2['data']['bucket']['bucket_id']
+ (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
+ 'bucket.instance:{bucket_name}:{bucket_instance}'.format(
+ bucket_name=bucket_name2,bucket_instance=src_bucket_id)],
+ check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
+ 'bucket.instance:{bucket_name}:{bucket_instance}'.format(
+ bucket_name=bucket_name2,bucket_instance=dest_bucket_id)],
+ check_status=True)
+ del out1['data']['bucket_info']['bucket']['pool']
+ del out1['data']['bucket_info']['bucket']['index_pool']
+ del out2['data']['bucket_info']['bucket']['pool']
+ del out2['data']['bucket_info']['bucket']['index_pool']
+ assert out1 == out2
+
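+        # Track how many agent configs have src and dest in the same region;
+        # if all of them do, the bucket cleanup happens after this loop instead.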
+ same_region = 0
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+
+ source_region = rgw_utils.region_for_client(ctx, source_client)
+ dest_region = rgw_utils.region_for_client(ctx, dest_client)
+
+ # 301 is only returned for requests to something in a different region
+ if source_region == dest_region:
+ log.debug('301 is only returned for requests to something in a different region')
+ same_region += 1
+ continue
+
+ # Attempt to create a new connection with user1 to the destination RGW
+ log.debug('Attempt to create a new connection with user1 to the destination RGW')
+ # and use that to attempt a delete (that should fail)
+ exception_encountered = False
+ try:
+ (dest_remote_host, dest_remote_port) = ctx.rgw.role_endpoints[dest_client]
+ connection_dest = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=dest_remote_port,
+ host=dest_remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+
+ # this should fail
+ connection_dest.delete_bucket(bucket_name2)
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 301
+ exception_encountered = True
+
+ # confirm that the expected exception was seen
+ assert exception_encountered
+
+ # now delete the bucket on the source RGW and do another sync
+ log.debug('now delete the bucket on the source RGW and do another sync')
+ bucket.delete()
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ if same_region == len(ctx.radosgw_agent.config):
+ bucket.delete()
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # make sure that the bucket no longer exists in either region
+ log.debug('make sure that the bucket no longer exists in either region')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
+ 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
+ (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
+ 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
+ # Both of the previous calls should have errors due to requesting
+ # metadata for non-existent buckets
+ assert err1
+ assert err2
+
+ # create a bucket and then sync it
+ log.debug('create a bucket and then sync it')
+ bucket = connection.create_bucket(bucket_name2)
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # compare the metadata for the bucket between different regions, make sure it matches
+ log.debug('compare the metadata for the bucket between different regions, make sure it matches')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ assert out1 == out2
+
+ # Now delete the bucket and recreate it with a different user
+ log.debug('Now delete the bucket and recreate it with a different user')
+ # within the same window of time and then sync.
+ bucket.delete()
+ bucket = connection2.create_bucket(bucket_name2)
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # compare the metadata for the bucket between different regions, make sure it matches
+ log.debug('compare the metadata for the bucket between different regions, make sure it matches')
+ # user2 should own the bucket in both regions
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ assert out1 == out2
+ assert out1['data']['owner'] == user2
+ assert out1['data']['owner'] != user1
+
+ # now we're going to use this bucket to test meta-data update propagation
+ log.debug('now we\'re going to use this bucket to test meta-data update propagation')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+
+ # get the metadata so we can tweak it
+ log.debug('get the metadata so we can tweak it')
+ (err, orig_data) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+
+ # manually edit mtime for this bucket to be 300 seconds in the past
+ log.debug('manually edit mtime for this bucket to be 300 seconds in the past')
+ new_data = copy.deepcopy(orig_data)
+ new_data['mtime'] = orig_data['mtime'] - 300
+ assert new_data != orig_data
+ (err, out) = rgwadmin(ctx, source_client,
+ ['metadata', 'put', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ stdin=StringIO(json.dumps(new_data)),
+ check_status=True)
+
+ # get the metadata and make sure that the 'put' worked
+ log.debug('get the metadata and make sure that the \'put\' worked')
+ (err, out) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ assert out == new_data
+
+ # sync to propagate the new metadata
+ log.debug('sync to propagate the new metadata')
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # get the metadata from the dest and compare it to what we just set
+ log.debug('get the metadata from the dest and compare it to what we just set')
+ # and what the source region has.
+ (err1, out1) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ # yeah for the transitive property
+ assert out1 == out2
+ assert out1 == new_data
+
+ # now we delete the bucket
+ log.debug('now we delete the bucket')
+ bucket.delete()
+
+ log.debug('sync to propagate the deleted bucket')
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # Delete user2 as later tests do not expect it to exist.
+ # Verify that it is gone on both regions
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, source_client,
+ ['user', 'rm', '--uid', user2], check_status=True)
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ # The two 'user info' calls should fail and not return any data
+ # since we just deleted this user.
+ (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user2])
+ assert out is None
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user2])
+ assert out is None
+
+ # Test data sync
+
+ # First create a bucket for data sync test purpose
+ bucket = connection.create_bucket(bucket_name + 'data')
+
+ # Create a tiny file and check if in sync
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ if c_config.get('metadata-only'):
+ continue
+
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ k = boto.s3.key.Key(bucket)
+ k.key = 'tiny_file'
+ k.set_contents_from_string("123456789")
+ time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
+ rgw_utils.radosgw_agent_sync_all(ctx, data=True)
+ (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
+ dest_connection = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=dest_port,
+ host=dest_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+ dest_k = dest_connection.get_bucket(bucket_name + 'data').get_key('tiny_file')
+ assert k.get_contents_as_string() == dest_k.get_contents_as_string()
+
+ # check that deleting it removes it from the dest zone
+ k.delete()
+ time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
+ rgw_utils.radosgw_agent_sync_all(ctx, data=True)
+
+ dest_bucket = dest_connection.get_bucket(bucket_name + 'data')
+ dest_k = dest_bucket.get_key('tiny_file')
+        assert dest_k is None, 'object not deleted from destination zone'
+
+ # finally we delete the bucket
+ bucket.delete()
+
+ bucket = connection.create_bucket(bucket_name + 'data2')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ if c_config.get('metadata-only'):
+ continue
+
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
+ dest_connection = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=dest_port,
+ host=dest_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+ for i in range(20):
+ k = boto.s3.key.Key(bucket)
+ k.key = 'tiny_file_' + str(i)
+ k.set_contents_from_string(str(i) * 100)
+
+ time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
+ rgw_utils.radosgw_agent_sync_all(ctx, data=True)
+
+ for i in range(20):
+ dest_k = dest_connection.get_bucket(bucket_name + 'data2').get_key('tiny_file_' + str(i))
+ assert (str(i) * 100) == dest_k.get_contents_as_string()
+ k = boto.s3.key.Key(bucket)
+ k.key = 'tiny_file_' + str(i)
+ k.delete()
+
+ # check that deleting removes the objects from the dest zone
+ time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
+ rgw_utils.radosgw_agent_sync_all(ctx, data=True)
+
+ for i in range(20):
+ dest_bucket = dest_connection.get_bucket(bucket_name + 'data2')
+ dest_k = dest_bucket.get_key('tiny_file_' + str(i))
+            assert dest_k is None, 'object %d not deleted from destination zone' % i
+ bucket.delete()
+
+ # end of 'if multi_region_run:'
+
+ # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
+ check_status=True)
+
+ # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+ assert out['suspended']
+
+ # TESTCASE 're-enable','user','enable','suspended user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], check_status=True)
+
+ # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+ assert not out['suspended']
+
+ # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'key', 'create', '--uid', user1,
+ '--access-key', access_key2, '--secret', secret_key2,
+ ], check_status=True)
+
+ # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1],
+ check_status=True)
+ assert len(out['keys']) == 2
+ assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
+ assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
+
+ # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'key', 'rm', '--uid', user1,
+ '--access-key', access_key2,
+ ], check_status=True)
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+
+ # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+ subuser_access = 'full'
+ subuser_perm = 'full-control'
+
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'create', '--subuser', subuser1,
+ '--access', subuser_access
+ ], check_status=True)
+
+ # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'modify', '--subuser', subuser1,
+ '--secret', swift_secret1,
+ '--key-type', 'swift',
+ ], check_status=True)
+
+ # TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+
+ assert out['subusers'][0]['permissions'] == subuser_perm
+
+ # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+ assert len(out['swift_keys']) == 1
+ assert out['swift_keys'][0]['user'] == subuser1
+ assert out['swift_keys'][0]['secret_key'] == swift_secret1
+
+ # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'create', '--subuser', subuser2,
+ '--secret', swift_secret2,
+ '--key-type', 'swift',
+ ], check_status=True)
+
+ # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+ assert len(out['swift_keys']) == 2
+ assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
+ assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
+
+ # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'key', 'rm', '--subuser', subuser1,
+ '--key-type', 'swift',
+ ], check_status=True)
+ assert len(out['swift_keys']) == 1
+
+ # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'rm', '--subuser', subuser1,
+ ], check_status=True)
+ assert len(out['subusers']) == 1
+
+    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'rm', '--subuser', subuser2,
+ '--key-type', 'swift', '--purge-keys',
+ ], check_status=True)
+ assert len(out['swift_keys']) == 0
+ assert len(out['subusers']) == 0
+
+ # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1],
+ check_status=True)
+ assert len(out) == 0
+
+ if multi_region_run:
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
+ assert len(out) == 0
+
+ # create a first bucket
+ bucket = connection.create_bucket(bucket_name)
+
+ # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
+ assert len(out) == 1
+ assert out[0] == bucket_name
+
+ # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True)
+ assert len(out) >= 1
+    assert bucket_name in out
+
+    # TESTCASE 'max-bucket-limit','bucket','create','4 buckets','5th bucket fails due to max buckets == 4'
+ bucket2 = connection.create_bucket(bucket_name + '2')
+ bucket3 = connection.create_bucket(bucket_name + '3')
+ bucket4 = connection.create_bucket(bucket_name + '4')
+ # the 5th should fail.
+ failed = False
+ try:
+ connection.create_bucket(bucket_name + '5')
+ except Exception:
+ failed = True
+ assert failed
+
+ # delete the buckets
+ bucket2.delete()
+ bucket3.delete()
+ bucket4.delete()
+
+ # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
+ (err, out) = rgwadmin(ctx, client, [
+ 'bucket', 'stats', '--bucket', bucket_name], check_status=True)
+ assert out['owner'] == user1
+ bucket_id = out['id']
+
+ # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], check_status=True)
+ assert len(out) == 1
+ assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
+
+ # use some space
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('one')
+
+ # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
+ (err, out) = rgwadmin(ctx, client, [
+ 'bucket', 'stats', '--bucket', bucket_name], check_status=True)
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 1
+ assert out['usage']['rgw.main']['size_kb'] > 0
+
+ # reclaim it
+ key.delete()
+
+ # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
+ (err, out) = rgwadmin(ctx, client,
+ ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name],
+ check_status=True)
+
+ # create a second user to link the bucket to
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user2,
+ '--display-name', display_name2,
+ '--access-key', access_key2,
+ '--secret', secret_key2,
+ '--max-buckets', '1',
+ ],
+ check_status=True)
+
+ # try creating an object with the first user before the bucket is relinked
+ denied = False
+ key = boto.s3.key.Key(bucket)
+
+ try:
+ key.set_contents_from_string('two')
+ except boto.exception.S3ResponseError:
+ denied = True
+
+ assert not denied
+
+ # delete the object
+ key.delete()
+
+ # link the bucket to another user
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name],
+ check_status=True)
+
+ # try to remove user, should fail (has a linked bucket)
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
+ assert err
+
+ # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name],
+ check_status=True)
+
+ # relink the bucket to the first user and delete the second user
+ (err, out) = rgwadmin(ctx, client,
+ ['bucket', 'link', '--uid', user1, '--bucket', bucket_name],
+ check_status=True)
+
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2],
+ check_status=True)
+
+ # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
+
+ # upload an object
+ object_name = 'four'
+ key = boto.s3.key.Key(bucket, object_name)
+ key.set_contents_from_string(object_name)
+
+ # now delete it
+ (err, out) = rgwadmin(ctx, client,
+ ['object', 'rm', '--bucket', bucket_name, '--object', object_name],
+ check_status=True)
+
+    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
+ (err, out) = rgwadmin(ctx, client, [
+ 'bucket', 'stats', '--bucket', bucket_name],
+ check_status=True)
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 0
+
+ # list log objects
+    # TESTCASE 'log-list','log','list','after activity','succeeds, lists at least one object'
+ (err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True)
+ assert len(out) > 0
+
+ for obj in out:
+ # TESTCASE 'log-show','log','show','after activity','returns expected info'
+ if obj[:4] == 'meta' or obj[:4] == 'data':
+ continue
+
+ (err, rgwlog) = rgwadmin(ctx, client, ['log', 'show', '--object', obj],
+ check_status=True)
+ assert len(rgwlog) > 0
+
+ # exempt bucket_name2 from checking as it was only used for multi-region tests
+ assert rgwlog['bucket'].find(bucket_name) == 0 or rgwlog['bucket'].find(bucket_name2) == 0
+ assert rgwlog['bucket'] != bucket_name or rgwlog['bucket_id'] == bucket_id
+ assert rgwlog['bucket_owner'] == user1 or rgwlog['bucket'] == bucket_name + '5' or rgwlog['bucket'] == bucket_name2
+ for entry in rgwlog['log_entries']:
+            log.debug('checking log entry: %s', entry)
+ assert entry['bucket'] == rgwlog['bucket']
+ possible_buckets = [bucket_name + '5', bucket_name2]
+ user = entry['user']
+ assert user == user1 or user.endswith('system-user') or \
+ rgwlog['bucket'] in possible_buckets
+
+ # TESTCASE 'log-rm','log','rm','delete log objects','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj],
+ check_status=True)
+
+ # TODO: show log by bucket+date
+
+ # need to wait for all usage data to get flushed, should take up to 30 seconds
+ timestamp = time.time()
+ while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj']) # last operation we did is delete obj, wait for it to flush
+ if get_user_successful_ops(out, user1) > 0:
+ break
+ time.sleep(1)
+
+ assert time.time() - timestamp <= (20 * 60)
+
+ # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True)
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+
+ user_summary = get_user_summary(out, user1)
+
+ total = user_summary['total']
+ assert total['successful_ops'] > 0
+
+ # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
+ check_status=True)
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ for entry in user_summary['categories']:
+ assert entry['successful_ops'] > 0
+ assert user_summary['user'] == user1
+
+ # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
+ test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
+ for cat in test_categories:
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat],
+ check_status=True)
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ assert user_summary['user'] == user1
+ assert len(user_summary['categories']) == 1
+ entry = user_summary['categories'][0]
+ assert entry['category'] == cat
+ assert entry['successful_ops'] > 0
+
+    # the usage flush interval is 30 seconds, wait that much and then some
+ # to make sure everything has been flushed
+ time.sleep(35)
+
+ # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
+ (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1],
+ check_status=True)
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
+ check_status=True)
+ assert len(out['entries']) == 0
+ assert len(out['summary']) == 0
+
+ # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
+ check_status=True)
+
+ # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
+ try:
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('five')
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 403
+
+ # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1],
+ check_status=True)
+
+ # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('six')
+
+ # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection'
+
+ # create an object large enough to be split into multiple parts
+ test_string = 'foo'*10000000
+
+ big_key = boto.s3.key.Key(bucket)
+ big_key.set_contents_from_string(test_string)
+
+ # now delete the head
+ big_key.delete()
+
+ # wait a bit to give the garbage collector time to cycle
+ time.sleep(15)
+
+ (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
+
+ assert len(out) > 0
+
+ # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage'
+ (err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True)
+
+ #confirm
+ (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
+
+ assert len(out) == 0
+
+ # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
+ assert err
+
+ # delete should fail because ``key`` still exists
+ try:
+ bucket.delete()
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 409
+
+ key.delete()
+ bucket.delete()
+
+ # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
+ bucket = connection.create_bucket(bucket_name)
+
+ # create an object
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('seven')
+
+ # should be private already but guarantee it
+ key.set_acl('private')
+
+ (err, out) = rgwadmin(ctx, client,
+ ['policy', '--bucket', bucket.name, '--object', key.key],
+ check_status=True)
+
+ acl = get_acl(key)
+
+ assert acl == out.strip('\n')
+
+ # add another grantee by making the object public read
+ key.set_acl('public-read')
+
+ (err, out) = rgwadmin(ctx, client,
+ ['policy', '--bucket', bucket.name, '--object', key.key],
+ check_status=True)
+
+ acl = get_acl(key)
+
+ assert acl == out.strip('\n')
+
+ # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key_name = ['eight', 'nine', 'ten', 'eleven']
+ for i in range(4):
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string(key_name[i])
+
+ (err, out) = rgwadmin(ctx, client,
+ ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'],
+ check_status=True)
+
+ # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
+    caps = 'user=read'
+ (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps])
+
+ assert out['caps'][0]['perm'] == 'read'
+
+ # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps])
+
+ assert not out['caps']
+
+ # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
+ assert err
+
+ # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('twelve')
+
+ (err, out) = rgwadmin(ctx, client,
+ ['user', 'rm', '--uid', user1, '--purge-data' ],
+ check_status=True)
+
+ # TESTCASE 'rm-user3','user','rm','deleted user','fails'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert err
+
+ # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule'
+ #
+
+ (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
+ orig_placement_pools = len(out['placement_pools'])
+
+ # removed this test, it is not correct to assume that zone has default placement, it really
+ # depends on how we set it up before
+ #
+ # assert len(out) > 0
+ # assert len(out['placement_pools']) == 1
+
+ # default_rule = out['placement_pools'][0]
+ # assert default_rule['key'] == 'default-placement'
+
+    rule = {'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}}
+
+ out['placement_pools'].append(rule)
+
+ (err, out) = rgwadmin(ctx, client, ['zone', 'set'],
+ stdin=StringIO(json.dumps(out)),
+ check_status=True)
+
+ (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
+ assert len(out) > 0
+ assert len(out['placement_pools']) == orig_placement_pools + 1
--- /dev/null
+"""
+Run a series of rgw admin commands through the rest interface.
+
+The test cases in this file have been annotated for inventory.
+To extract the inventory (in csv format) use the command:
+
+ grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+
+"""
+from cStringIO import StringIO
+import logging
+import json
+
+import boto.exception
+import boto.s3.connection
+import boto.s3.acl
+
+import requests
+import time
+
+from boto.connection import AWSAuthConnection
+from teuthology import misc as teuthology
+from util.rgw import get_user_summary, get_user_successful_ops
+
+log = logging.getLogger(__name__)
+
+def rgwadmin(ctx, client, cmd):
+ """
+ Perform rgw admin command
+
+ :param client: client
+ :param cmd: command to execute.
+ :return: command exit status, json result.
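+
+    Typical call (mirrors the usage in task() below):
+        (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', 'foo'])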
+ """
+ log.info('radosgw-admin: %s' % cmd)
+ testdir = teuthology.get_testdir(ctx)
+ pre = [
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'radosgw-admin',
+ '--log-to-stderr',
+ '--format', 'json',
+ ]
+ pre.extend(cmd)
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ proc = remote.run(
+ args=pre,
+ check_status=False,
+ stdout=StringIO(),
+ stderr=StringIO(),
+ )
+ r = proc.exitstatus
+ out = proc.stdout.getvalue()
+ j = None
+ if not r and out != '':
+ try:
+ j = json.loads(out)
+ log.info(' json result: %s' % j)
+ except ValueError:
+ j = out
+ log.info(' raw result: %s' % j)
+ return (r, j)
+
+
+def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False):
+ """
+ perform a rest command
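+
+    Typical call (mirrors the usage in task() below):
+        (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})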
+ """
+ log.info('radosgw-admin-rest: %s %s' % (cmd, params))
+ put_cmds = ['create', 'link', 'add']
+ post_cmds = ['unlink', 'modify']
+ delete_cmds = ['trim', 'rm', 'process']
+ get_cmds = ['check', 'info', 'show', 'list']
+
+ bucket_sub_resources = ['object', 'policy', 'index']
+ user_sub_resources = ['subuser', 'key', 'caps']
+ zone_sub_resources = ['pool', 'log', 'garbage']
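+    # e.g. ['user', 'info'] resolves to GET /admin/user, while a sub-resource
+    # command like ['key', 'rm'] resolves to DELETE /admin/user?key; the
+    # helpers below pick the HTTP method, resource, and query string.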
+
+ def get_cmd_method_and_handler(cmd):
+ """
+ Get the rest command and handler from information in cmd and
+ from the imported requests object.
+ """
+ if cmd[1] in put_cmds:
+ return 'PUT', requests.put
+ elif cmd[1] in delete_cmds:
+ return 'DELETE', requests.delete
+ elif cmd[1] in post_cmds:
+ return 'POST', requests.post
+ elif cmd[1] in get_cmds:
+ return 'GET', requests.get
+
+ def get_resource(cmd):
+ """
+ Get the name of the resource from information in cmd.
+ """
+ if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources:
+ if cmd[0] == 'bucket':
+ return 'bucket', ''
+ else:
+ return 'bucket', cmd[0]
+ elif cmd[0] == 'user' or cmd[0] in user_sub_resources:
+ if cmd[0] == 'user':
+ return 'user', ''
+ else:
+ return 'user', cmd[0]
+ elif cmd[0] == 'usage':
+ return 'usage', ''
+ elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources:
+ if cmd[0] == 'zone':
+ return 'zone', ''
+ else:
+ return 'zone', cmd[0]
+
+ def build_admin_request(conn, method, resource = '', headers=None, data='',
+ query_args=None, params=None):
+ """
+ Build an administative request adapted from the build_request()
+ method of boto.connection
+ """
+
+ path = conn.calling_format.build_path_base('admin', resource)
+ auth_path = conn.calling_format.build_auth_path('admin', resource)
+ host = conn.calling_format.build_host(conn.server_name(), 'admin')
+ if query_args:
+ path += '?' + query_args
+ boto.log.debug('path=%s' % path)
+ auth_path += '?' + query_args
+ boto.log.debug('auth_path=%s' % auth_path)
+ return AWSAuthConnection.build_base_http_request(conn, method, path,
+ auth_path, params, headers, data, host)
+
+ method, handler = get_cmd_method_and_handler(cmd)
+ resource, query_args = get_resource(cmd)
+ request = build_admin_request(connection, method, resource,
+ query_args=query_args, headers=headers)
+
+ url = '{protocol}://{host}{path}'.format(protocol=request.protocol,
+ host=request.host, path=request.path)
+
+ request.authorize(connection=connection)
+ result = handler(url, params=params, headers=request.headers)
+
+ if raw:
+        log.info(' text result: %s' % result.text)
+        return result.status_code, result.text
+ else:
+ log.info(' json result: %s' % result.json())
+ return result.status_code, result.json()
+
+
+def task(ctx, config):
+ """
+ Test radosgw-admin functionality through the RESTful interface
+ """
+    assert config is None or isinstance(config, list) \
+        or isinstance(config, dict), \
+        "task radosgw-admin-rest only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ # just use the first client...
+ client = clients[0]
+
+ ##
+ admin_user = 'ada'
+ admin_display_name = 'Ms. Admin User'
+ admin_access_key = 'MH1WC2XQ1S8UISFDZC8W'
+ admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG'
+ admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write'
+
+ user1 = 'foo'
+ user2 = 'fud'
+ subuser1 = 'foo:foo1'
+ subuser2 = 'foo:foo2'
+ display_name1 = 'Foo'
+ display_name2 = 'Fud'
+ email = 'foo@foo.com'
+ access_key = '9te6NH5mcdcq0Tc5i8i1'
+ secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
+ access_key2 = 'p5YnriCv1nAtykxBrupQ'
+ secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
+ swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
+ swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
+
+ bucket_name = 'myfoo'
+
+ # legend (test cases can be easily grep-ed out)
+ # TESTCASE 'testname','object','method','operation','assertion'
+ # TESTCASE 'create-admin-user','user','create','administrative user','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', admin_user,
+ '--display-name', admin_display_name,
+ '--access-key', admin_access_key,
+ '--secret', admin_secret_key,
+ '--max-buckets', '0',
+ '--caps', admin_caps
+ ])
+    log.error(out)
+    log.error(err)
+    assert not err
+
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote_host = remote.name.split('@')[1]
+ admin_conn = boto.s3.connection.S3Connection(
+ aws_access_key_id=admin_access_key,
+ aws_secret_access_key=admin_secret_key,
+ is_secure=False,
+ port=7280,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
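+    # admin_conn is only used to sign and send the /admin REST requests via
+    # rgwadmin_rest(); regular S3 traffic uses 'connection', created below.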
+
+ # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1})
+ assert ret == 404
+
+ # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['user', 'create'],
+ {'uid' : user1,
+ 'display-name' : display_name1,
+ 'email' : email,
+ 'access-key' : access_key,
+ 'secret-key' : secret_key,
+ 'max-buckets' : '4'
+ })
+
+ assert ret == 200
+
+ # TESTCASE 'info-existing','user','info','existing user','returns correct info'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+
+ assert out['user_id'] == user1
+ assert out['email'] == email
+ assert out['display_name'] == display_name1
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+ assert not out['suspended']
+
+ # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
+ assert ret == 200
+
+ # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert out['suspended']
+
+ # TESTCASE 're-enable','user','enable','suspended user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
+    assert ret == 200
+
+ # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert not out['suspended']
+
+ # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['key', 'create'],
+ {'uid' : user1,
+ 'access-key' : access_key2,
+ 'secret-key' : secret_key2
+ })
+
+
+ assert ret == 200
+
+ # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['keys']) == 2
+ assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
+ assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
+
+ # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['key', 'rm'],
+ {'uid' : user1,
+ 'access-key' : access_key2
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+
+ # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'create'],
+ {'subuser' : subuser1,
+ 'secret-key' : swift_secret1,
+ 'key-type' : 'swift'
+ })
+
+ assert ret == 200
+
+ # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['swift_keys']) == 1
+ assert out['swift_keys'][0]['user'] == subuser1
+ assert out['swift_keys'][0]['secret_key'] == swift_secret1
+
+ # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'create'],
+ {'subuser' : subuser2,
+ 'secret-key' : swift_secret2,
+ 'key-type' : 'swift'
+ })
+
+ assert ret == 200
+
+ # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['swift_keys']) == 2
+ assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
+ assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
+
+ # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['key', 'rm'],
+ {'subuser' : subuser1,
+ 'key-type' :'swift'
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert len(out['swift_keys']) == 1
+
+ # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'rm'],
+ {'subuser' : subuser1
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert len(out['subusers']) == 1
+
+    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key is removed'
+    (ret, out) = rgwadmin_rest(admin_conn,
+            ['subuser', 'rm'],
+            {'subuser' : subuser2,
+            'key-type' : 'swift',
+            'purge-keys' : True
+            })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert len(out['swift_keys']) == 0
+ assert len(out['subusers']) == 0
+
+ # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out) == 0
+
+ # connect to rgw
+ connection = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=7280,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+
+ # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
+ assert ret == 200
+ assert len(out) == 0
+
+ # create a first bucket
+ bucket = connection.create_bucket(bucket_name)
+
+ # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out) == 1
+ assert out[0] == bucket_name
+
+ # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+
+ assert ret == 200
+ assert out['owner'] == user1
+ bucket_id = out['id']
+
+ # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
+ assert ret == 200
+ assert len(out) == 1
+ assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
+
+ # use some space
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('one')
+
+ # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+ assert ret == 200
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 1
+ assert out['usage']['rgw.main']['size_kb'] > 0
+
+ # reclaim it
+ key.delete()
+
+ # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name})
+
+ assert ret == 200
+
+ # create a second user to link the bucket to
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['user', 'create'],
+ {'uid' : user2,
+ 'display-name' : display_name2,
+ 'access-key' : access_key2,
+ 'secret-key' : secret_key2,
+ 'max-buckets' : '1',
+ })
+
+ assert ret == 200
+
+ # try creating an object with the first user before the bucket is relinked
+ denied = False
+ key = boto.s3.key.Key(bucket)
+
+ try:
+ key.set_contents_from_string('two')
+ except boto.exception.S3ResponseError:
+ denied = True
+
+ assert not denied
+
+ # delete the object
+ key.delete()
+
+ # link the bucket to another user
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user2, 'bucket' : bucket_name})
+
+ assert ret == 200
+
+ # try creating an object with the first user which should cause an error
+ key = boto.s3.key.Key(bucket)
+
+ try:
+ key.set_contents_from_string('three')
+ except boto.exception.S3ResponseError:
+ denied = True
+
+ assert denied
+
+ # relink the bucket to the first user and delete the second user
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user1, 'bucket' : bucket_name})
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2})
+ assert ret == 200
+
+ # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
+
+ # upload an object
+ object_name = 'four'
+ key = boto.s3.key.Key(bucket, object_name)
+ key.set_contents_from_string(object_name)
+
+ # now delete it
+ (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name})
+ assert ret == 200
+
+    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+ assert ret == 200
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 0
+
+ # create a bucket for deletion stats
+ useless_bucket = connection.create_bucket('useless_bucket')
+ useless_key = useless_bucket.new_key('useless_key')
+ useless_key.set_contents_from_string('useless string')
+
+ # delete it
+ useless_key.delete()
+ useless_bucket.delete()
+
+ # wait for the statistics to flush
+ time.sleep(60)
+
+ # need to wait for all usage data to get flushed, should take up to 30 seconds
+ timestamp = time.time()
+ while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'}) # last operation we did is delete obj, wait for it to flush
+
+ if get_user_successful_ops(out, user1) > 0:
+ break
+ time.sleep(1)
+
+ assert time.time() - timestamp <= (20 * 60)
+
+ # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'])
+ assert ret == 200
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = get_user_summary(out, user1)
+ total = user_summary['total']
+ assert total['successful_ops'] > 0
+
+ # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ for entry in user_summary['categories']:
+ assert entry['successful_ops'] > 0
+ assert user_summary['user'] == user1
+
+ # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
+ test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
+ for cat in test_categories:
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat})
+ assert ret == 200
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ assert user_summary['user'] == user1
+ assert len(user_summary['categories']) == 1
+ entry = user_summary['categories'][0]
+ assert entry['category'] == cat
+ assert entry['successful_ops'] > 0
+
+ # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1})
+ assert ret == 200
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['entries']) == 0
+ assert len(out['summary']) == 0
+
+ # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
+ assert ret == 200
+
+ # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
+ try:
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('five')
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 403
+
+ # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
+ assert ret == 200
+
+ # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('six')
+
+ # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection'
+
+ # create an object large enough to be split into multiple parts
+ test_string = 'foo'*10000000
+
+ big_key = boto.s3.key.Key(bucket)
+ big_key.set_contents_from_string(test_string)
+
+ # now delete the head
+ big_key.delete()
+
+ # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
+ assert ret == 409
+
+ # delete should fail because ``key`` still exists
+ try:
+ bucket.delete()
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 409
+
+ key.delete()
+ bucket.delete()
+
+ # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
+ bucket = connection.create_bucket(bucket_name)
+
+ # create an object
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('seven')
+
+ # should be private already but guarantee it
+ key.set_acl('private')
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
+ assert ret == 200
+
+ acl = key.get_xml_acl()
+ assert acl == out.strip('\n')
+
+ # add another grantee by making the object public read
+ key.set_acl('public-read')
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
+ assert ret == 200
+
+ acl = key.get_xml_acl()
+ assert acl == out.strip('\n')
+
+ # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key_name = ['eight', 'nine', 'ten', 'eleven']
+ for i in range(4):
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string(key_name[i])
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True})
+ assert ret == 200
+
+ # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
+ caps = 'usage=read'
+ (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' : user1, 'user-caps' : caps})
+ assert ret == 200
+ assert out[0]['perm'] == 'read'
+
+ # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' : user1, 'user-caps' : caps})
+ assert ret == 200
+ assert not out
+
+ # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
+ assert ret == 409
+
+    # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('twelve')
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True})
+ assert ret == 200
+
+ # TESTCASE 'rm-user3','user','info','deleted user','fails'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 404
+
--- /dev/null
+"""
+Run rados gateway agent in test mode
+"""
+import contextlib
+import logging
+import argparse
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+import util.rgw as rgw_utils
+
+log = logging.getLogger(__name__)
+
+def run_radosgw_agent(ctx, config):
+ """
+ Run a single radosgw-agent. See task() for config format.
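+    Returns a list of (client, remote process) tuples, one per agent started.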
+ """
+ return_list = list()
+ for (client, cconf) in config.items():
+ # don't process entries that are not clients
+ if not client.startswith('client.'):
+ log.debug('key {data} does not start with \'client.\', moving on'.format(
+ data=client))
+ continue
+
+ src_client = cconf['src']
+ dest_client = cconf['dest']
+
+ src_zone = rgw_utils.zone_for_client(ctx, src_client)
+ dest_zone = rgw_utils.zone_for_client(ctx, dest_client)
+
+ log.info("source is %s", src_zone)
+ log.info("dest is %s", dest_zone)
+
+ testdir = teuthology.get_testdir(ctx)
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ # figure out which branch to pull from
+ branch = cconf.get('force-branch', None)
+ if not branch:
+ branch = cconf.get('branch', 'master')
+ sha1 = cconf.get('sha1')
+ remote.run(
+ args=[
+ 'cd', testdir, run.Raw('&&'),
+ 'git', 'clone',
+ '-b', branch,
+# 'https://github.com/ceph/radosgw-agent.git',
+ 'git://git.ceph.com/radosgw-agent.git',
+ 'radosgw-agent.{client}'.format(client=client),
+ ]
+ )
+        if sha1 is not None:
+            remote.run(
+                args=[
+                    'cd', testdir, run.Raw('&&'),
+                    'cd', 'radosgw-agent.{client}'.format(client=client),
+                    run.Raw('&&'),
+                    'git', 'reset', '--hard', sha1,
+                ]
+            )
+ remote.run(
+ args=[
+ 'cd', testdir, run.Raw('&&'),
+ 'cd', 'radosgw-agent.{client}'.format(client=client),
+ run.Raw('&&'),
+ './bootstrap',
+ ]
+ )
+
+ src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client,
+ src_zone)
+ dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client,
+ dest_zone)
+ src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client,
+ src_zone)
+ dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client,
+ dest_zone)
+ sync_scope = cconf.get('sync-scope', None)
+ port = cconf.get('port', 8000)
+ daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port)
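+        # build the daemon-helper command line for the agent; it is started
+        # with stdin attached so the task can shut it down cleanly on teardown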
+        in_args = [
+ 'daemon-helper',
+ 'kill',
+ '{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir,
+ client=client),
+ '-v',
+ '--src-access-key', src_access,
+ '--src-secret-key', src_secret,
+ '--source', "http://{addr}:{port}".format(addr=src_host, port=src_port),
+ '--dest-access-key', dest_access,
+ '--dest-secret-key', dest_secret,
+ '--max-entries', str(cconf.get('max-entries', 1000)),
+ '--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format(
+ tdir=testdir,
+ client=client),
+ '--object-sync-timeout', '30',
+ ]
+
+ if cconf.get('metadata-only', False):
+ in_args.append('--metadata-only')
+
+ # the test server and full/incremental flags are mutually exclusive
+ if sync_scope is None:
+ in_args.append('--test-server-host')
+ in_args.append('0.0.0.0')
+ in_args.append('--test-server-port')
+ in_args.append(str(port))
+ log.debug('Starting a sync test server on {client}'.format(client=client))
+ # Stash the radosgw-agent server / port # for use by subsequent tasks
+ ctx.radosgw_agent.endpoint = (client, str(port))
+ else:
+ in_args.append('--sync-scope')
+ in_args.append(sync_scope)
+ log.debug('Starting a {scope} sync on {client}'.format(scope=sync_scope,client=client))
+
+ # positional arg for destination must come last
+ in_args.append("http://{addr}:{port}".format(addr=dest_host,
+ port=dest_port))
+
+ return_list.append((client, remote.run(
+ args=in_args,
+ wait=False,
+ stdin=run.PIPE,
+ logger=log.getChild(daemon_name),
+ )))
+ return return_list
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run radosgw-agents in test mode.
+
+ Configuration is clients to run the agents on, with settings for
+ source client, destination client, and port to listen on. Binds
+ to 0.0.0.0. Port defaults to 8000. This must be run on clients
+ that have the correct zone root pools and rgw zone set in
+ ceph.conf, or the task cannot read the region information from the
+ cluster.
+
+ By default, this task will start an HTTP server that will trigger full
+ or incremental syncs based on requests made to it.
+ Alternatively, a single full sync can be triggered by
+ specifying 'sync-scope: full' or a loop of incremental syncs can be triggered
+ by specifying 'sync-scope: incremental' (the loop will sleep
+ '--incremental-sync-delay' seconds between each sync, default is 30 seconds).
+
+ By default, both data and metadata are synced. To only sync
+ metadata, for example because you want to sync between regions,
+ set metadata-only: true.
+
+ An example::
+
+ tasks:
+ - ceph:
+ conf:
+ client.0:
+ rgw zone = foo
+ rgw zone root pool = .root.pool
+ client.1:
+ rgw zone = bar
+ rgw zone root pool = .root.pool2
+ - rgw: # region configuration omitted for brevity
+ - radosgw-agent:
+ client.0:
+ branch: wip-next-feature-branch
+ src: client.0
+ dest: client.1
+ sync-scope: full
+ metadata-only: true
+ # port: 8000 (default)
+ client.1:
+ src: client.1
+ dest: client.0
+ port: 8001
+ """
+ assert isinstance(config, dict), 'rgw_sync_agent requires a dictionary config'
+ log.debug("config is %s", config)
+
+ overrides = ctx.config.get('overrides', {})
+ # merge each client section, but only if it exists in config since there isn't
+ # a sensible default action for this task
+ for client in config.iterkeys():
+ if config[client]:
+ log.debug('config[{client}]: {data}'.format(client=client, data=config[client]))
+ teuthology.deep_merge(config[client], overrides.get('radosgw-agent', {}))
+
+ ctx.radosgw_agent = argparse.Namespace()
+ ctx.radosgw_agent.config = config
+
+ procs = run_radosgw_agent(ctx, config)
+
+ ctx.radosgw_agent.procs = procs
+
+ try:
+ yield
+ finally:
+ testdir = teuthology.get_testdir(ctx)
+ try:
+ for client, proc in procs:
+ log.info("shutting down sync agent on %s", client)
+ proc.stdin.close()
+ proc.wait()
+ finally:
+ for client, proc in procs:
+ ctx.cluster.only(client).run(
+ args=[
+ 'rm', '-rf',
+ '{tdir}/radosgw-agent.{client}'.format(tdir=testdir,
+ client=client)
+ ]
+ )
--- /dev/null
+"""
+Rbd testing task
+"""
+import contextlib
+import logging
+import os
+
+from cStringIO import StringIO
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.parallel import parallel
+from teuthology.task.common_fs_utils import generic_mkfs
+from teuthology.task.common_fs_utils import generic_mount
+from teuthology.task.common_fs_utils import default_image_name
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def create_image(ctx, config):
+ """
+ Create an rbd image.
+
+ For example::
+
+ tasks:
+ - ceph:
+ - rbd.create_image:
+ client.0:
+ image_name: testimage
+ image_size: 100
+ image_format: 1
+ client.1:
+
+ Image size is expressed as a number of megabytes; default value
+ is 10240.
+
+ Image format value must be either 1 or 2; default value is 1.
+
+ """
+ assert isinstance(config, dict) or isinstance(config, list), \
+ "task create_image only supports a list or dictionary for configuration"
+
+ if isinstance(config, dict):
+ images = config.items()
+ else:
+ images = [(role, None) for role in config]
+
+ testdir = teuthology.get_testdir(ctx)
+ for role, properties in images:
+ if properties is None:
+ properties = {}
+ name = properties.get('image_name', default_image_name(role))
+ size = properties.get('image_size', 10240)
+ fmt = properties.get('image_format', 1)
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+ log.info('Creating image {name} with size {size}'.format(name=name,
+ size=size))
+ args = [
+ 'adjust-ulimits',
+            'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'rbd',
+ '-p', 'rbd',
+ 'create',
+ '--size', str(size),
+ name,
+ ]
+ # omit format option if using the default (format 1)
+        # since old versions of rbd don't support it
+ if int(fmt) != 1:
+ args += ['--format', str(fmt)]
+ remote.run(args=args)
+ try:
+ yield
+ finally:
+ log.info('Deleting rbd images...')
+ for role, properties in images:
+ if properties is None:
+ properties = {}
+ name = properties.get('image_name', default_image_name(role))
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote.run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'rbd',
+ '-p', 'rbd',
+ 'rm',
+ name,
+ ],
+ )
+
+@contextlib.contextmanager
+def modprobe(ctx, config):
+ """
+    Load the rbd kernel module.
+
+ For example::
+
+ tasks:
+ - ceph:
+ - rbd.create_image: [client.0]
+ - rbd.modprobe: [client.0]
+ """
+ log.info('Loading rbd kernel module...')
+ for role in config:
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote.run(
+ args=[
+ 'sudo',
+ 'modprobe',
+ 'rbd',
+ ],
+ )
+ try:
+ yield
+ finally:
+ log.info('Unloading rbd kernel module...')
+ for role in config:
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote.run(
+ args=[
+ 'sudo',
+ 'modprobe',
+ '-r',
+ 'rbd',
+ # force errors to be ignored; necessary if more
+ # than one device was created, which may mean
+ # the module isn't quite ready to go the first
+ # time through.
+ run.Raw('||'),
+ 'true',
+ ],
+ )
+
+@contextlib.contextmanager
+def dev_create(ctx, config):
+ """
+ Map block devices to rbd images.
+
+ For example::
+
+ tasks:
+ - ceph:
+ - rbd.create_image: [client.0]
+ - rbd.modprobe: [client.0]
+ - rbd.dev_create:
+ client.0: testimage.client.0
+ """
+ assert isinstance(config, dict) or isinstance(config, list), \
+ "task dev_create only supports a list or dictionary for configuration"
+
+ if isinstance(config, dict):
+ role_images = config.items()
+ else:
+ role_images = [(role, None) for role in config]
+
+ log.info('Creating rbd block devices...')
+
+ testdir = teuthology.get_testdir(ctx)
+
+ for role, image in role_images:
+ if image is None:
+ image = default_image_name(role)
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+
+ remote.run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'rbd',
+ '--user', role.rsplit('.')[-1],
+ '-p', 'rbd',
+ 'map',
+ image,
+ run.Raw('&&'),
+ # wait for the symlink to be created by udev
+ 'while', 'test', '!', '-e', '/dev/rbd/rbd/{image}'.format(image=image), run.Raw(';'), 'do',
+ 'sleep', '1', run.Raw(';'),
+ 'done',
+ ],
+ )
+ try:
+ yield
+ finally:
+ log.info('Unmapping rbd devices...')
+ for role, image in role_images:
+ if image is None:
+ image = default_image_name(role)
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote.run(
+ args=[
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'rbd',
+ '-p', 'rbd',
+ 'unmap',
+ '/dev/rbd/rbd/{imgname}'.format(imgname=image),
+ run.Raw('&&'),
+ # wait for the symlink to be deleted by udev
+ 'while', 'test', '-e', '/dev/rbd/rbd/{image}'.format(image=image),
+ run.Raw(';'),
+ 'do',
+ 'sleep', '1', run.Raw(';'),
+ 'done',
+ ],
+ )
+
+
+def rbd_devname_rtn(ctx, image):
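+    """
+    Return the /dev/rbd/rbd/<image> device path that udev creates for
+    the given image name.
+    """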
+ return '/dev/rbd/rbd/{image}'.format(image=image)
+
+def canonical_path(ctx, role, path):
+ """
+ Determine the canonical path for a given path on the host
+ representing the given role. A canonical path contains no
+ . or .. components, and includes no symbolic links.
+ """
+ version_fp = StringIO()
+ ctx.cluster.only(role).run(
+ args=[ 'readlink', '-f', path ],
+ stdout=version_fp,
+ )
+ canonical_path = version_fp.getvalue().rstrip('\n')
+ version_fp.close()
+ return canonical_path
+
+@contextlib.contextmanager
+def run_xfstests(ctx, config):
+ """
+ Run xfstests over specified devices.
+
+ Warning: both the test and scratch devices specified will be
+ overwritten. Normally xfstests modifies (but does not destroy)
+ the test device, but for now the run script used here re-makes
+ both filesystems.
+
+ Note: Only one instance of xfstests can run on a single host at
+ a time, although this is not enforced.
+
+ This task in its current form needs some improvement. For
+ example, it assumes all roles provided in the config are
+ clients, and that the config provided is a list of key/value
+ pairs. For now please use the xfstests() interface, below.
+
+ For example::
+
+ tasks:
+ - ceph:
+ - rbd.run_xfstests:
+ client.0:
+ count: 2
+ test_dev: 'test_dev'
+ scratch_dev: 'scratch_dev'
+ fs_type: 'xfs'
+ tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48'
+ """
+ with parallel() as p:
+ for role, properties in config.items():
+ p.spawn(run_xfstests_one_client, ctx, role, properties)
+ yield
+
+def run_xfstests_one_client(ctx, role, properties):
+ """
+ Spawned routine to handle xfs tests for a single client
+ """
+ testdir = teuthology.get_testdir(ctx)
+ try:
+ count = properties.get('count')
+ test_dev = properties.get('test_dev')
+ assert test_dev is not None, \
+ "task run_xfstests requires test_dev to be defined"
+ test_dev = canonical_path(ctx, role, test_dev)
+
+ scratch_dev = properties.get('scratch_dev')
+ assert scratch_dev is not None, \
+ "task run_xfstests requires scratch_dev to be defined"
+ scratch_dev = canonical_path(ctx, role, scratch_dev)
+
+ fs_type = properties.get('fs_type')
+ tests = properties.get('tests')
+
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+
+ # Fetch the test script
+ test_root = teuthology.get_testdir(ctx)
+ test_script = 'run_xfstests.sh'
+ test_path = os.path.join(test_root, test_script)
+
+ git_branch = 'master'
+ test_url = 'https://raw.github.com/ceph/ceph/{branch}/qa/{script}'.format(branch=git_branch, script=test_script)
+ # test_url = 'http://ceph.newdream.net/git/?p=ceph.git;a=blob_plain;hb=refs/heads/{branch};f=qa/{script}'.format(branch=git_branch, script=test_script)
+
+ log.info('Fetching {script} for {role} from {url}'.format(script=test_script,
+ role=role,
+ url=test_url))
+ args = [ 'wget', '-O', test_path, '--', test_url ]
+ remote.run(args=args)
+
+ log.info('Running xfstests on {role}:'.format(role=role))
+ log.info(' iteration count: {count}:'.format(count=count))
+ log.info(' test device: {dev}'.format(dev=test_dev))
+ log.info(' scratch device: {dev}'.format(dev=scratch_dev))
+ log.info(' using fs_type: {fs_type}'.format(fs_type=fs_type))
+ log.info(' tests to run: {tests}'.format(tests=tests))
+
+ # Note that the device paths are interpreted using
+ # readlink -f <path> in order to get their canonical
+ # pathname (so it matches what the kernel remembers).
+ args = [
+ '/usr/bin/sudo',
+ 'TESTDIR={tdir}'.format(tdir=testdir),
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '/bin/bash',
+ test_path,
+ '-c', str(count),
+ '-f', fs_type,
+ '-t', test_dev,
+ '-s', scratch_dev,
+ ]
+ if tests:
+ args.append(tests)
+ remote.run(args=args, logger=log.getChild(role))
+ finally:
+ log.info('Removing {script} on {role}'.format(script=test_script,
+ role=role))
+ remote.run(args=['rm', '-f', test_path])
+
+@contextlib.contextmanager
+def xfstests(ctx, config):
+ """
+ Run xfstests over rbd devices. This interface sets up all
+ required configuration automatically if not otherwise specified.
+ Note that only one instance of xfstests can run on a single host
+ at a time. By default, the set of tests specified is run once.
+ If a (non-zero) count value is supplied, the complete set of
+ tests will be run that number of times.
+
+ For example::
+
+ tasks:
+ - ceph:
+ # Image sizes are in MB
+ - rbd.xfstests:
+ client.0:
+ count: 3
+ test_image: 'test_image'
+ test_size: 250
+ test_format: 2
+ scratch_image: 'scratch_image'
+ scratch_size: 250
+ scratch_format: 1
+ fs_type: 'xfs'
+ tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48'
+ """
+ if config is None:
+ config = { 'all': None }
+ assert isinstance(config, dict) or isinstance(config, list), \
+ "task xfstests only supports a list or dictionary for configuration"
+ if isinstance(config, dict):
+ config = teuthology.replace_all_with_clients(ctx.cluster, config)
+ runs = config.items()
+ else:
+ runs = [(role, None) for role in config]
+
+ running_xfstests = {}
+ for role, properties in runs:
+ assert role.startswith('client.'), \
+ "task xfstests can only run on client nodes"
+ for host, roles_for_host in ctx.cluster.remotes.items():
+ if role in roles_for_host:
+ assert host not in running_xfstests, \
+ "task xfstests allows only one instance at a time per host"
+ running_xfstests[host] = True
+
+ images_config = {}
+ scratch_config = {}
+ modprobe_config = {}
+ image_map_config = {}
+ scratch_map_config = {}
+ xfstests_config = {}
+ for role, properties in runs:
+ if properties is None:
+ properties = {}
+
+ test_image = properties.get('test_image', 'test_image.{role}'.format(role=role))
+ test_size = properties.get('test_size', 2000) # 2G
+ test_fmt = properties.get('test_format', 1)
+ scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role))
+ scratch_size = properties.get('scratch_size', 10000) # 10G
+ scratch_fmt = properties.get('scratch_format', 1)
+
+ images_config[role] = dict(
+ image_name=test_image,
+ image_size=test_size,
+ image_format=test_fmt,
+ )
+
+ scratch_config[role] = dict(
+ image_name=scratch_image,
+ image_size=scratch_size,
+ image_format=scratch_fmt,
+ )
+
+ xfstests_config[role] = dict(
+ count=properties.get('count', 1),
+ test_dev='/dev/rbd/rbd/{image}'.format(image=test_image),
+ scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image),
+ fs_type=properties.get('fs_type', 'xfs'),
+ tests=properties.get('tests'),
+ )
+
+ log.info('Setting up xfstests using RBD images:')
+ log.info(' test ({size} MB): {image}'.format(size=test_size,
+ image=test_image))
+ log.info(' scratch ({size} MB): {image}'.format(size=scratch_size,
+ image=scratch_image))
+ modprobe_config[role] = None
+ image_map_config[role] = test_image
+ scratch_map_config[role] = scratch_image
+
+ with contextutil.nested(
+ lambda: create_image(ctx=ctx, config=images_config),
+ lambda: create_image(ctx=ctx, config=scratch_config),
+ lambda: modprobe(ctx=ctx, config=modprobe_config),
+ lambda: dev_create(ctx=ctx, config=image_map_config),
+ lambda: dev_create(ctx=ctx, config=scratch_map_config),
+ lambda: run_xfstests(ctx=ctx, config=xfstests_config),
+ ):
+ yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Create and mount an rbd image.
+
+ For example, you can specify which clients to run on::
+
+ tasks:
+ - ceph:
+ - rbd: [client.0, client.1]
+
+ There are a few image options::
+
+ tasks:
+ - ceph:
+ - rbd:
+ client.0: # uses defaults
+ client.1:
+ image_name: foo
+ image_size: 2048
+ image_format: 2
+ fs_type: xfs
+
+ To use default options on all clients::
+
+ tasks:
+ - ceph:
+ - rbd:
+ all:
+
+ To create 20GiB images and format them with xfs on all clients::
+
+ tasks:
+ - ceph:
+ - rbd:
+ all:
+ image_size: 20480
+ fs_type: xfs
+ """
+ if config is None:
+ config = { 'all': None }
+ norm_config = config
+ if isinstance(config, dict):
+ norm_config = teuthology.replace_all_with_clients(ctx.cluster, config)
+ if isinstance(norm_config, dict):
+ role_images = {}
+ for role, properties in norm_config.iteritems():
+ if properties is None:
+ properties = {}
+ role_images[role] = properties.get('image_name')
+ else:
+ role_images = norm_config
+
+ log.debug('rbd config is: %s', norm_config)
+
+ with contextutil.nested(
+ lambda: create_image(ctx=ctx, config=norm_config),
+ lambda: modprobe(ctx=ctx, config=norm_config),
+ lambda: dev_create(ctx=ctx, config=role_images),
+ lambda: generic_mkfs(ctx=ctx, config=norm_config,
+ devname_rtn=rbd_devname_rtn),
+ lambda: generic_mount(ctx=ctx, config=role_images,
+ devname_rtn=rbd_devname_rtn),
+ ):
+ yield
--- /dev/null
+"""
+Run fsx on an rbd image
+"""
+import contextlib
+import logging
+
+from teuthology.parallel import parallel
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run fsx on an rbd image.
+
+ Currently this requires running as client.admin
+ to create a pool.
+
+ Specify which clients to run on as a list::
+
+ tasks:
+ ceph:
+ rbd_fsx:
+ clients: [client.0, client.1]
+
+    You can optionally change some properties of fsx::
+
+ tasks:
+ ceph:
+ rbd_fsx:
+ clients: <list of clients>
+ seed: <random seed number, or 0 to use the time>
+ ops: <number of operations to do>
+ size: <maximum image size in bytes>
+ """
+ log.info('starting rbd_fsx...')
+ with parallel() as p:
+ for role in config['clients']:
+ p.spawn(_run_one_client, ctx, config, role)
+ yield
+
+def _run_one_client(ctx, config, role):
+ """Spawned task that runs the client"""
+ testdir = teuthology.get_testdir(ctx)
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote.run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'ceph_test_librbd_fsx',
+ '-d',
+ '-W', '-R', # mmap doesn't work with rbd
+ '-p', str(config.get('progress_interval', 100)), # show progress
+ '-P', '{tdir}/archive'.format(tdir=testdir),
+ '-t', str(config.get('truncbdy',1)),
+ '-l', str(config.get('size', 250000000)),
+ '-S', str(config.get('seed', 0)),
+ '-N', str(config.get('ops', 1000)),
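+            # the pool and image names passed to ceph_test_librbd_fsx are
+            # derived from the client role, so parallel clients use
+            # separate pools and images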
+ 'pool_{pool}'.format(pool=role),
+ 'image_{image}'.format(image=role),
+ ],
+ )
--- /dev/null
+"""
+Recovery system benchmarking
+"""
+from cStringIO import StringIO
+
+import contextlib
+import gevent
+import json
+import logging
+import random
+import time
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Benchmark the recovery system.
+
+ Generates objects with smalliobench, runs it normally to get a
+ baseline performance measurement, then marks an OSD out and reruns
+ to measure performance during recovery.
+
+ The config should be as follows:
+
+ recovery_bench:
+ duration: <seconds for each measurement run>
+ num_objects: <number of objects>
+ io_size: <io size in bytes>
+
+ example:
+
+ tasks:
+ - ceph:
+ - recovery_bench:
+ duration: 60
+ num_objects: 500
+ io_size: 4096
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'recovery_bench task only accepts a dict for configuration'
+
+ log.info('Beginning recovery bench...')
+
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+ while len(manager.get_osd_status()['up']) < num_osds:
+ manager.sleep(10)
+
+ bench_proc = RecoveryBencher(
+ manager,
+ config,
+ )
+ try:
+ yield
+ finally:
+ log.info('joining recovery bencher')
+ bench_proc.do_join()
+
+class RecoveryBencher:
+ """
+ RecoveryBencher
+ """
+ def __init__(self, manager, config):
+ self.ceph_manager = manager
+ self.ceph_manager.wait_for_clean()
+
+ osd_status = self.ceph_manager.get_osd_status()
+ self.osds = osd_status['up']
+
+ self.config = config
+ if self.config is None:
+ self.config = dict()
+
+ else:
+ def tmp(x):
+ """
+ Local wrapper to print value.
+ """
+ print x
+ self.log = tmp
+
+ log.info("spawning thread")
+
+ self.thread = gevent.spawn(self.do_bench)
+
+ def do_join(self):
+ """
+ Join the recovery bencher. This is called after the main
+ task exits.
+ """
+ self.thread.get()
+
+ def do_bench(self):
+ """
+ Do the benchmarking.
+ """
+ duration = self.config.get("duration", 60)
+ num_objects = self.config.get("num_objects", 500)
+ io_size = self.config.get("io_size", 4096)
+
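+        # run smalliobench from the host of a randomly chosen up OSD; that
+        # same OSD is marked out later to trigger recovery for the second run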
+ osd = str(random.choice(self.osds))
+ (osd_remote,) = self.ceph_manager.ctx.cluster.only('osd.%s' % osd).remotes.iterkeys()
+
+ testdir = teuthology.get_testdir(self.ceph_manager.ctx)
+
+ # create the objects
+ osd_remote.run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'smalliobench'.format(tdir=testdir),
+ '--use-prefix', 'recovery_bench',
+ '--init-only', '1',
+ '--num-objects', str(num_objects),
+ '--io-size', str(io_size),
+ ],
+ wait=True,
+ )
+
+ # baseline bench
+ log.info('non-recovery (baseline)')
+ p = osd_remote.run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'smalliobench',
+ '--use-prefix', 'recovery_bench',
+ '--do-not-init', '1',
+ '--duration', str(duration),
+ '--io-size', str(io_size),
+ ],
+ stdout=StringIO(),
+ stderr=StringIO(),
+ wait=True,
+ )
+ self.process_samples(p.stderr.getvalue())
+
+ self.ceph_manager.raw_cluster_cmd('osd', 'out', osd)
+ time.sleep(5)
+
+ # recovery bench
+ log.info('recovery active')
+ p = osd_remote.run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'smalliobench',
+ '--use-prefix', 'recovery_bench',
+ '--do-not-init', '1',
+ '--duration', str(duration),
+ '--io-size', str(io_size),
+ ],
+ stdout=StringIO(),
+ stderr=StringIO(),
+ wait=True,
+ )
+ self.process_samples(p.stderr.getvalue())
+
+ self.ceph_manager.raw_cluster_cmd('osd', 'in', osd)
+
+ def process_samples(self, input):
+ """
+ Extract samples from the input and process the results
+
+ :param input: input lines in JSON format
+ """
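+        # each input line is expected to be a small JSON document, roughly of
+        # the form {"type": "write", "latency": 0.0012}; the exact fields
+        # depend on the smalliobench output, and unparsable lines are skipped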
+ lat = {}
+ for line in input.split('\n'):
+ try:
+ sample = json.loads(line)
+ samples = lat.setdefault(sample['type'], [])
+ samples.append(float(sample['latency']))
+ except Exception:
+ pass
+
+ for type in lat:
+ samples = lat[type]
+ samples.sort()
+
+ num = len(samples)
+
+ # median
+            if num % 2 == 1:  # odd number of samples
+ median = samples[num / 2]
+ else:
+ median = (samples[num / 2] + samples[num / 2 - 1]) / 2
+
+ # 99%
+ ninety_nine = samples[int(num * 0.99)]
+
+ log.info("%s: median %f, 99%% %f" % (type, median, ninety_nine))
--- /dev/null
+"""
+Lost_unfound
+"""
+import logging
+import ceph_manager
+from teuthology import misc as teuthology
+from util.rados import rados
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Test handling of lost objects.
+
+    A fairly rigid cluster is brought up and tested by this task.
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'lost_unfound task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ while len(manager.get_osd_status()['up']) < 3:
+ manager.sleep(10)
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_clean()
+
+ # something that is always there
+ dummyfile = '/etc/fstab'
+
+ # take an osd out until the very end
+ manager.kill_osd(2)
+ manager.mark_down_osd(2)
+ manager.mark_out_osd(2)
+
+ # kludge to make sure they get a map
+ rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.wait_for_recovery()
+
+ # create old objects
+ for f in range(1, 10):
+ rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'rm', 'existed_%d' % f])
+
+ # delay recovery, and make the pg log very long (to prevent backfill)
+ manager.raw_cluster_cmd(
+ 'tell', 'osd.1',
+ 'injectargs',
+ '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
+ )
+
+ manager.kill_osd(0)
+ manager.mark_down_osd(0)
+
+ for f in range(1, 10):
+ rados(ctx, mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
+
+ # bring osd.0 back up, let it peer, but don't replicate the new
+ # objects...
+    log.info('osd.0 command_args is %s',
+             ctx.daemons.get_daemon('osd', 0).command_args)
+ ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([
+ '--osd-recovery-delay-start', '1000'
+ ])
+ manager.revive_osd(0)
+ manager.mark_in_osd(0)
+ manager.wait_till_osd_is_up(0)
+
+ manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.wait_till_active()
+
+ # take out osd.1 and the only copy of those objects.
+ manager.kill_osd(1)
+ manager.mark_down_osd(1)
+ manager.mark_out_osd(1)
+ manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
+
+    # bring osd.2 back up so that, in theory, things could recover fully
+ manager.revive_osd(2)
+ manager.mark_in_osd(2)
+ manager.wait_till_osd_is_up(2)
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_till_active()
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+
+ # verify that there are unfound objects
+ unfound = manager.get_num_unfound_objects()
+ log.info("there are %d unfound objects" % unfound)
+ assert unfound
+
+ # mark stuff lost
+ pgs = manager.get_pg_stats()
+ for pg in pgs:
+ if pg['stat_sum']['num_objects_unfound'] > 0:
+ primary = 'osd.%d' % pg['acting'][0]
+
+            # verify that we can list them directly from the osd
+            log.info('listing missing/lost in %s state %s', pg['pgid'],
+                     pg['state'])
+ m = manager.list_pg_missing(pg['pgid'])
+ #log.info('%s' % m)
+ assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
+ num_unfound=0
+ for o in m['objects']:
+ if len(o['locations']) == 0:
+ num_unfound += 1
+ assert m['num_unfound'] == num_unfound
+
+            log.info("deleting unfound objects in %s on %s", pg['pgid'], primary)
+ manager.raw_cluster_cmd('pg', pg['pgid'],
+ 'mark_unfound_lost', 'delete')
+ else:
+ log.info("no unfound in %s", pg['pgid'])
+
+ manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
+ manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
+ manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
+ manager.wait_for_recovery()
+
+ # verify result
+ for f in range(1, 10):
+ err = rados(ctx, mon, ['-p', 'data', 'get', 'new_%d' % f, '-'])
+ assert err
+ err = rados(ctx, mon, ['-p', 'data', 'get', 'existed_%d' % f, '-'])
+ assert err
+ err = rados(ctx, mon, ['-p', 'data', 'get', 'existing_%d' % f, '-'])
+ assert err
+
+ # see if osd.1 can cope
+ manager.revive_osd(1)
+ manager.mark_in_osd(1)
+ manager.wait_till_osd_is_up(1)
+ manager.wait_for_clean()
--- /dev/null
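+"""
+Test pg repair of objects corrupted via the osd admin socket
+"""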
+import logging
+import time
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def setup(ctx, config):
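+    """Wait for a clean cluster and create a small pool for the repair tests."""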
+ ctx.manager.wait_for_clean()
+ ctx.manager.create_pool("repair_test_pool", 1)
+ return "repair_test_pool"
+
+def teardown(ctx, config, pool):
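+    """Remove the test pool and wait for the cluster to settle."""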
+ ctx.manager.remove_pool(pool)
+ ctx.manager.wait_for_clean()
+
+def run_test(ctx, config, test):
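+    """Run a single repair test with its own freshly created pool."""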
+ s = setup(ctx, config)
+ test(ctx, config, s)
+ teardown(ctx, config, s)
+
+def choose_primary(ctx):
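+    """Return a function that yields the primary osd for a pg in a pool."""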
+ def ret(pool, num):
+ log.info("Choosing primary")
+ return ctx.manager.get_pg_primary(pool, num)
+ return ret
+
+def choose_replica(ctx):
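+    """Return a function that yields a replica osd for a pg in a pool."""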
+ def ret(pool, num):
+ log.info("Choosing replica")
+ return ctx.manager.get_pg_replica(pool, num)
+ return ret
+
+def trunc(ctx):
+ def ret(osd, pool, obj):
+ log.info("truncating object")
+ return ctx.manager.osd_admin_socket(
+ osd,
+ ['truncobj', pool, obj, '1'])
+ return ret
+
+def dataerr(ctx):
+ def ret(osd, pool, obj):
+ log.info("injecting data err on object")
+ return ctx.manager.osd_admin_socket(
+ osd,
+ ['injectdataerr', pool, obj])
+ return ret
+
+def mdataerr(ctx):
+ def ret(osd, pool, obj):
+ log.info("injecting mdata err on object")
+ return ctx.manager.osd_admin_socket(
+ osd,
+ ['injectmdataerr', pool, obj])
+ return ret
+
+def omaperr(ctx):
+ def ret(osd, pool, obj):
+ log.info("injecting omap err on object")
+        return ctx.manager.osd_admin_socket(osd, ['setomapval', pool, obj, 'badkey', 'badval'])
+ return ret
+
+def gen_repair_test_1(corrupter, chooser, scrub_type):
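+    """
+    Build a test that corrupts one object with the given corrupter,
+    verifies the pg goes inconsistent after a scrub of scrub_type, then
+    repairs it and verifies the pg is consistent again.
+    """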
+ def ret(ctx, config, pool):
+ log.info("starting repair test type 1")
+ victim_osd = chooser(pool, 0)
+
+ # create object
+ log.info("doing put")
+ ctx.manager.do_put(pool, 'repair_test_obj', '/etc/hosts')
+
+ # corrupt object
+ log.info("corrupting object")
+ corrupter(victim_osd, pool, 'repair_test_obj')
+
+ # verify inconsistent
+ log.info("scrubbing")
+ ctx.manager.do_pg_scrub(pool, 0, scrub_type)
+
+ assert ctx.manager.pg_inconsistent(pool, 0)
+
+ # repair
+ log.info("repairing")
+ ctx.manager.do_pg_scrub(pool, 0, "repair")
+
+ log.info("re-scrubbing")
+ ctx.manager.do_pg_scrub(pool, 0, scrub_type)
+
+ # verify consistent
+ assert not ctx.manager.pg_inconsistent(pool, 0)
+ log.info("done")
+ return ret
+
+def gen_repair_test_2(chooser):
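+    """
+    Build a test that writes several objects, injects omap, data, mdata
+    and truncation errors, and verifies that repair clears the resulting
+    inconsistency.
+    """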
+ def ret(ctx, config, pool):
+ log.info("starting repair test type 2")
+ victim_osd = chooser(pool, 0)
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ # create object
+ log.info("doing put and setomapval")
+ ctx.manager.do_put(pool, 'file1', '/etc/hosts')
+ ctx.manager.do_rados(mon, ['-p', pool, 'setomapval', 'file1', 'key', 'val'])
+ ctx.manager.do_put(pool, 'file2', '/etc/hosts')
+ ctx.manager.do_put(pool, 'file3', '/etc/hosts')
+ ctx.manager.do_put(pool, 'file4', '/etc/hosts')
+ ctx.manager.do_put(pool, 'file5', '/etc/hosts')
+ ctx.manager.do_rados(mon, ['-p', pool, 'setomapval', 'file5', 'key', 'val'])
+ ctx.manager.do_put(pool, 'file6', '/etc/hosts')
+
+ # corrupt object
+ log.info("corrupting object")
+ omaperr(ctx)(victim_osd, pool, 'file1')
+
+ # verify inconsistent
+ log.info("scrubbing")
+ ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub')
+
+ assert ctx.manager.pg_inconsistent(pool, 0)
+
+ # Regression test for bug #4778, should still
+ # be inconsistent after scrub
+ ctx.manager.do_pg_scrub(pool, 0, 'scrub')
+
+ assert ctx.manager.pg_inconsistent(pool, 0)
+
+ # Additional corruptions including 2 types for file1
+ log.info("corrupting more objects")
+ dataerr(ctx)(victim_osd, pool, 'file1')
+ mdataerr(ctx)(victim_osd, pool, 'file2')
+ trunc(ctx)(victim_osd, pool, 'file3')
+ omaperr(ctx)(victim_osd, pool, 'file6')
+
+ # see still inconsistent
+ log.info("scrubbing")
+ ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub')
+
+ assert ctx.manager.pg_inconsistent(pool, 0)
+
+ # repair
+ log.info("repairing")
+ ctx.manager.do_pg_scrub(pool, 0, "repair")
+
+ # Let repair clear inconsistent flag
+ time.sleep(10)
+
+ # verify consistent
+ assert not ctx.manager.pg_inconsistent(pool, 0)
+
+ # In the future repair might determine state of
+ # inconsistency itself, verify with a deep-scrub
+ log.info("scrubbing")
+ ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub')
+
+ # verify consistent
+ assert not ctx.manager.pg_inconsistent(pool, 0)
+
+ log.info("done")
+ return ret
+
+def task(ctx, config):
+ """
+ Test [deep] repair in several situations:
+ Repair [Truncate, Data EIO, MData EIO] on [Primary|Replica]
+
+ The config should be as follows:
+
+ Must include the log-whitelist below
+ Must enable filestore_debug_inject_read_err config
+
+ example:
+
+ tasks:
+ - chef:
+ - install:
+ - ceph:
+ log-whitelist: ['candidate had a read error', 'deep-scrub 0 missing, 1 inconsistent objects', 'deep-scrub 0 missing, 4 inconsistent objects', 'deep-scrub 1 errors', 'deep-scrub 4 errors', '!= known omap_digest', 'repair 0 missing, 1 inconsistent objects', 'repair 0 missing, 4 inconsistent objects', 'repair 1 errors, 1 fixed', 'repair 4 errors, 4 fixed', 'scrub 0 missing, 1 inconsistent', 'scrub 1 errors', 'size 1 != known size']
+ conf:
+ osd:
+ filestore debug inject read err: true
+ - repair_test:
+
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'repair_test task only accepts a dict for config'
+
+ if not hasattr(ctx, 'manager'):
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ ctx.manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager')
+ )
+
+ tests = [
+ gen_repair_test_1(mdataerr(ctx), choose_primary(ctx), "scrub"),
+ gen_repair_test_1(mdataerr(ctx), choose_replica(ctx), "scrub"),
+ gen_repair_test_1(dataerr(ctx), choose_primary(ctx), "deep-scrub"),
+ gen_repair_test_1(dataerr(ctx), choose_replica(ctx), "deep-scrub"),
+ gen_repair_test_1(trunc(ctx), choose_primary(ctx), "scrub"),
+ gen_repair_test_1(trunc(ctx), choose_replica(ctx), "scrub"),
+ gen_repair_test_2(choose_primary(ctx)),
+ gen_repair_test_2(choose_replica(ctx))
+ ]
+
+ for test in tests:
+ run_test(ctx, config, test)
--- /dev/null
+"""
+Rest Api
+"""
+import logging
+import contextlib
+import time
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.orchestra import run
+from tasks.ceph import DaemonGroup
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def run_rest_api_daemon(ctx, api_clients):
+ """
+ Wrapper starts the rest api daemons
+ """
+ if not hasattr(ctx, 'daemons'):
+ ctx.daemons = DaemonGroup()
+ remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+ for rems, roles in remotes.iteritems():
+ for whole_id_ in roles:
+ if whole_id_ in api_clients:
+                id_ = whole_id_[len('client.'):]
+ run_cmd = [
+ 'sudo',
+ 'daemon-helper',
+ 'kill',
+ 'ceph-rest-api',
+ '-n',
+ 'client.rest{id}'.format(id=id_), ]
+ cl_rest_id = 'client.rest{id}'.format(id=id_)
+ ctx.daemons.add_daemon(rems, 'restapi',
+ cl_rest_id,
+ args=run_cmd,
+ logger=log.getChild(cl_rest_id),
+ stdin=run.PIPE,
+ wait=False,
+ )
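+                # poll the local status endpoint until ceph-rest-api answers
+                # (up to roughly a minute)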
+ for i in range(1, 12):
+ log.info('testing for ceph-rest-api try {0}'.format(i))
+ run_cmd = [
+ 'wget',
+ '-O',
+ '/dev/null',
+ '-q',
+ 'http://localhost:5000/api/v0.1/status'
+ ]
+ proc = rems.run(
+ args=run_cmd,
+ check_status=False
+ )
+ if proc.exitstatus == 0:
+ break
+ time.sleep(5)
+ if proc.exitstatus != 0:
+ raise RuntimeError('Cannot contact ceph-rest-api')
+ try:
+ yield
+
+ finally:
+        # TODO: destroy daemons started -- modify iter_daemons_of_role
+ teuthology.stop_daemons_of_type(ctx, 'restapi')
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Start up rest-api.
+
+    To start on all clients::
+
+ tasks:
+ - ceph:
+ - rest-api:
+
+ To only run on certain clients::
+
+ tasks:
+ - ceph:
+ - rest-api: [client.0, client.3]
+
+ or
+
+ tasks:
+ - ceph:
+ - rest-api:
+ client.0:
+ client.3:
+
+ The general flow of things here is:
+ 1. Find clients on which rest-api is supposed to run (api_clients)
+ 2. Generate keyring values
+ 3. Start up ceph-rest-api daemons
+ On cleanup:
+ 4. Stop the daemons
+ 5. Delete keyring value files.
+ """
+ api_clients = []
+ remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+ log.info(remotes)
+    if config is None:
+ api_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ else:
+ api_clients = config
+ log.info(api_clients)
+ testdir = teuthology.get_testdir(ctx)
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+ for rems, roles in remotes.iteritems():
+ for whole_id_ in roles:
+ if whole_id_ in api_clients:
+ id_ = whole_id_[len('client.'):]
+ keyring = '/etc/ceph/ceph.client.rest{id}.keyring'.format(
+ id=id_)
+ rems.run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ '--create-keyring',
+ '--gen-key',
+ '--name=client.rest{id}'.format(id=id_),
+ '--set-uid=0',
+ '--cap', 'mon', 'allow *',
+ '--cap', 'osd', 'allow *',
+ '--cap', 'mds', 'allow',
+ keyring,
+ run.Raw('&&'),
+ 'sudo',
+ 'chmod',
+ '0644',
+ keyring,
+ ],
+ )
+ rems.run(
+ args=[
+ 'sudo',
+ 'sh',
+ '-c',
+ run.Raw("'"),
+ "echo",
+ '[client.rest{id}]'.format(id=id_),
+ run.Raw('>>'),
+ "/etc/ceph/ceph.conf",
+ run.Raw("'")
+ ]
+ )
+ rems.run(
+ args=[
+ 'sudo',
+ 'sh',
+ '-c',
+ run.Raw("'"),
+ 'echo',
+ 'restapi',
+ 'keyring',
+ '=',
+ '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
+ run.Raw('>>'),
+ '/etc/ceph/ceph.conf',
+ run.Raw("'"),
+ ]
+ )
+ rems.run(
+ args=[
+ 'ceph',
+ 'auth',
+ 'import',
+ '-i',
+ '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
+ ]
+ )
+ with contextutil.nested(
+ lambda: run_rest_api_daemon(ctx=ctx, api_clients=api_clients),):
+ yield
+
--- /dev/null
+"""
+Daemon restart
+"""
+import logging
+import pipes
+
+from teuthology import misc as teuthology
+from teuthology.orchestra import run as tor
+
+from teuthology.orchestra import run
+log = logging.getLogger(__name__)
+
+def restart_daemon(ctx, config, role, id_, *args):
+ """
+ Handle restart (including the execution of the command parameters passed)
+ """
+ log.info('Restarting {r}.{i} daemon...'.format(r=role, i=id_))
+ daemon = ctx.daemons.get_daemon(role, id_)
+ log.debug('Waiting for exit of {r}.{i} daemon...'.format(r=role, i=id_))
+ try:
+ daemon.wait_for_exit()
+ except tor.CommandFailedError as e:
+ log.debug('Command Failed: {e}'.format(e=e))
+ if len(args) > 0:
+ confargs = ['--{k}={v}'.format(k=k, v=v) for k,v in zip(args[0::2], args[1::2])]
+ log.debug('Doing restart of {r}.{i} daemon with args: {a}...'.format(r=role, i=id_, a=confargs))
+ daemon.restart_with_args(confargs)
+ else:
+ log.debug('Doing restart of {r}.{i} daemon...'.format(r=role, i=id_))
+ daemon.restart()
+
+def get_tests(ctx, config, role, remote, testdir):
+ """Download restart tests"""
+ srcdir = '{tdir}/restart.{role}'.format(tdir=testdir, role=role)
+
+ refspec = config.get('branch')
+ if refspec is None:
+ refspec = config.get('sha1')
+ if refspec is None:
+ refspec = config.get('tag')
+ if refspec is None:
+ refspec = 'HEAD'
+ log.info('Pulling restart qa/workunits from ref %s', refspec)
+
+ remote.run(
+ logger=log.getChild(role),
+ args=[
+ 'mkdir', '--', srcdir,
+ run.Raw('&&'),
+ 'git',
+ 'archive',
+ '--remote=git://git.ceph.com/ceph.git',
+ '%s:qa/workunits' % refspec,
+ run.Raw('|'),
+ 'tar',
+ '-C', srcdir,
+ '-x',
+ '-f-',
+ run.Raw('&&'),
+ 'cd', '--', srcdir,
+ run.Raw('&&'),
+ 'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
+ run.Raw('&&'),
+            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
+ run.Raw('>{tdir}/restarts.list'.format(tdir=testdir)),
+ ],
+ )
+ restarts = sorted(teuthology.get_file(
+ remote,
+ '{tdir}/restarts.list'.format(tdir=testdir)).split('\0'))
+ return (srcdir, restarts)
+
+def task(ctx, config):
+ """
+ Execute commands and allow daemon restart with config options.
+ Each process executed can output to stdout restart commands of the form:
+ restart <role> <id> <conf_key1> <conf_value1> <conf_key2> <conf_value2>
+ This will restart the daemon <role>.<id> with the specified config values once
+ by modifying the conf file with those values, and then replacing the old conf file
+ once the daemon is restarted.
+ This task does not kill a running daemon, it assumes the daemon will abort on an
+ assert specified in the config.
+
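+    For example, a test script might print a line such as
+    "restart mds a mds_log_max_segments 1" (the option shown here is purely
+    illustrative) and then wait until the task writes back "restarted".
+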
+ tasks:
+ - install:
+ - ceph:
+ - restart:
+ exec:
+ client.0:
+ - test_backtraces.py
+
+ """
+    assert isinstance(config, dict), "task restart got invalid config"
+
+ testdir = teuthology.get_testdir(ctx)
+
+ try:
+ assert 'exec' in config, "config requires exec key with <role>: <command> entries"
+ for role, task in config['exec'].iteritems():
+ log.info('restart for role {r}'.format(r=role))
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ srcdir, restarts = get_tests(ctx, config, role, remote, testdir)
+ log.info('Running command on role %s host %s', role, remote.name)
+ spec = '{spec}'.format(spec=task[0])
+ log.info('Restarts list: %s', restarts)
+ log.info('Spec is %s', spec)
+ to_run = [w for w in restarts if w == task or w.find(spec) != -1]
+ log.info('To run: %s', to_run)
+ for c in to_run:
+ log.info('Running restart script %s...', c)
+ args = [
+ run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
+ ]
+ env = config.get('env')
+ if env is not None:
+ for var, val in env.iteritems():
+ quoted_val = pipes.quote(val)
+ env_arg = '{var}={val}'.format(var=var, val=quoted_val)
+ args.append(run.Raw(env_arg))
+ args.extend([
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{srcdir}/{c}'.format(
+ srcdir=srcdir,
+ c=c,
+ ),
+ ])
+ proc = remote.run(
+ args=args,
+ stdout=tor.PIPE,
+ stdin=tor.PIPE,
+ stderr=log,
+ wait=False,
+ )
+ log.info('waiting for a command from script')
+ while True:
+ l = proc.stdout.readline()
+ if not l or l == '':
+ break
+ log.debug('script command: {c}'.format(c=l))
+ ll = l.strip()
+ cmd = ll.split(' ')
+ if cmd[0] == "done":
+ break
+                    assert cmd[0] == 'restart', "script sent invalid command request to restart task"
+ # cmd should be: restart <role> <id> <conf_key1> <conf_value1> <conf_key2> <conf_value2>
+ # or to clear, just: restart <role> <id>
+ restart_daemon(ctx, config, cmd[1], cmd[2], *cmd[3:])
+ proc.stdin.writelines(['restarted\n'])
+ proc.stdin.flush()
+ try:
+ proc.wait()
+ except tor.CommandFailedError:
+ raise Exception('restart task got non-zero exit status from script: {s}'.format(s=c))
+ finally:
+ log.info('Finishing %s on %s...', task, role)
+ remote.run(
+ logger=log.getChild(role),
+ args=[
+ 'rm', '-rf', '--', '{tdir}/restarts.list'.format(tdir=testdir), srcdir,
+ ],
+ )
--- /dev/null
+"""
+rgw routines
+"""
+import argparse
+import contextlib
+import json
+import logging
+import os
+
+from cStringIO import StringIO
+
+from teuthology.orchestra import run
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.orchestra.run import CommandFailedError
+from util.rgw import rgwadmin
+from util.rados import (rados, create_ec_pool,
+ create_replicated_pool,
+ create_cache_pool)
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def create_apache_dirs(ctx, config):
+ """
+ Remotely create apache directories. Delete when finished.
+ """
+ log.info('Creating apache directories...')
+ testdir = teuthology.get_testdir(ctx)
+ for client in config.iterkeys():
+ ctx.cluster.only(client).run(
+ args=[
+ 'mkdir',
+ '-p',
+ '{tdir}/apache/htdocs.{client}'.format(tdir=testdir,
+ client=client),
+ '{tdir}/apache/tmp.{client}/fastcgi_sock'.format(
+ tdir=testdir,
+ client=client),
+ run.Raw('&&'),
+ 'mkdir',
+ '{tdir}/archive/apache.{client}'.format(tdir=testdir,
+ client=client),
+ ],
+ )
+ try:
+ yield
+ finally:
+ log.info('Cleaning up apache directories...')
+ for client in config.iterkeys():
+ ctx.cluster.only(client).run(
+ args=[
+ 'rm',
+ '-rf',
+ '{tdir}/apache/tmp.{client}'.format(tdir=testdir,
+ client=client),
+ run.Raw('&&'),
+ 'rmdir',
+ '{tdir}/apache/htdocs.{client}'.format(tdir=testdir,
+ client=client),
+ ],
+ )
+
+ for client in config.iterkeys():
+ ctx.cluster.only(client).run(
+ args=[
+ 'rmdir',
+ '{tdir}/apache'.format(tdir=testdir),
+ ],
+ check_status=False, # only need to remove once per host
+ )
+
+
+@contextlib.contextmanager
+def ship_apache_configs(ctx, config, role_endpoints):
+ """
+    Ship apache config and rgw.fcgi to all clients. Clean up on termination.
+ """
+ assert isinstance(config, dict)
+ assert isinstance(role_endpoints, dict)
+ testdir = teuthology.get_testdir(ctx)
+ log.info('Shipping apache config and rgw.fcgi...')
+ src = os.path.join(os.path.dirname(__file__), 'apache.conf.template')
+ for client, conf in config.iteritems():
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ system_type = teuthology.get_system_type(remote)
+ if not conf:
+ conf = {}
+ idle_timeout = conf.get('idle_timeout', ctx.rgw.default_idle_timeout)
+ if system_type == 'deb':
+ mod_path = '/usr/lib/apache2/modules'
+ print_continue = 'on'
+ user = 'www-data'
+ group = 'www-data'
+ apache24_modconfig = '''
+ IncludeOptional /etc/apache2/mods-available/mpm_event.conf
+ IncludeOptional /etc/apache2/mods-available/mpm_event.load
+'''
+ else:
+ mod_path = '/usr/lib64/httpd/modules'
+ print_continue = 'off'
+ user = 'apache'
+ group = 'apache'
+ apache24_modconfig = \
+ 'IncludeOptional /etc/httpd/conf.modules.d/00-mpm.conf'
+ host, port = role_endpoints[client]
+        with open(src, 'rb') as f:
+ conf = f.read().format(
+ testdir=testdir,
+ mod_path=mod_path,
+ print_continue=print_continue,
+ host=host,
+ port=port,
+ client=client,
+ idle_timeout=idle_timeout,
+ user=user,
+ group=group,
+ apache24_modconfig=apache24_modconfig,
+ )
+ teuthology.write_file(
+ remote=remote,
+ path='{tdir}/apache/apache.{client}.conf'.format(
+ tdir=testdir,
+ client=client),
+ data=conf,
+ )
+ teuthology.write_file(
+ remote=remote,
+ path='{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
+ tdir=testdir,
+ client=client),
+ data="""#!/bin/sh
+ulimit -c unlimited
+exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring --rgw-socket-path {tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock
+
+""".format(tdir=testdir, client=client)
+ )
+ remote.run(
+ args=[
+ 'chmod',
+ 'a=rx',
+ '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir,
+ client=client),
+ ],
+ )
+ try:
+ yield
+ finally:
+ log.info('Removing apache config...')
+ for client in config.iterkeys():
+ ctx.cluster.only(client).run(
+ args=[
+ 'rm',
+ '-f',
+ '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
+ client=client),
+ run.Raw('&&'),
+ 'rm',
+ '-f',
+ '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
+ tdir=testdir,
+ client=client),
+ ],
+ )
+
+
+@contextlib.contextmanager
+def start_rgw(ctx, config):
+ """
+ Start rgw on remote sites.
+ """
+ log.info('Starting rgw...')
+ testdir = teuthology.get_testdir(ctx)
+ for client in config.iterkeys():
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+
+ client_config = config.get(client)
+ if client_config is None:
+ client_config = {}
+ log.info("rgw %s config is %s", client, client_config)
+ id_ = client.split('.', 1)[1]
+ log.info('client {client} is id {id}'.format(client=client, id=id_))
+ cmd_prefix = [
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'daemon-helper',
+ 'term',
+ ]
+
+ rgw_cmd = ['radosgw']
+
+ if ctx.rgw.frontend == 'apache':
+ rgw_cmd.extend([
+ '--rgw-socket-path',
+ '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
+ tdir=testdir,
+ client=client,
+ ),
+ ])
+ elif ctx.rgw.frontend == 'civetweb':
+ host, port = ctx.rgw.role_endpoints[client]
+ rgw_cmd.extend([
+ '--rgw-frontends',
+ 'civetweb port={port}'.format(port=port),
+ ])
+
+ rgw_cmd.extend([
+ '-n', client,
+ '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client),
+ '--log-file',
+ '/var/log/ceph/rgw.{client}.log'.format(client=client),
+ '--rgw_ops_log_socket_path',
+ '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
+ client=client),
+ '--foreground',
+ run.Raw('|'),
+ 'sudo',
+ 'tee',
+            '/var/log/ceph/rgw.{client}.stdout'.format(client=client),
+ run.Raw('2>&1'),
+ ])
+
+ if client_config.get('valgrind'):
+ cmd_prefix = teuthology.get_valgrind_args(
+ testdir,
+ client,
+ cmd_prefix,
+ client_config.get('valgrind')
+ )
+
+ run_cmd = list(cmd_prefix)
+ run_cmd.extend(rgw_cmd)
+
+ ctx.daemons.add_daemon(
+ remote, 'rgw', client,
+ args=run_cmd,
+ logger=log.getChild(client),
+ stdin=run.PIPE,
+ wait=False,
+ )
+
+ try:
+ yield
+ finally:
+ teuthology.stop_daemons_of_type(ctx, 'rgw')
+ for client in config.iterkeys():
+ ctx.cluster.only(client).run(
+ args=[
+ 'rm',
+ '-f',
+ '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
+ client=client),
+ ],
+ )
+
+
+@contextlib.contextmanager
+def start_apache(ctx, config):
+ """
+ Start apache on remote sites.
+ """
+ log.info('Starting apache...')
+ testdir = teuthology.get_testdir(ctx)
+ apaches = {}
+ for client in config.iterkeys():
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ system_type = teuthology.get_system_type(remote)
+ if system_type == 'deb':
+ apache_name = 'apache2'
+ else:
+ try:
+ remote.run(
+ args=[
+ 'stat',
+ '/usr/sbin/httpd.worker',
+ ],
+ )
+ apache_name = '/usr/sbin/httpd.worker'
+ except CommandFailedError:
+ apache_name = '/usr/sbin/httpd'
+
+ proc = remote.run(
+ args=[
+ 'adjust-ulimits',
+ 'daemon-helper',
+ 'kill',
+ apache_name,
+ '-X',
+ '-f',
+ '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
+ client=client),
+ ],
+ logger=log.getChild(client),
+ stdin=run.PIPE,
+ wait=False,
+ )
+ apaches[client] = proc
+
+ try:
+ yield
+ finally:
+ log.info('Stopping apache...')
+ for client, proc in apaches.iteritems():
+ proc.stdin.close()
+
+ run.wait(apaches.itervalues())
+
+
+def extract_user_info(client_config):
+ """
+ Extract user info from the client config specified. Returns a dict
+ that includes system key information.
+ """
+    # if there is no system user, or no name for that user, return None
+ if ('system user' not in client_config or
+ 'name' not in client_config['system user']):
+ return None
+
+ user_info = dict()
+ user_info['system_key'] = dict(
+ user=client_config['system user']['name'],
+ access_key=client_config['system user']['access key'],
+ secret_key=client_config['system user']['secret key'],
+ )
+ return user_info
+
+
+def extract_zone_info(ctx, client, client_config):
+ """
+ Get zone information.
+ :param client: dictionary of client information
+ :param client_config: dictionary of client configuration information
+ :returns: zone extracted from client and client_config information
+ """
+ ceph_config = ctx.ceph.conf.get('global', {})
+ ceph_config.update(ctx.ceph.conf.get('client', {}))
+ ceph_config.update(ctx.ceph.conf.get(client, {}))
+ for key in ['rgw zone', 'rgw region', 'rgw zone root pool']:
+ assert key in ceph_config, \
+ 'ceph conf must contain {key} for {client}'.format(key=key,
+ client=client)
+ region = ceph_config['rgw region']
+ zone = ceph_config['rgw zone']
+ zone_info = dict()
+ for key in ['rgw control pool', 'rgw gc pool', 'rgw log pool',
+ 'rgw intent log pool', 'rgw usage log pool',
+ 'rgw user keys pool', 'rgw user email pool',
+ 'rgw user swift pool', 'rgw user uid pool',
+ 'rgw domain root']:
+ new_key = key.split(' ', 1)[1]
+ new_key = new_key.replace(' ', '_')
+
+ if key in ceph_config:
+ value = ceph_config[key]
+ log.debug('{key} specified in ceph_config ({val})'.format(
+ key=key, val=value))
+ zone_info[new_key] = value
+ else:
+ zone_info[new_key] = '.' + region + '.' + zone + '.' + new_key
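+            # e.g. with region "r1" and zone "z1", an unset "rgw domain root"
+            # becomes the pool name ".r1.z1.domain_root"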
+
+ index_pool = '.' + region + '.' + zone + '.' + 'index_pool'
+ data_pool = '.' + region + '.' + zone + '.' + 'data_pool'
+ data_extra_pool = '.' + region + '.' + zone + '.' + 'data_extra_pool'
+
+ zone_info['placement_pools'] = [{'key': 'default_placement',
+ 'val': {'index_pool': index_pool,
+ 'data_pool': data_pool,
+ 'data_extra_pool': data_extra_pool}
+ }]
+
+ # these keys are meant for the zones argument in the region info. We
+ # insert them into zone_info with a different format and then remove them
+ # in the fill_in_endpoints() method
+ for key in ['rgw log meta', 'rgw log data']:
+ if key in ceph_config:
+ zone_info[key] = ceph_config[key]
+
+ return region, zone, zone_info
+
+
+def extract_region_info(region, region_info):
+ """
+ Extract region information from the region_info parameter, using get
+ to set default values.
+
+ :param region: name of the region
+ :param region_info: region information (in dictionary form).
+ :returns: dictionary of region information set from region_info, using
+ default values for missing fields.
+ """
+ assert isinstance(region_info['zones'], list) and region_info['zones'], \
+ 'zones must be a non-empty list'
+ return dict(
+ name=region,
+ api_name=region_info.get('api name', region),
+ is_master=region_info.get('is master', False),
+ log_meta=region_info.get('log meta', False),
+ log_data=region_info.get('log data', False),
+ master_zone=region_info.get('master zone', region_info['zones'][0]),
+ placement_targets=region_info.get('placement targets',
+ [{'name': 'default_placement',
+ 'tags': []}]),
+ default_placement=region_info.get('default placement',
+ 'default_placement'),
+ )
+
+
+def assign_ports(ctx, config):
+ """
+    Assign port numbers starting with port 7280.
+ """
+ port = 7280
+ role_endpoints = {}
+ for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for role in roles_for_host:
+ if role in config:
+ role_endpoints[role] = (remote.name.split('@')[1], port)
+ port += 1
+
+ return role_endpoints
+
+
+def fill_in_endpoints(region_info, role_zones, role_endpoints):
+ """
+ Iterate through the list of role_endpoints, filling in zone information
+
+ :param region_info: region data
+ :param role_zones: region and zone information.
+ :param role_endpoints: endpoints being used
+ """
+ for role, (host, port) in role_endpoints.iteritems():
+ region, zone, zone_info, _ = role_zones[role]
+ endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
+ # check if the region specified under client actually exists
+ # in region_info (it should, if properly configured).
+ # If not, throw a reasonable error
+ if region not in region_info:
+ raise Exception(
+ 'Region: {region} was specified but no corresponding'
+ ' entry was found under \'regions\''.format(region=region))
+
+ region_conf = region_info[region]
+ region_conf.setdefault('endpoints', [])
+ region_conf['endpoints'].append(endpoint)
+
+ # this is the payload for the 'zones' field in the region field
+ zone_payload = dict()
+ zone_payload['endpoints'] = [endpoint]
+ zone_payload['name'] = zone
+
+ # Pull the log meta and log data settings out of zone_info, if they
+ # exist, then pop them as they don't actually belong in the zone info
+ for key in ['rgw log meta', 'rgw log data']:
+ new_key = key.split(' ', 1)[1]
+ new_key = new_key.replace(' ', '_')
+
+ if key in zone_info:
+ value = zone_info.pop(key)
+ else:
+ value = 'false'
+
+ zone_payload[new_key] = value
+
+ region_conf.setdefault('zones', [])
+ region_conf['zones'].append(zone_payload)
+
+
+@contextlib.contextmanager
+def configure_users(ctx, config, everywhere=False):
+ """
+ Create users by remotely running rgwadmin commands using extracted
+ user information.
+ """
+ log.info('Configuring users...')
+
+ # extract the user info and append it to the payload tuple for the given
+ # client
+ for client, c_config in config.iteritems():
+ if not c_config:
+ continue
+ user_info = extract_user_info(c_config)
+ if not user_info:
+ continue
+
+ # For data sync the master zones and regions must have the
+ # system users of the secondary zones. To keep this simple,
+ # just create the system users on every client if regions are
+ # configured.
+ clients_to_create_as = [client]
+ if everywhere:
+ clients_to_create_as = config.keys()
+ for client_name in clients_to_create_as:
+ log.debug('Creating user {user} on {client}'.format(
+ user=user_info['system_key']['user'], client=client_name))
+ rgwadmin(ctx, client_name,
+ cmd=[
+ 'user', 'create',
+ '--uid', user_info['system_key']['user'],
+ '--access-key', user_info['system_key']['access_key'],
+ '--secret', user_info['system_key']['secret_key'],
+ '--display-name', user_info['system_key']['user'],
+ '--system',
+ ],
+ check_status=True,
+ )
+
+ yield
+
+
+@contextlib.contextmanager
+def create_nonregion_pools(ctx, config, regions):
+ """Create replicated or erasure coded data pools for rgw."""
+ if regions:
+ yield
+ return
+
+ log.info('creating data pools')
+ for client in config.keys():
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ data_pool = '.rgw.buckets'
+ if ctx.rgw.ec_data_pool:
+ create_ec_pool(remote, data_pool, client, 64)
+ else:
+ create_replicated_pool(remote, data_pool, 64)
+ if ctx.rgw.cache_pools:
+ create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
+ 64*1024*1024)
+ yield
+
+
+@contextlib.contextmanager
+def configure_regions_and_zones(ctx, config, regions, role_endpoints):
+ """
+ Configure rgw regions and zones, using rados and radosgw-admin commands.
+ """
+ if not regions:
+ log.debug(
+ 'In rgw.configure_regions_and_zones() and regions is None. '
+ 'Bailing')
+ yield
+ return
+
+ log.info('Configuring regions and zones...')
+
+ log.debug('config is %r', config)
+ log.debug('regions are %r', regions)
+ log.debug('role_endpoints = %r', role_endpoints)
+ # extract the zone info
+ role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
+ for client, c_config in config.iteritems()])
+ log.debug('roles_zones = %r', role_zones)
+
+ # extract the user info and append it to the payload tuple for the given
+ # client
+ for client, c_config in config.iteritems():
+ if not c_config:
+ user_info = None
+ else:
+ user_info = extract_user_info(c_config)
+
+ (region, zone, zone_info) = role_zones[client]
+ role_zones[client] = (region, zone, zone_info, user_info)
+
+ region_info = dict([
+ (region_name, extract_region_info(region_name, r_config))
+ for region_name, r_config in regions.iteritems()])
+
+ fill_in_endpoints(region_info, role_zones, role_endpoints)
+
+ # clear out the old defaults
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ # removing these objects from .rgw.root and the per-zone root pools
+ # may or may not matter
+ rados(ctx, mon,
+ cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
+ rados(ctx, mon,
+ cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])
+
+ for client in config.iterkeys():
+ for role, (_, zone, zone_info, user_info) in role_zones.iteritems():
+ rados(ctx, mon,
+ cmd=['-p', zone_info['domain_root'],
+ 'rm', 'region_info.default'])
+ rados(ctx, mon,
+ cmd=['-p', zone_info['domain_root'],
+ 'rm', 'zone_info.default'])
+
+ (remote,) = ctx.cluster.only(role).remotes.keys()
+ for pool_info in zone_info['placement_pools']:
+ remote.run(args=['ceph', 'osd', 'pool', 'create',
+ pool_info['val']['index_pool'], '64', '64'])
+ if ctx.rgw.ec_data_pool:
+ create_ec_pool(remote, pool_info['val']['data_pool'],
+ zone, 64)
+ else:
+ create_replicated_pool(
+ remote, pool_info['val']['data_pool'],
+ 64)
+
+ rgwadmin(ctx, client,
+ cmd=['-n', client, 'zone', 'set', '--rgw-zone', zone],
+ stdin=StringIO(json.dumps(dict(
+ zone_info.items() + user_info.items()))),
+ check_status=True)
+
+ for region, info in region_info.iteritems():
+ region_json = json.dumps(info)
+ log.debug('region info is: %s', region_json)
+ rgwadmin(ctx, client,
+ cmd=['-n', client, 'region', 'set'],
+ stdin=StringIO(region_json),
+ check_status=True)
+ if info['is_master']:
+ rgwadmin(ctx, client,
+ cmd=['-n', client,
+ 'region', 'default',
+ '--rgw-region', region],
+ check_status=True)
+
+ rgwadmin(ctx, client, cmd=['-n', client, 'regionmap', 'update'])
+ yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Either configure apache to run a rados gateway, or use the built-in
+ civetweb server.
+ Only one should be run per machine, since it uses a hard-coded port for
+ now.
+
+ For example, to run rgw on all clients::
+
+ tasks:
+ - ceph:
+ - rgw:
+
+ To only run on certain clients::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0, client.3]
+
+ or
+
+ tasks:
+ - ceph:
+ - rgw:
+ client.0:
+ client.3:
+
+ You can adjust the idle timeout for fastcgi (default is 30 seconds)::
+
+ tasks:
+ - ceph:
+ - rgw:
+ client.0:
+ idle_timeout: 90
+
+ To run radosgw through valgrind::
+
+ tasks:
+ - ceph:
+ - rgw:
+ client.0:
+ valgrind: [--tool=memcheck]
+ client.3:
+ valgrind: [--tool=memcheck]
+
+ To use civetweb instead of apache::
+
+ tasks:
+ - ceph:
+ - rgw:
+ - client.0
+ overrides:
+ rgw:
+ frontend: civetweb
+
+ Note that without a modified fastcgi module (e.g. with the default
+ one on CentOS), you must set rgw print continue = false in ceph.conf::
+
+ tasks:
+ - ceph:
+ conf:
+ global:
+ rgw print continue: false
+ - rgw: [client.0]
+
+ To run rgws for multiple regions or zones, describe the regions
+ and their zones in a regions section. The endpoints will be
+ generated by this task. Each client must have a region, zone,
+ and pools assigned in ceph.conf::
+
+ tasks:
+ - install:
+ - ceph:
+ conf:
+ client.0:
+ rgw region: foo
+ rgw zone: foo-1
+ rgw region root pool: .rgw.rroot.foo
+ rgw zone root pool: .rgw.zroot.foo
+ rgw log meta: true
+ rgw log data: true
+ client.1:
+ rgw region: bar
+ rgw zone: bar-master
+ rgw region root pool: .rgw.rroot.bar
+ rgw zone root pool: .rgw.zroot.bar
+ rgw log meta: true
+ rgw log data: true
+ client.2:
+ rgw region: bar
+ rgw zone: bar-secondary
+ rgw region root pool: .rgw.rroot.bar
+ rgw zone root pool: .rgw.zroot.bar-secondary
+ - rgw:
+ default_idle_timeout: 30
+ ec-data-pool: true
+ regions:
+ foo:
+ api name: api_name # default: region name
+ is master: true # default: false
+ master zone: foo-1 # default: first zone
+ zones: [foo-1]
+ log meta: true
+ log data: true
+ placement targets: [target1, target2] # default: []
+ default placement: target2 # default: ''
+ bar:
+ api name: bar-api
+ zones: [bar-master, bar-secondary]
+ client.0:
+ system user:
+ name: foo-system
+ access key: X2IYPSTY1072DDY1SJMC
+ secret key: YIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
+ client.1:
+ system user:
+ name: bar1
+ access key: Y2IYPSTY1072DDY1SJMC
+ secret key: XIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
+ client.2:
+ system user:
+ name: bar2
+ access key: Z2IYPSTY1072DDY1SJMC
+ secret key: ZIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
+ """
+ if config is None:
+ config = dict(('client.{id}'.format(id=id_), None)
+ for id_ in teuthology.all_roles_of_type(
+ ctx.cluster, 'client'))
+ elif isinstance(config, list):
+ config = dict((name, None) for name in config)
+
+ overrides = ctx.config.get('overrides', {})
+ teuthology.deep_merge(config, overrides.get('rgw', {}))
+
+ regions = {}
+ if 'regions' in config:
+ # separate region info so only clients are keys in config
+ regions = config['regions']
+ del config['regions']
+
+ role_endpoints = assign_ports(ctx, config)
+ ctx.rgw = argparse.Namespace()
+ ctx.rgw.role_endpoints = role_endpoints
+ # stash the region info for later, since it was deleted from the config
+ # structure
+ ctx.rgw.regions = regions
+
+ ctx.rgw.ec_data_pool = False
+ if 'ec-data-pool' in config:
+ ctx.rgw.ec_data_pool = bool(config['ec-data-pool'])
+ del config['ec-data-pool']
+ ctx.rgw.default_idle_timeout = 30
+ if 'default_idle_timeout' in config:
+ ctx.rgw.default_idle_timeout = int(config['default_idle_timeout'])
+ del config['default_idle_timeout']
+ ctx.rgw.cache_pools = False
+ if 'cache-pools' in config:
+ ctx.rgw.cache_pools = bool(config['cache-pools'])
+ del config['cache-pools']
+
+ ctx.rgw.frontend = 'apache'
+ if 'frontend' in config:
+ ctx.rgw.frontend = config['frontend']
+ del config['frontend']
+
+ subtasks = [
+ lambda: configure_regions_and_zones(
+ ctx=ctx,
+ config=config,
+ regions=regions,
+ role_endpoints=role_endpoints,
+ ),
+ lambda: configure_users(
+ ctx=ctx,
+ config=config,
+ everywhere=bool(regions),
+ ),
+ lambda: create_nonregion_pools(
+ ctx=ctx, config=config, regions=regions),
+ ]
+ if ctx.rgw.frontend == 'apache':
+ subtasks.insert(0, lambda: create_apache_dirs(ctx=ctx, config=config))
+ subtasks.extend([
+ lambda: ship_apache_configs(ctx=ctx, config=config,
+ role_endpoints=role_endpoints),
+ lambda: start_rgw(ctx=ctx, config=config),
+ lambda: start_apache(ctx=ctx, config=config),
+ ])
+ elif ctx.rgw.frontend == 'civetweb':
+ subtasks.extend([
+ lambda: start_rgw(ctx=ctx, config=config),
+ ])
+ else:
+ raise ValueError("frontend must be 'apache' or 'civetweb'")
+
+ log.info("Using %s as radosgw frontend", ctx.rgw.frontend)
+ with contextutil.nested(*subtasks):
+ yield
--- /dev/null
+"""
+rgw s3tests logging wrappers
+"""
+from cStringIO import StringIO
+from configobj import ConfigObj
+import contextlib
+import logging
+import s3tests
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download(ctx, config):
+ """
+ Run the s3tests download function
+ """
+ with s3tests.download(ctx, config):
+ yield
+
+def _config_user(s3tests_conf, section, user):
+ """
+ Run s3tests user config function
+ """
+ return s3tests._config_user(s3tests_conf, section, user)
+
+@contextlib.contextmanager
+def create_users(ctx, config):
+ """
+ Run the s3tests create_users function
+ """
+ with s3tests.create_users(ctx, config):
+ yield
+
+@contextlib.contextmanager
+def configure(ctx, config):
+ """
+ Run the s3tests configure function
+ """
+ with s3tests.configure(ctx, config):
+ yield
+
+@contextlib.contextmanager
+def run_tests(ctx, config):
+ """
+ Run the s3tests suite (restricted to the bucket-list test), then read
+ the rgw opslog socket with netcat and verify that data was returned.
+ """
+ assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
+ for client, client_config in config.iteritems():
+ client_config['extra_args'] = [
+ 's3tests.functional.test_s3:test_bucket_list_return_data',
+ ]
+# args = [
+# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
+# '-w',
+# '{tdir}/s3-tests'.format(tdir=testdir),
+# '-v',
+# 's3tests.functional.test_s3:test_bucket_list_return_data',
+# ]
+# if client_config is not None and 'extra_args' in client_config:
+# args.extend(client_config['extra_args'])
+#
+# ctx.cluster.only(client).run(
+# args=args,
+# )
+
+ s3tests.run_tests(ctx, config)
+
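+ # Read back whatever radosgw wrote to its ops-log unix socket; the
+ # assertion below expects a non-trivial amount of log data (>100 bytes).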
+ netcat_out = StringIO()
+
+ for client, client_config in config.iteritems():
+ ctx.cluster.only(client).run(
+ args = [
+ 'netcat',
+ '-w', '5',
+ '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
+ ],
+ stdout = netcat_out,
+ )
+
+ out = netcat_out.getvalue()
+
+ assert len(out) > 100
+
+ log.info('Received %s', out)
+
+ yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run a subset of the s3-tests suite against rgw and verify that the
+ opslog socket returns data.
+
+ Must restrict testing to a particular client::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3tests: [client.0]
+
+ To pass extra arguments to nose (e.g. to run a certain test)::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3tests:
+ client.0:
+ extra_args: ['test_s3:test_object_acl_grand_public_read']
+ client.1:
+ extra_args: ['--exclude', 'test_100_continue']
+ """
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+ "task s3tests only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ overrides = ctx.config.get('overrides', {})
+ # merge each client section, not the top level.
+ for (client, cconf) in config.iteritems():
+ teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))
+
+ log.debug('config is %s', config)
+
+ s3tests_conf = {}
+ for client in clients:
+ s3tests_conf[client] = ConfigObj(
+ indent_type='',
+ infile={
+ 'DEFAULT':
+ {
+ 'port' : 7280,
+ 'is_secure' : 'no',
+ },
+ 'fixtures' : {},
+ 's3 main' : {},
+ 's3 alt' : {},
+ }
+ )
+
+ with contextutil.nested(
+ lambda: download(ctx=ctx, config=config),
+ lambda: create_users(ctx=ctx, config=dict(
+ clients=clients,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: configure(ctx=ctx, config=dict(
+ clients=config,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: run_tests(ctx=ctx, config=config),
+ ):
+ yield
--- /dev/null
+"""
+Run rgw s3 readwrite tests
+"""
+from cStringIO import StringIO
+import base64
+import contextlib
+import logging
+import os
+import random
+import string
+import yaml
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.config import config as teuth_config
+from teuthology.orchestra import run
+from teuthology.orchestra.connection import split_user
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download(ctx, config):
+ """
+ Download the s3 tests from the git builder.
+ Remove downloaded s3 file upon exit.
+
+ The context passed in should be identical to the context
+ passed in to the main task.
+ """
+ assert isinstance(config, dict)
+ log.info('Downloading s3-tests...')
+ testdir = teuthology.get_testdir(ctx)
+ for (client, cconf) in config.items():
+ branch = cconf.get('force-branch', None)
+ if not branch:
+ branch = cconf.get('branch', 'master')
+ sha1 = cconf.get('sha1')
+ ctx.cluster.only(client).run(
+ args=[
+ 'git', 'clone',
+ '-b', branch,
+ teuth_config.ceph_git_base_url + 's3-tests.git',
+ '{tdir}/s3-tests'.format(tdir=testdir),
+ ],
+ )
+ if sha1 is not None:
+ ctx.cluster.only(client).run(
+ args=[
+ 'cd', '{tdir}/s3-tests'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'git', 'reset', '--hard', sha1,
+ ],
+ )
+ try:
+ yield
+ finally:
+ log.info('Removing s3-tests...')
+ testdir = teuthology.get_testdir(ctx)
+ for client in config:
+ ctx.cluster.only(client).run(
+ args=[
+ 'rm',
+ '-rf',
+ '{tdir}/s3-tests'.format(tdir=testdir),
+ ],
+ )
+
+
+def _config_user(s3tests_conf, section, user):
+ """
+ Configure users for this section by stashing away keys, ids, and
+ email addresses.
+ """
+ s3tests_conf[section].setdefault('user_id', user)
+ s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
+ s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
+ s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+ s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
+
+@contextlib.contextmanager
+def create_users(ctx, config):
+ """
+ Create a default s3 user.
+ """
+ assert isinstance(config, dict)
+ log.info('Creating rgw users...')
+ testdir = teuthology.get_testdir(ctx)
+ users = {'s3': 'foo'}
+ cached_client_user_names = dict()
+ for client in config['clients']:
+ cached_client_user_names[client] = dict()
+ s3tests_conf = config['s3tests_conf'][client]
+ s3tests_conf.setdefault('readwrite', {})
+ s3tests_conf['readwrite'].setdefault('bucket', 'rwtest-' + client + '-{random}-')
+ s3tests_conf['readwrite'].setdefault('readers', 10)
+ s3tests_conf['readwrite'].setdefault('writers', 3)
+ s3tests_conf['readwrite'].setdefault('duration', 300)
+ s3tests_conf['readwrite'].setdefault('files', {})
+ rwconf = s3tests_conf['readwrite']
+ rwconf['files'].setdefault('num', 10)
+ rwconf['files'].setdefault('size', 2000)
+ rwconf['files'].setdefault('stddev', 500)
+ for section, user in users.iteritems():
+ _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
+ log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'],
+ client=client))
+
+ # stash the 'delete_user' flag along with user name for easier cleanup
+ delete_this_user = True
+ if 'delete_user' in s3tests_conf['s3']:
+ delete_this_user = s3tests_conf['s3']['delete_user']
+ log.debug('delete_user set to {flag} for {client}'.format(flag=delete_this_user, client=client))
+ cached_client_user_names[client][section+user] = (s3tests_conf[section]['user_id'], delete_this_user)
+
+ # skip actual user creation if the create_user flag is set to false for this client
+ if 'create_user' in s3tests_conf['s3'] and s3tests_conf['s3']['create_user'] == False:
+ log.debug('create_user set to False, skipping user creation for {client}'.format(client=client))
+ continue
+ else:
+ ctx.cluster.only(client).run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'radosgw-admin',
+ '-n', client,
+ 'user', 'create',
+ '--uid', s3tests_conf[section]['user_id'],
+ '--display-name', s3tests_conf[section]['display_name'],
+ '--access-key', s3tests_conf[section]['access_key'],
+ '--secret', s3tests_conf[section]['secret_key'],
+ '--email', s3tests_conf[section]['email'],
+ ],
+ )
+ try:
+ yield
+ finally:
+ for client in config['clients']:
+ for section, user in users.iteritems():
+ #uid = '{user}.{client}'.format(user=user, client=client)
+ real_uid, delete_this_user = cached_client_user_names[client][section+user]
+ if delete_this_user:
+ ctx.cluster.only(client).run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'radosgw-admin',
+ '-n', client,
+ 'user', 'rm',
+ '--uid', real_uid,
+ '--purge-data',
+ ],
+ )
+ else:
+ log.debug('skipping delete for user {uid} on {client}'.format(uid=real_uid, client=client))
+
+@contextlib.contextmanager
+def configure(ctx, config):
+ """
+ Configure the s3-tests. This includes the running of the
+ bootstrap code and the updating of local conf files.
+ """
+ assert isinstance(config, dict)
+ log.info('Configuring s3-readwrite-tests...')
+ for client, properties in config['clients'].iteritems():
+ s3tests_conf = config['s3tests_conf'][client]
+ if properties is not None and 'rgw_server' in properties:
+ host = None
+ for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+ log.info('roles: ' + str(roles))
+ log.info('target: ' + str(target))
+ if properties['rgw_server'] in roles:
+ _, host = split_user(target)
+ assert host is not None, "Invalid client specified as the rgw_server"
+ s3tests_conf['s3']['host'] = host
+ else:
+ s3tests_conf['s3']['host'] = 'localhost'
+
+ def_conf = s3tests_conf['DEFAULT']
+ s3tests_conf['s3'].setdefault('port', def_conf['port'])
+ s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])
+
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote.run(
+ args=[
+ 'cd',
+ '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)),
+ run.Raw('&&'),
+ './bootstrap',
+ ],
+ )
+ conf_fp = StringIO()
+ conf = dict(
+ s3=s3tests_conf['s3'],
+ readwrite=s3tests_conf['readwrite'],
+ )
+ yaml.safe_dump(conf, conf_fp, default_flow_style=False)
+ teuthology.write_file(
+ remote=remote,
+ path='{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=teuthology.get_testdir(ctx), client=client),
+ data=conf_fp.getvalue(),
+ )
+ yield
+
+
+@contextlib.contextmanager
+def run_tests(ctx, config):
+ """
+ Run the s3readwrite tests after everything is set up.
+
+ :param ctx: Context passed to task
+ :param config: specific configuration information
+ """
+ assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
+ for client, client_config in config.iteritems():
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client))
+ args = [
+ '{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir),
+ ]
+ if client_config is not None and 'extra_args' in client_config:
+ args.extend(client_config['extra_args'])
+
+ ctx.cluster.only(client).run(
+ args=args,
+ stdin=conf,
+ )
+ yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run the s3tests-test-readwrite suite against rgw.
+
+ To run all tests on all clients::
+
+ tasks:
+ - ceph:
+ - rgw:
+ - s3readwrite:
+
+ To restrict testing to particular clients::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3readwrite: [client.0]
+
+ To run against a server on client.1::
+
+ tasks:
+ - ceph:
+ - rgw: [client.1]
+ - s3readwrite:
+ client.0:
+ rgw_server: client.1
+
+ To pass extra test arguments::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3readwrite:
+ client.0:
+ readwrite:
+ bucket: mybucket
+ readers: 10
+ writers: 3
+ duration: 600
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
+ client.1:
+ ...
+
+ To override s3 configuration::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3readwrite:
+ client.0:
+ s3:
+ user_id: myuserid
+ display_name: myname
+ email: my@email
+ access_key: myaccesskey
+ secret_key: mysecretkey
+
+ """
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+ "task s3tests only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ overrides = ctx.config.get('overrides', {})
+ # merge each client section, not the top level.
+ for client in config.iterkeys():
+ if not config[client]:
+ config[client] = {}
+ teuthology.deep_merge(config[client], overrides.get('s3readwrite', {}))
+
+ log.debug('in s3readwrite, config is %s', config)
+
+ s3tests_conf = {}
+ for client in clients:
+ if config[client] is None:
+ config[client] = {}
+ config[client].setdefault('s3', {})
+ config[client].setdefault('readwrite', {})
+
+ s3tests_conf[client] = ({
+ 'DEFAULT':
+ {
+ 'port' : 7280,
+ 'is_secure' : False,
+ },
+ 'readwrite' : config[client]['readwrite'],
+ 's3' : config[client]['s3'],
+ })
+
+ with contextutil.nested(
+ lambda: download(ctx=ctx, config=config),
+ lambda: create_users(ctx=ctx, config=dict(
+ clients=clients,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: configure(ctx=ctx, config=dict(
+ clients=config,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: run_tests(ctx=ctx, config=config),
+ ):
+ pass
+ yield
--- /dev/null
+"""
+Run rgw roundtrip message tests
+"""
+from cStringIO import StringIO
+import base64
+import contextlib
+import logging
+import os
+import random
+import string
+import yaml
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.config import config as teuth_config
+from teuthology.orchestra import run
+from teuthology.orchestra.connection import split_user
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download(ctx, config):
+ """
+ Download the s3 tests from the git builder.
+ Remove downloaded s3 file upon exit.
+
+ The context passed in should be identical to the context
+ passed in to the main task.
+ """
+ assert isinstance(config, list)
+ log.info('Downloading s3-tests...')
+ testdir = teuthology.get_testdir(ctx)
+ for client in config:
+ ctx.cluster.only(client).run(
+ args=[
+ 'git', 'clone',
+ teuth_config.ceph_git_base_url + 's3-tests.git',
+ '{tdir}/s3-tests'.format(tdir=testdir),
+ ],
+ )
+ try:
+ yield
+ finally:
+ log.info('Removing s3-tests...')
+ for client in config:
+ ctx.cluster.only(client).run(
+ args=[
+ 'rm',
+ '-rf',
+ '{tdir}/s3-tests'.format(tdir=testdir),
+ ],
+ )
+
+def _config_user(s3tests_conf, section, user):
+ """
+ Configure users for this section by stashing away keys, ids, and
+ email addresses.
+ """
+ s3tests_conf[section].setdefault('user_id', user)
+ s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
+ s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
+ s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+ s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
+
+@contextlib.contextmanager
+def create_users(ctx, config):
+ """
+ Create a default s3 user.
+ """
+ assert isinstance(config, dict)
+ log.info('Creating rgw users...')
+ testdir = teuthology.get_testdir(ctx)
+ users = {'s3': 'foo'}
+ for client in config['clients']:
+ s3tests_conf = config['s3tests_conf'][client]
+ s3tests_conf.setdefault('roundtrip', {})
+ s3tests_conf['roundtrip'].setdefault('bucket', 'rttest-' + client + '-{random}-')
+ s3tests_conf['roundtrip'].setdefault('readers', 10)
+ s3tests_conf['roundtrip'].setdefault('writers', 3)
+ s3tests_conf['roundtrip'].setdefault('duration', 300)
+ s3tests_conf['roundtrip'].setdefault('files', {})
+ rtconf = s3tests_conf['roundtrip']
+ rtconf['files'].setdefault('num', 10)
+ rtconf['files'].setdefault('size', 2000)
+ rtconf['files'].setdefault('stddev', 500)
+ for section, user in [('s3', 'foo')]:
+ _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
+ ctx.cluster.only(client).run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'radosgw-admin',
+ '-n', client,
+ 'user', 'create',
+ '--uid', s3tests_conf[section]['user_id'],
+ '--display-name', s3tests_conf[section]['display_name'],
+ '--access-key', s3tests_conf[section]['access_key'],
+ '--secret', s3tests_conf[section]['secret_key'],
+ '--email', s3tests_conf[section]['email'],
+ ],
+ )
+ try:
+ yield
+ finally:
+ for client in config['clients']:
+ for user in users.itervalues():
+ uid = '{user}.{client}'.format(user=user, client=client)
+ ctx.cluster.only(client).run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'radosgw-admin',
+ '-n', client,
+ 'user', 'rm',
+ '--uid', uid,
+ '--purge-data',
+ ],
+ )
+
+@contextlib.contextmanager
+def configure(ctx, config):
+ """
+ Configure the s3-tests. This includes the running of the
+ bootstrap code and the updating of local conf files.
+ """
+ assert isinstance(config, dict)
+ log.info('Configuring s3-roundtrip-tests...')
+ testdir = teuthology.get_testdir(ctx)
+ for client, properties in config['clients'].iteritems():
+ s3tests_conf = config['s3tests_conf'][client]
+ if properties is not None and 'rgw_server' in properties:
+ host = None
+ for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+ log.info('roles: ' + str(roles))
+ log.info('target: ' + str(target))
+ if properties['rgw_server'] in roles:
+ _, host = split_user(target)
+ assert host is not None, "Invalid client specified as the rgw_server"
+ s3tests_conf['s3']['host'] = host
+ else:
+ s3tests_conf['s3']['host'] = 'localhost'
+
+ def_conf = s3tests_conf['DEFAULT']
+ s3tests_conf['s3'].setdefault('port', def_conf['port'])
+ s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])
+
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote.run(
+ args=[
+ 'cd',
+ '{tdir}/s3-tests'.format(tdir=testdir),
+ run.Raw('&&'),
+ './bootstrap',
+ ],
+ )
+ conf_fp = StringIO()
+ conf = dict(
+ s3=s3tests_conf['s3'],
+ roundtrip=s3tests_conf['roundtrip'],
+ )
+ yaml.safe_dump(conf, conf_fp, default_flow_style=False)
+ teuthology.write_file(
+ remote=remote,
+ path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client),
+ data=conf_fp.getvalue(),
+ )
+ yield
+
+
+@contextlib.contextmanager
+def run_tests(ctx, config):
+ """
+ Run the s3 roundtrip tests after everything is set up.
+
+ :param ctx: Context passed to task
+ :param config: specific configuration information
+ """
+ assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
+ for client, client_config in config.iteritems():
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client))
+ args = [
+ '{tdir}/s3-tests/virtualenv/bin/s3tests-test-roundtrip'.format(tdir=testdir),
+ ]
+ if client_config is not None and 'extra_args' in client_config:
+ args.extend(client_config['extra_args'])
+
+ ctx.cluster.only(client).run(
+ args=args,
+ stdin=conf,
+ )
+ yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run the s3tests-test-roundtrip suite against rgw.
+
+ To run all tests on all clients::
+
+ tasks:
+ - ceph:
+ - rgw:
+ - s3roundtrip:
+
+ To restrict testing to particular clients::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3roundtrip: [client.0]
+
+ To run against a server on client.1::
+
+ tasks:
+ - ceph:
+ - rgw: [client.1]
+ - s3roundtrip:
+ client.0:
+ rgw_server: client.1
+
+ To pass extra test arguments::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3roundtrip:
+ client.0:
+ roundtrip:
+ bucket: mybucket
+ readers: 10
+ writers: 3
+ duration: 600
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
+ client.1:
+ ...
+
+ To override s3 configuration::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3roundtrip:
+ client.0:
+ s3:
+ user_id: myuserid
+ display_name: myname
+ email: my@email
+ access_key: myaccesskey
+ secret_key: mysecretkey
+
+ """
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+ "task s3tests only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ s3tests_conf = {}
+ for client in clients:
+ if config[client] is None:
+ config[client] = {}
+ config[client].setdefault('s3', {})
+ config[client].setdefault('roundtrip', {})
+
+ s3tests_conf[client] = ({
+ 'DEFAULT':
+ {
+ 'port' : 7280,
+ 'is_secure' : False,
+ },
+ 'roundtrip' : config[client]['roundtrip'],
+ 's3' : config[client]['s3'],
+ })
+
+ with contextutil.nested(
+ lambda: download(ctx=ctx, config=clients),
+ lambda: create_users(ctx=ctx, config=dict(
+ clients=clients,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: configure(ctx=ctx, config=dict(
+ clients=config,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: run_tests(ctx=ctx, config=config),
+ ):
+ pass
+ yield
--- /dev/null
+"""
+Run a set of s3 tests on rgw.
+"""
+from cStringIO import StringIO
+from configobj import ConfigObj
+import base64
+import contextlib
+import logging
+import os
+import random
+import string
+
+import util.rgw as rgw_utils
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.config import config as teuth_config
+from teuthology.orchestra import run
+from teuthology.orchestra.connection import split_user
+
+log = logging.getLogger(__name__)
+
+def extract_sync_client_data(ctx, client_name):
+ """
+ Extract synchronized client rgw zone and rgw region information.
+
+ :param ctx: Context passed to the s3tests task
+ :param client_name: Name of the client that we are syncing with
+ """
+ return_region_name = None
+ return_dict = None
+ client = ctx.ceph.conf.get(client_name, None)
+ if client:
+ current_client_zone = client.get('rgw zone', None)
+ if current_client_zone:
+ (endpoint_host, endpoint_port) = ctx.rgw.role_endpoints.get(client_name, (None, None))
+ # pull out the radosgw_agent stuff
+ regions = ctx.rgw.regions
+ for region in regions:
+ log.debug('region is {region}'.format(region=region))
+ region_data = ctx.rgw.regions[region]
+ log.debug('region data is {region}'.format(region=region_data))
+ zones = region_data['zones']
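+ # zone names come from the rgw task's region config; a substring
+ # match is enough to tie this client's 'rgw zone' to its region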
+ for zone in zones:
+ if current_client_zone in zone:
+ return_region_name = region
+ return_dict = dict()
+ return_dict['api_name'] = region_data['api name']
+ return_dict['is_master'] = region_data['is master']
+ return_dict['port'] = endpoint_port
+ return_dict['host'] = endpoint_host
+
+ # The s3tests expect the sync_agent_[addr|port] to be
+ # set on the non-master node for some reason
+ if not region_data['is master']:
+ (rgwagent_host, rgwagent_port) = ctx.radosgw_agent.endpoint
+ (return_dict['sync_agent_addr'], _) = ctx.rgw.role_endpoints[rgwagent_host]
+ return_dict['sync_agent_port'] = rgwagent_port
+
+ else: #if client_zone:
+ log.debug('No zone info for {host}'.format(host=client_name))
+ else: # if client
+ log.debug('No ceph conf for {host}'.format(host=client_name))
+
+ return return_region_name, return_dict
+
+def update_conf_with_region_info(ctx, config, s3tests_conf):
+ """
+ Scan the clients in s3tests_conf for any that take part in a
+ radosgw-agent sync relationship. Update the local conf file with
+ the source and destination region info if such a client is found.
+ """
+ for key in s3tests_conf.keys():
+ # we'll assume that there's only one sync relationship (source / destination) with client.X
+ # as the key for now
+
+ # Iterate through all of the radosgw_agent (rgwa) configs and see if a
+ # given client is involved in a relationship.
+ # If a given client isn't, skip it
+ this_client_in_rgwa_config = False
+ for rgwa in ctx.radosgw_agent.config.keys():
+ rgwa_data = ctx.radosgw_agent.config[rgwa]
+
+ if key in rgwa_data['src'] or key in rgwa_data['dest']:
+ this_client_in_rgwa_config = True
+ log.debug('{client} is in a radosgw-agent sync relationship'.format(client=key))
+ radosgw_sync_data = ctx.radosgw_agent.config[key]
+ break
+ if not this_client_in_rgwa_config:
+ log.debug('{client} is NOT in a radosgw-agent sync relationship'.format(client=key))
+ continue
+
+ source_client = radosgw_sync_data['src']
+ dest_client = radosgw_sync_data['dest']
+
+ # Extract the pertinent info for the source side
+ source_region_name, source_region_dict = extract_sync_client_data(ctx, source_client)
+ log.debug('\t{key} source_region {source_region} source_dict {source_dict}'.format
+ (key=key,source_region=source_region_name,source_dict=source_region_dict))
+
+ # The source *should* be the master region, but test anyway and then set it as the default region
+ if source_region_dict['is_master']:
+ log.debug('Setting {region} as default_region'.format(region=source_region_name))
+ s3tests_conf[key]['fixtures'].setdefault('default_region', source_region_name)
+
+ # Extract the pertinent info for the destination side
+ dest_region_name, dest_region_dict = extract_sync_client_data(ctx, dest_client)
+ log.debug('\t{key} dest_region {dest_region} dest_dict {dest_dict}'.format
+ (key=key,dest_region=dest_region_name,dest_dict=dest_region_dict))
+
+ # now add these regions to the s3tests_conf object
+ s3tests_conf[key]['region {region_name}'.format(region_name=source_region_name)] = source_region_dict
+ s3tests_conf[key]['region {region_name}'.format(region_name=dest_region_name)] = dest_region_dict
+
+@contextlib.contextmanager
+def download(ctx, config):
+ """
+ Download the s3 tests from the git builder.
+ Remove downloaded s3 file upon exit.
+
+ The context passed in should be identical to the context
+ passed in to the main task.
+ """
+ assert isinstance(config, dict)
+ log.info('Downloading s3-tests...')
+ testdir = teuthology.get_testdir(ctx)
+ for (client, cconf) in config.items():
+ branch = cconf.get('force-branch', None)
+ if not branch:
+ ceph_branch = ctx.config.get('branch')
+ suite_branch = ctx.config.get('suite_branch', ceph_branch)
+ branch = cconf.get('branch', suite_branch)
+ if not branch:
+ raise ValueError(
+ "Could not determine what branch to use for s3tests!")
+ else:
+ log.info("Using branch '%s' for s3tests", branch)
+ sha1 = cconf.get('sha1')
+ ctx.cluster.only(client).run(
+ args=[
+ 'git', 'clone',
+ '-b', branch,
+ teuth_config.ceph_git_base_url + 's3-tests.git',
+ '{tdir}/s3-tests'.format(tdir=testdir),
+ ],
+ )
+ if sha1 is not None:
+ ctx.cluster.only(client).run(
+ args=[
+ 'cd', '{tdir}/s3-tests'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'git', 'reset', '--hard', sha1,
+ ],
+ )
+ try:
+ yield
+ finally:
+ log.info('Removing s3-tests...')
+ testdir = teuthology.get_testdir(ctx)
+ for client in config:
+ ctx.cluster.only(client).run(
+ args=[
+ 'rm',
+ '-rf',
+ '{tdir}/s3-tests'.format(tdir=testdir),
+ ],
+ )
+
+
+def _config_user(s3tests_conf, section, user):
+ """
+ Configure users for this section by stashing away keys, ids, and
+ email addresses.
+ """
+ s3tests_conf[section].setdefault('user_id', user)
+ s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
+ s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
+ s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+ s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
+
+
+@contextlib.contextmanager
+def create_users(ctx, config):
+ """
+ Create a main and an alternate s3 user.
+ """
+ assert isinstance(config, dict)
+ log.info('Creating rgw users...')
+ testdir = teuthology.get_testdir(ctx)
+ users = {'s3 main': 'foo', 's3 alt': 'bar'}
+ for client in config['clients']:
+ s3tests_conf = config['s3tests_conf'][client]
+ s3tests_conf.setdefault('fixtures', {})
+ s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-')
+ for section, user in users.iteritems():
+ _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
+ log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
+ ctx.cluster.only(client).run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'radosgw-admin',
+ '-n', client,
+ 'user', 'create',
+ '--uid', s3tests_conf[section]['user_id'],
+ '--display-name', s3tests_conf[section]['display_name'],
+ '--access-key', s3tests_conf[section]['access_key'],
+ '--secret', s3tests_conf[section]['secret_key'],
+ '--email', s3tests_conf[section]['email'],
+ ],
+ )
+ try:
+ yield
+ finally:
+ for client in config['clients']:
+ for user in users.itervalues():
+ uid = '{user}.{client}'.format(user=user, client=client)
+ ctx.cluster.only(client).run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'radosgw-admin',
+ '-n', client,
+ 'user', 'rm',
+ '--uid', uid,
+ '--purge-data',
+ ],
+ )
+
+
+@contextlib.contextmanager
+def configure(ctx, config):
+ """
+ Configure the s3-tests. This includes the running of the
+ bootstrap code and the updating of local conf files.
+ """
+ assert isinstance(config, dict)
+ log.info('Configuring s3-tests...')
+ testdir = teuthology.get_testdir(ctx)
+ for client, properties in config['clients'].iteritems():
+ s3tests_conf = config['s3tests_conf'][client]
+ if properties is not None and 'rgw_server' in properties:
+ host = None
+ for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+ log.info('roles: ' + str(roles))
+ log.info('target: ' + str(target))
+ if properties['rgw_server'] in roles:
+ _, host = split_user(target)
+ assert host is not None, "Invalid client specified as the rgw_server"
+ s3tests_conf['DEFAULT']['host'] = host
+ else:
+ s3tests_conf['DEFAULT']['host'] = 'localhost'
+
+ if properties is not None and 'slow_backend' in properties:
+ s3tests_conf['fixtures']['slow backend'] = properties['slow_backend']
+
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote.run(
+ args=[
+ 'cd',
+ '{tdir}/s3-tests'.format(tdir=testdir),
+ run.Raw('&&'),
+ './bootstrap',
+ ],
+ )
+ conf_fp = StringIO()
+ s3tests_conf.write(conf_fp)
+ teuthology.write_file(
+ remote=remote,
+ path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+ data=conf_fp.getvalue(),
+ )
+
+ log.info('Configuring boto...')
+ boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
+ for client, properties in config['clients'].iteritems():
+ with file(boto_src, 'rb') as f:
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ conf = f.read().format(
+ idle_timeout=config.get('idle_timeout', 30)
+ )
+ teuthology.write_file(
+ remote=remote,
+ path='{tdir}/boto.cfg'.format(tdir=testdir),
+ data=conf,
+ )
+
+ try:
+ yield
+
+ finally:
+ log.info('Cleaning up boto...')
+ for client, properties in config['clients'].iteritems():
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote.run(
+ args=[
+ 'rm',
+ '{tdir}/boto.cfg'.format(tdir=testdir),
+ ],
+ )
+
+@contextlib.contextmanager
+def sync_users(ctx, config):
+ """
+ Run a full radosgw-agent metadata sync if this is a multi-region test.
+ """
+ assert isinstance(config, dict)
+ # do a full sync if this is a multi-region test
+ if rgw_utils.multi_region_enabled(ctx):
+ log.debug('Doing a full sync')
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ else:
+ log.debug('Not a multi-region config; skipping the metadata sync')
+
+ yield
+
+@contextlib.contextmanager
+def run_tests(ctx, config):
+ """
+ Run the s3tests after everything is set up.
+
+ :param ctx: Context passed to task
+ :param config: specific configuration information
+ """
+ assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
+ for client, client_config in config.iteritems():
+ args = [
+ 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+ 'BOTO_CONFIG={tdir}/boto.cfg'.format(tdir=testdir),
+ '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
+ '-w',
+ '{tdir}/s3-tests'.format(tdir=testdir),
+ '-v',
+ '-a', '!fails_on_rgw',
+ ]
+ if client_config is not None and 'extra_args' in client_config:
+ args.extend(client_config['extra_args'])
+
+ ctx.cluster.only(client).run(
+ args=args,
+ label="s3 tests against rgw"
+ )
+ yield
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run the s3-tests suite against rgw.
+
+ To run all tests on all clients::
+
+ tasks:
+ - ceph:
+ - rgw:
+ - s3tests:
+
+ To restrict testing to particular clients::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3tests: [client.0]
+
+ To run against a server on client.1 and increase the boto timeout to 10m::
+
+ tasks:
+ - ceph:
+ - rgw: [client.1]
+ - s3tests:
+ client.0:
+ rgw_server: client.1
+ idle_timeout: 600
+
+ To pass extra arguments to nose (e.g. to run a certain test)::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3tests:
+ client.0:
+ extra_args: ['test_s3:test_object_acl_grand_public_read']
+ client.1:
+ extra_args: ['--exclude', 'test_100_continue']
+ """
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+ "task s3tests only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ overrides = ctx.config.get('overrides', {})
+ # merge each client section, not the top level.
+ for client in config.iterkeys():
+ if not config[client]:
+ config[client] = {}
+ teuthology.deep_merge(config[client], overrides.get('s3tests', {}))
+
+ log.debug('s3tests config is %s', config)
+
+ s3tests_conf = {}
+ for client in clients:
+ s3tests_conf[client] = ConfigObj(
+ indent_type='',
+ infile={
+ 'DEFAULT':
+ {
+ 'port' : 7280,
+ 'is_secure' : 'no',
+ },
+ 'fixtures' : {},
+ 's3 main' : {},
+ 's3 alt' : {},
+ }
+ )
+
+ # Only attempt to add in the region info if there's a radosgw_agent configured
+ if hasattr(ctx, 'radosgw_agent'):
+ update_conf_with_region_info(ctx, config, s3tests_conf)
+
+ with contextutil.nested(
+ lambda: download(ctx=ctx, config=config),
+ lambda: create_users(ctx=ctx, config=dict(
+ clients=clients,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: sync_users(ctx=ctx, config=config),
+ lambda: configure(ctx=ctx, config=dict(
+ clients=config,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: run_tests(ctx=ctx, config=config),
+ ):
+ pass
+ yield
--- /dev/null
+"""
+Samba
+"""
+import contextlib
+import logging
+import sys
+import time
+
+from teuthology import misc as teuthology
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+def get_sambas(ctx, roles):
+ """
+ Scan for roles that are samba. Yield the id of the samba role
+ (samba.0, samba.1...) and the associated remote
+
+ :param ctx: Context
+ :param roles: roles for this test (extracted from yaml files)
+ """
+ for role in roles:
+ assert isinstance(role, basestring)
+ PREFIX = 'samba.'
+ assert role.startswith(PREFIX)
+ id_ = role[len(PREFIX):]
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ yield (id_, remote)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Set up samba smbd with the ceph vfs module. This task assumes the samba
+ package has already been installed via the install task.
+
+ The config is optional and defaults to starting samba on all nodes.
+ If a config is given, it is expected to be a list of
+ samba nodes to start smbd servers on.
+
+ Example that starts smbd on all samba nodes::
+
+ tasks:
+ - install:
+ - install:
+ project: samba
+ extra_packages: ['samba']
+ - ceph:
+ - samba:
+ - interactive:
+
+ Example that starts smbd on just one of the samba nodes and cifs on the other::
+
+ tasks:
+ - samba: [samba.0]
+ - cifs: [samba.1]
+
+ An optional backend can be specified, and requires a path which smbd will
+ use as the backend storage location:
+
+ roles:
+ - [osd.0, osd.1, osd.2, mon.0, mon.1, mon.2, mds.a]
+ - [client.0, samba.0]
+
+ tasks:
+ - ceph:
+ - ceph-fuse: [client.0]
+ - samba:
+ samba.0:
+ cephfuse: "{testdir}/mnt.0"
+
+ This mounts ceph to {testdir}/mnt.0 using fuse, and starts smbd with
+ a UNC of //localhost/cephfuse. Access through that UNC will be on
+ the ceph fuse mount point.
+
+ If no arguments are specified in the samba
+ role, the default behavior is to enable the ceph UNC //localhost/ceph
+ and use the ceph vfs module as the smbd backend.
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ log.info("Setting up smbd with ceph vfs...")
+ assert config is None or isinstance(config, list) or isinstance(config, dict), \
+ "task samba got invalid config"
+
+ if config is None:
+ config = dict(('samba.{id}'.format(id=id_), None)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba'))
+ elif isinstance(config, list):
+ config = dict((name, None) for name in config)
+
+ samba_servers = list(get_sambas(ctx=ctx, roles=config.keys()))
+
+ testdir = teuthology.get_testdir(ctx)
+
+ from tasks.ceph import DaemonGroup
+ if not hasattr(ctx, 'daemons'):
+ ctx.daemons = DaemonGroup()
+
+ for id_, remote in samba_servers:
+
+ rolestr = "samba.{id_}".format(id_=id_)
+
+ confextras = """vfs objects = ceph
+ ceph:config_file = /etc/ceph/ceph.conf"""
+
+ unc = "ceph"
+ backend = "/"
+
+ if config[rolestr] is not None:
+ # verify that there's just one parameter in role
+ if len(config[rolestr]) != 1:
+ log.error("samba config for role samba.{id_} must have only one parameter".format(id_=id_))
+ raise Exception('invalid config')
+ confextras = ""
+ (unc, backendstr) = config[rolestr].items()[0]
+ backend = backendstr.format(testdir=testdir)
+
+ # on first samba role, set ownership and permissions of ceph root
+ # so that samba tests succeed
+ if config[rolestr] is None and id_ == samba_servers[0][0]:
+ remote.run(
+ args=[
+ 'mkdir', '-p', '/tmp/cmnt', run.Raw('&&'),
+ 'sudo', 'ceph-fuse', '/tmp/cmnt', run.Raw('&&'),
+ 'sudo', 'chown', 'ubuntu:ubuntu', '/tmp/cmnt/', run.Raw('&&'),
+ 'sudo', 'chmod', '1777', '/tmp/cmnt/', run.Raw('&&'),
+ 'sudo', 'umount', '/tmp/cmnt/', run.Raw('&&'),
+ 'rm', '-rf', '/tmp/cmnt',
+ ],
+ )
+ else:
+ remote.run(
+ args=[
+ 'sudo', 'chown', 'ubuntu:ubuntu', backend, run.Raw('&&'),
+ 'sudo', 'chmod', '1777', backend,
+ ],
+ )
+
+ teuthology.sudo_write_file(remote, "/usr/local/samba/etc/smb.conf", """
+[global]
+ workgroup = WORKGROUP
+ netbios name = DOMAIN
+
+[{unc}]
+ path = {backend}
+ {extras}
+ writeable = yes
+ valid users = ubuntu
+""".format(extras=confextras, unc=unc, backend=backend))
+
+ # create ubuntu user
+ remote.run(
+ args=[
+ 'sudo', '/usr/local/samba/bin/smbpasswd', '-e', 'ubuntu',
+ run.Raw('||'),
+ 'printf', run.Raw('"ubuntu\nubuntu\n"'),
+ run.Raw('|'),
+ 'sudo', '/usr/local/samba/bin/smbpasswd', '-s', '-a', 'ubuntu'
+ ])
+
+ smbd_cmd = [
+ 'sudo',
+ 'daemon-helper',
+ 'term',
+ 'nostdin',
+ '/usr/local/samba/sbin/smbd',
+ '-F',
+ ]
+ ctx.daemons.add_daemon(remote, 'smbd', id_,
+ args=smbd_cmd,
+ logger=log.getChild("smbd.{id_}".format(id_=id_)),
+ stdin=run.PIPE,
+ wait=False,
+ )
+
+ # give smbd time to initialize; there is probably a better way to do this
+ seconds_to_sleep = 100
+ log.info('Sleeping for %s seconds...' % seconds_to_sleep)
+ time.sleep(seconds_to_sleep)
+ log.info('Sleeping stopped...')
+
+ try:
+ yield
+ finally:
+ log.info('Stopping smbd processes...')
+ exc_info = (None, None, None)
+ for d in ctx.daemons.iter_daemons_of_role('smbd'):
+ try:
+ d.stop()
+ except (run.CommandFailedError,
+ run.CommandCrashedError,
+ run.ConnectionLostError):
+ exc_info = sys.exc_info()
+ log.exception('Saw exception from %s.%s', d.role, d.id_)
+ if exc_info != (None, None, None):
+ raise exc_info[0], exc_info[1], exc_info[2]
+
+ for id_, remote in samba_servers:
+ remote.run(
+ args=[
+ 'sudo',
+ 'rm', '-rf',
+ '/usr/local/samba/etc/smb.conf',
+ '/usr/local/samba/private/*',
+ '/usr/local/samba/var/run/',
+ '/usr/local/samba/var/locks',
+ '/usr/local/samba/var/lock',
+ ],
+ )
+ # make sure daemons are gone
+ try:
+ remote.run(
+ args=[
+ 'while',
+ 'sudo', 'killall', '-9', 'smbd',
+ run.Raw(';'),
+ 'do', 'sleep', '1',
+ run.Raw(';'),
+ 'done',
+ ],
+ )
+
+ remote.run(
+ args=[
+ 'sudo',
+ 'lsof',
+ backend,
+ ],
+ check_status=False
+ )
+ remote.run(
+ args=[
+ 'sudo',
+ 'fuser',
+ '-M',
+ backend,
+ ],
+ check_status=False
+ )
+ except Exception:
+ log.exception("Saw exception")
+ pass
--- /dev/null
+"""
+Scrub osds
+"""
+import contextlib
+import gevent
+import logging
+import random
+import time
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run scrub periodically. Randomly chooses an OSD to scrub.
+
+ The config should be as follows:
+
+ scrub:
+ frequency: <seconds between scrubs>
+ deep: <bool for deepness>
+
+ example:
+
+ tasks:
+ - ceph:
+ - scrub:
+ frequency: 30
+ deep: 0
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'scrub task only accepts a dict for configuration'
+
+ log.info('Beginning scrub...')
+
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+ while len(manager.get_osd_status()['up']) < num_osds:
+ manager.sleep(10)
+
+ scrub_proc = Scrubber(
+ manager,
+ config,
+ )
+ try:
+ yield
+ finally:
+ log.info('joining scrub')
+ scrub_proc.do_join()
+
+class Scrubber:
+ """
+ Scrubbing is actually performed during initialization
+ """
+ def __init__(self, manager, config):
+ """
+ Wait for the cluster to become clean, then spawn the scrubbing thread.
+ """
+ self.ceph_manager = manager
+ self.ceph_manager.wait_for_clean()
+
+ osd_status = self.ceph_manager.get_osd_status()
+ self.osds = osd_status['up']
+
+ self.config = config
+ if self.config is None:
+ self.config = dict()
+
+ else:
+ def tmp(x):
+ """Local display"""
+ print x
+ self.log = tmp
+
+ self.stopping = False
+
+ log.info("spawning thread")
+
+ self.thread = gevent.spawn(self.do_scrub)
+
+ def do_join(self):
+ """Scrubbing thread finished"""
+ self.stopping = True
+ self.thread.get()
+
+ def do_scrub(self):
+ """Perform the scrub operation"""
+ frequency = self.config.get("frequency", 30)
+ deep = self.config.get("deep", 0)
+
+ log.info("stopping %s" % self.stopping)
+
+ while not self.stopping:
+ osd = str(random.choice(self.osds))
+
+ if deep:
+ cmd = 'deep-scrub'
+ else:
+ cmd = 'scrub'
+
+ log.info('%sbing %s' % (cmd, osd))
+ self.ceph_manager.raw_cluster_cmd('osd', cmd, osd)
+
+ time.sleep(frequency)
--- /dev/null
+"""Scrub testing"""
+from cStringIO import StringIO
+
+import logging
+import os
+import time
+
+import ceph_manager
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+ """
+ Test [deep] scrub
+
+ tasks:
+ - chef:
+ - install:
+ - ceph:
+ log-whitelist:
+ - '!= known digest'
+ - '!= known omap_digest'
+ - deep-scrub 0 missing, 1 inconsistent objects
+ - deep-scrub 1 errors
+ - repair 0 missing, 1 inconsistent objects
+ - repair 1 errors, 1 fixed
+ - scrub_test:
+
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'scrub_test task only accepts a dict for configuration'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+
+ num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+ log.info('num_osds is %s' % num_osds)
+
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ logger=log.getChild('ceph_manager'),
+ )
+
+ while len(manager.get_osd_status()['up']) < num_osds:
+ time.sleep(10)
+
+ for i in range(num_osds):
+ manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'flush_pg_stats')
+ manager.wait_for_clean()
+
+ # write some data
+ p = manager.do_rados(mon, ['-p', 'rbd', 'bench', '--no-cleanup', '1', 'write', '-b', '4096'])
+ err = p.exitstatus
+ log.info('err is %d' % err)
+
+ # wait for some PG to have data that we can mess with
+ victim = None
+ osd = None
+ while victim is None:
+ stats = manager.get_pg_stats()
+ for pg in stats:
+ size = pg['stat_sum']['num_bytes']
+ if size > 0:
+ victim = pg['pgid']
+ osd = pg['acting'][0]
+ break
+
+ if victim is None:
+ time.sleep(3)
+
+ log.info('messing with PG %s on osd %d' % (victim, osd))
+
+ (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys()
+ data_path = os.path.join(
+ '/var/lib/ceph/osd',
+ 'ceph-{id}'.format(id=osd),
+ 'current',
+ '{pg}_head'.format(pg=victim)
+ )
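+ # with the filestore backend, each PG keeps its objects in a
+ # '<pgid>_head' directory under the OSD's current/ tree, so an object
+ # file can be corrupted directly on disk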
+
+ # fuzz time
+ ls_fp = StringIO()
+ osd_remote.run(
+ args=[ 'ls', data_path ],
+ stdout=ls_fp,
+ )
+ ls_out = ls_fp.getvalue()
+ ls_fp.close()
+
+ # find an object file we can mess with
+ osdfilename = None
+ for line in ls_out.split('\n'):
+ if 'object' in line:
+ osdfilename = line
+ break
+ assert osdfilename is not None
+
+ # Get actual object name from osd stored filename
+ tmp = osdfilename.split('__')
+ objname = tmp[0]
+ objname = objname.replace('\u', '_')
+ log.info('fuzzing %s' % objname)
+
+ # put a single \0 at the beginning of the file
+ osd_remote.run(
+ args=[ 'sudo', 'dd',
+ 'if=/dev/zero',
+ 'of=%s' % os.path.join(data_path, osdfilename),
+ 'bs=1', 'count=1', 'conv=notrunc'
+ ]
+ )
+
+ # scrub, verify inconsistent
+ manager.raw_cluster_cmd('pg', 'deep-scrub', victim)
+ # Give deep-scrub a chance to start
+ time.sleep(60)
+
+ while True:
+ stats = manager.get_single_pg_stats(victim)
+ state = stats['state']
+
+ # wait for the scrub to finish
+ if 'scrubbing' in state:
+ time.sleep(3)
+ continue
+
+ inconsistent = stats['state'].find('+inconsistent') != -1
+ assert inconsistent
+ break
+
+
+ # repair, verify no longer inconsistent
+ manager.raw_cluster_cmd('pg', 'repair', victim)
+ # Give repair a chance to start
+ time.sleep(60)
+
+ while True:
+ stats = manager.get_single_pg_stats(victim)
+ state = stats['state']
+
+ # wait for the scrub to finish
+ if 'scrubbing' in state:
+ time.sleep(3)
+ continue
+
+ inconsistent = stats['state'].find('+inconsistent') != -1
+ assert not inconsistent
+ break
+
+ # Test deep-scrub with various omap modifications
+ manager.do_rados(mon, ['-p', 'rbd', 'setomapval', objname, 'key', 'val'])
+ manager.do_rados(mon, ['-p', 'rbd', 'setomapheader', objname, 'hdr'])
+
+ # Modify omap on specific osd
+ log.info('fuzzing omap of %s' % objname)
+ manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'key'])
+ manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname, 'badkey', 'badval'])
+ manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'badhdr'])
+
+ # scrub, verify inconsistent
+ manager.raw_cluster_cmd('pg', 'deep-scrub', victim)
+ # Give deep-scrub a chance to start
+ time.sleep(60)
+
+ while True:
+ stats = manager.get_single_pg_stats(victim)
+ state = stats['state']
+
+ # wait for the scrub to finish
+ if 'scrubbing' in state:
+ time.sleep(3)
+ continue
+
+ inconsistent = stats['state'].find('+inconsistent') != -1
+ assert inconsistent
+ break
+
+ # repair, verify no longer inconsistent
+ manager.raw_cluster_cmd('pg', 'repair', victim)
+ # Give repair a chance to start
+ time.sleep(60)
+
+ while True:
+ stats = manager.get_single_pg_stats(victim)
+ state = stats['state']
+
+ # wait for the scrub to finish
+ if 'scrubbing' in state:
+ time.sleep(3)
+ continue
+
+ inconsistent = stats['state'].find('+inconsistent') != -1
+ assert not inconsistent
+ break
+
+ log.info('test successful!')
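+
+# A possible refactor (sketch only, not wired into the task above): the four
+# scrub-wait loops share the same shape and could be collapsed into a helper
+# built solely from calls already used in this file:
+#
+#   def wait_for_scrub_result(manager, pgid, expect_inconsistent, delay=3):
+#       while True:
+#           state = manager.get_single_pg_stats(pgid)['state']
+#           if 'scrubbing' in state:
+#               time.sleep(delay)
+#               continue
+#           assert ('+inconsistent' in state) == expect_inconsistent
+#           return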
--- /dev/null
+from textwrap import dedent
+
+from .. import devstack
+
+
+class TestDevstack(object):
+ def test_parse_os_table(self):
+ table_str = dedent("""
+ +---------------------+--------------------------------------+
+ | Property | Value |
+ +---------------------+--------------------------------------+
+ | attachments | [] |
+ | availability_zone | nova |
+ | bootable | false |
+ | created_at | 2014-02-21T17:14:47.548361 |
+ | display_description | None |
+ | display_name | NAME |
+ | id | ffdbd1bb-60dc-4d95-acfe-88774c09ad3e |
+ | metadata | {} |
+ | size | 1 |
+ | snapshot_id | None |
+ | source_volid | None |
+ | status | creating |
+ | volume_type | None |
+ +---------------------+--------------------------------------+
+ """).strip()
+ expected = {
+ 'Property': 'Value',
+ 'attachments': '[]',
+ 'availability_zone': 'nova',
+ 'bootable': 'false',
+ 'created_at': '2014-02-21T17:14:47.548361',
+ 'display_description': 'None',
+ 'display_name': 'NAME',
+ 'id': 'ffdbd1bb-60dc-4d95-acfe-88774c09ad3e',
+ 'metadata': '{}',
+ 'size': '1',
+ 'snapshot_id': 'None',
+ 'source_volid': 'None',
+ 'status': 'creating',
+ 'volume_type': 'None'}
+
+ vol_info = devstack.parse_os_table(table_str)
+ assert vol_info == expected
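+
+        # Illustrative usage outside the test (hypothetical remote/CLI call,
+        # not asserted here): parse_os_table() is meant to turn any
+        # two-column "Property | Value" table printed by an OpenStack CLI
+        # into a dict, e.g.
+        #
+        #   out = StringIO()
+        #   remote.run(args=['cinder', 'create', '1'], stdout=out)
+        #   vol_id = devstack.parse_os_table(out.getvalue())['id']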
+
--- /dev/null
+"""
+Task to handle tgt
+
+Assumptions made:
+ The ceph-extras tgt package may need to get installed.
+ The open-iscsi package needs to get installed.
+"""
+import logging
+import contextlib
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def start_tgt_remotes(ctx, start_tgtd):
+ """
+ This subtask starts up a tgtd on the clients specified
+ """
+ remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+ tgtd_list = []
+ for rem, roles in remotes.iteritems():
+ for _id in roles:
+ if _id in start_tgtd:
+                if rem not in tgtd_list:
+ tgtd_list.append(rem)
+ size = ctx.config.get('image_size', 10240)
+ rem.run(
+ args=[
+ 'rbd',
+ 'create',
+ 'iscsi-image',
+ '--size',
+ str(size),
+ ])
+ rem.run(
+ args=[
+ 'sudo',
+ 'tgtadm',
+ '--lld',
+ 'iscsi',
+ '--mode',
+ 'target',
+ '--op',
+ 'new',
+ '--tid',
+ '1',
+ '--targetname',
+ 'rbd',
+ ])
+ rem.run(
+ args=[
+ 'sudo',
+ 'tgtadm',
+ '--lld',
+ 'iscsi',
+ '--mode',
+ 'logicalunit',
+ '--op',
+ 'new',
+ '--tid',
+ '1',
+ '--lun',
+ '1',
+ '--backing-store',
+ 'iscsi-image',
+ '--bstype',
+ 'rbd',
+ ])
+ rem.run(
+ args=[
+ 'sudo',
+ 'tgtadm',
+ '--lld',
+ 'iscsi',
+ '--op',
+ 'bind',
+ '--mode',
+ 'target',
+ '--tid',
+ '1',
+ '-I',
+ 'ALL',
+ ])
+ try:
+ yield
+
+ finally:
+ for rem in tgtd_list:
+ rem.run(
+ args=[
+ 'sudo',
+ 'tgtadm',
+ '--lld',
+ 'iscsi',
+ '--mode',
+ 'target',
+ '--op',
+ 'delete',
+ '--force',
+ '--tid',
+ '1',
+ ])
+ rem.run(
+ args=[
+ 'rbd',
+ 'snap',
+ 'purge',
+ 'iscsi-image',
+ ])
+ rem.run(
+ args=[
+ 'sudo',
+ 'rbd',
+ 'rm',
+ 'iscsi-image',
+ ])
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Start up tgt.
+
+    To start on all clients::
+
+ tasks:
+ - ceph:
+ - tgt:
+
+ To start on certain clients::
+
+ tasks:
+ - ceph:
+ - tgt: [client.0, client.3]
+
+ or
+
+ tasks:
+ - ceph:
+ - tgt:
+ client.0:
+ client.3:
+
+    An image size (in MB) can also be specified::
+
+        tasks:
+        - ceph:
+        - tgt:
+            image_size: 20480
+
+ The general flow of things here is:
+ 1. Find clients on which tgt is supposed to run (start_tgtd)
+ 2. Remotely start up tgt daemon
+ On cleanup:
+ 3. Stop tgt daemon
+
+ The iscsi administration is handled by the iscsi task.
+ """
+    if config:
+        # accept either the list or the dict form documented above
+        if isinstance(config, list):
+            config = dict.fromkeys(config)
+        config = {key: val for key, val in config.items()
+                  if key.startswith('client')}
+ # config at this point should only contain keys starting with 'client'
+ start_tgtd = []
+ remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+ log.info(remotes)
+ if not config:
+ start_tgtd = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ else:
+ start_tgtd = config
+ log.info(start_tgtd)
+ with contextutil.nested(
+ lambda: start_tgt_remotes(ctx=ctx, start_tgtd=start_tgtd),):
+ yield
--- /dev/null
+"""
+Thrash -- Simulate random osd failures.
+"""
+import contextlib
+import logging
+import ceph_manager
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ "Thrash" the OSDs by randomly marking them out/down (and then back
+ in) until the task is ended. This loops, and every op_delay
+ seconds it randomly chooses to add or remove an OSD (even odds)
+ unless there are fewer than min_out OSDs out of the cluster, or
+ more than min_in OSDs in the cluster.
+
+ All commands are run on mon0 and it stops when __exit__ is called.
+
+ The config is optional, and is a dict containing some or all of:
+
+ min_in: (default 3) the minimum number of OSDs to keep in the
+ cluster
+
+ min_out: (default 0) the minimum number of OSDs to keep out of the
+ cluster
+
+ op_delay: (5) the length of time to sleep between changing an
+ OSD's status
+
+ min_dead: (0) minimum number of osds to leave down/dead.
+
+ max_dead: (0) maximum number of osds to leave down/dead before waiting
+ for clean. This should probably be num_replicas - 1.
+
+ clean_interval: (60) the approximate length of time to loop before
+ waiting until the cluster goes clean. (In reality this is used
+ to probabilistically choose when to wait, and the method used
+ makes it closer to -- but not identical to -- the half-life.)
+
+ scrub_interval: (-1) the approximate length of time to loop before
+ waiting until a scrub is performed while cleaning. (In reality
+ this is used to probabilistically choose when to wait, and it
+ only applies to the cases where cleaning is being performed).
+ -1 is used to indicate that no scrubbing will be done.
+
+ chance_down: (0.4) the probability that the thrasher will mark an
+ OSD down rather than marking it out. (The thrasher will not
+ consider that OSD out of the cluster, since presently an OSD
+ wrongly marked down will mark itself back up again.) This value
+ can be either an integer (eg, 75) or a float probability (eg
+ 0.75).
+
+ chance_test_min_size: (0) chance to run test_pool_min_size,
+ which:
+ - kills all but one osd
+ - waits
+ - kills that osd
+ - revives all other osds
+ - verifies that the osds fully recover
+
+ timeout: (360) the number of seconds to wait for the cluster
+ to become clean after each cluster change. If this doesn't
+ happen within the timeout, an exception will be raised.
+
+ revive_timeout: (150) number of seconds to wait for an osd asok to
+ appear after attempting to revive the osd
+
+ thrash_primary_affinity: (true) randomly adjust primary-affinity
+
+ chance_pgnum_grow: (0) chance to increase a pool's size
+ chance_pgpnum_fix: (0) chance to adjust pgpnum to pg for a pool
+ pool_grow_by: (10) amount to increase pgnum by
+ max_pgs_per_pool_osd: (1200) don't expand pools past this size per osd
+
+ pause_short: (3) duration of short pause
+ pause_long: (80) duration of long pause
+ pause_check_after: (50) assert osd down after this long
+ chance_inject_pause_short: (1) chance of injecting short stall
+ chance_inject_pause_long: (0) chance of injecting long stall
+
+ clean_wait: (0) duration to wait before resuming thrashing once clean
+
+ powercycle: (false) whether to power cycle the node instead
+ of just the osd process. Note that this assumes that a single
+ osd is the only important process on the node.
+
+ chance_test_backfill_full: (0) chance to simulate full disks stopping
+ backfill
+
+ chance_test_map_discontinuity: (0) chance to test map discontinuity
+ map_discontinuity_sleep_time: (40) time to wait for map trims
+
+ ceph_objectstore_tool: (true) whether to export/import a pg while an osd is down
+ chance_move_pg: (1.0) chance of moving a pg if more than 1 osd is down (default 100%)
+
+ example:
+
+ tasks:
+ - ceph:
+ - thrashosds:
+ chance_down: 10
+ op_delay: 3
+ min_in: 1
+ timeout: 600
+ - interactive:
+ """
+ if config is None:
+ config = {}
+ assert isinstance(config, dict), \
+ 'thrashosds task only accepts a dict for configuration'
+ overrides = ctx.config.get('overrides', {})
+ teuthology.deep_merge(config, overrides.get('thrashosds', {}))
+
+ if 'powercycle' in config:
+
+ # sync everyone first to avoid collateral damage to / etc.
+ log.info('Doing preliminary sync to avoid collateral damage...')
+ ctx.cluster.run(args=['sync'])
+
+ if 'ipmi_user' in ctx.teuthology_config:
+ for remote in ctx.cluster.remotes.keys():
+ log.debug('checking console status of %s' % remote.shortname)
+ if not remote.console.check_status():
+ log.warn('Failed to get console status for %s',
+ remote.shortname)
+
+ # check that all osd remotes have a valid console
+ osds = ctx.cluster.only(teuthology.is_type('osd'))
+ for remote in osds.remotes.keys():
+ if not remote.console.has_ipmi_credentials:
+ raise Exception(
+ 'IPMI console required for powercycling, '
+ 'but not available on osd role: {r}'.format(
+ r=remote.name))
+
+ log.info('Beginning thrashosds...')
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ manager = ceph_manager.CephManager(
+ mon,
+ ctx=ctx,
+ config=config,
+ logger=log.getChild('ceph_manager'),
+ )
+ ctx.manager = manager
+ thrash_proc = ceph_manager.Thrasher(
+ manager,
+ config,
+ logger=log.getChild('thrasher')
+ )
+ try:
+ yield
+ finally:
+ log.info('joining thrashosds')
+ thrash_proc.do_join()
+ manager.wait_for_recovery(config.get('timeout', 360))
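+
+# Note (illustrative): because this task deep-merges ctx overrides into its
+# config (see teuthology.deep_merge above), the same options can also be
+# supplied from a separate yaml fragment, e.g.
+#
+#   overrides:
+#     thrashosds:
+#       chance_pgnum_grow: 1
+#       chance_pgpnum_fix: 1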
--- /dev/null
+#cloud-config-archive
+
+- type: text/cloud-config
+ content: |
+ output:
+ all: '| tee -a /var/log/cloud-init-output.log'
+
+# allow passwordless access for debugging
+- |
+ #!/bin/bash
+ exec passwd -d ubuntu
+
+- |
+ #!/bin/bash
+
+    # mount an NFS share for storing logs
+ apt-get update
+ apt-get -y install nfs-common
+ mkdir /mnt/log
+ # 10.0.2.2 is the host
+ mount -v -t nfs -o proto=tcp 10.0.2.2:{mnt_dir} /mnt/log
+
+ # mount the iso image that has the test script
+ mkdir /mnt/cdrom
+ mount -t auto /dev/cdrom /mnt/cdrom
--- /dev/null
+- |
+ #!/bin/bash
+ cp /var/log/cloud-init-output.log /mnt/log
+
+- |
+ #!/bin/bash
+ umount /mnt/log
+
+- |
+ #!/bin/bash
+ shutdown -h -P now
--- /dev/null
+from teuthology.misc import get_testdir
+from teuthology.orchestra import run
+
+
+def write_secret_file(ctx, remote, role, keyring, filename):
+ """
+    Stash the keyring in the specified filename.
+ """
+ testdir = get_testdir(ctx)
+ remote.run(
+ args=[
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'ceph-authtool',
+ '--name={role}'.format(role=role),
+ '--print-key',
+ keyring,
+ run.Raw('>'),
+ filename,
+ ],
+ )
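+
+# Illustrative call (paths and role are hypothetical): extract client.0's key
+# from a keyring into a secret file suitable for mount.ceph:
+#
+#   write_secret_file(ctx, remote, 'client.0',
+#                     '/etc/ceph/ceph.keyring', '/tmp/client.0.secret')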
--- /dev/null
+import logging
+
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def rados(ctx, remote, cmd, wait=True, check_status=False):
+ testdir = teuthology.get_testdir(ctx)
+ log.info("rados %s" % ' '.join(cmd))
+ pre = [
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'rados',
+    ]
+ pre.extend(cmd)
+ proc = remote.run(
+ args=pre,
+ check_status=check_status,
+ wait=wait,
+ )
+ if wait:
+ return proc.exitstatus
+ else:
+ return proc
+
+def create_ec_pool(remote, name, profile_name, pgnum, m=1, k=2):
+ remote.run(args=[
+ 'ceph', 'osd', 'erasure-code-profile', 'set',
+ profile_name, 'm=' + str(m), 'k=' + str(k),
+ 'ruleset-failure-domain=osd',
+ ])
+ remote.run(args=[
+ 'ceph', 'osd', 'pool', 'create', name,
+ str(pgnum), str(pgnum), 'erasure', profile_name,
+ ])
+
+def create_replicated_pool(remote, name, pgnum):
+ remote.run(args=[
+ 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum),
+ ])
+
+def create_cache_pool(remote, base_name, cache_name, pgnum, size):
+ remote.run(args=[
+ 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum)
+ ])
+ remote.run(args=[
+ 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name,
+ str(size),
+ ])
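+
+# Example usage (illustrative only; 'remote' is any cluster remote handle,
+# names and sizes are made up):
+#
+#   create_ec_pool(remote, 'ecpool', 'ecprofile', pgnum=12, m=1, k=2)
+#   create_replicated_pool(remote, 'basepool', pgnum=12)
+#   create_cache_pool(remote, 'basepool', 'cachepool', pgnum=12,
+#                     size=10*1024*1024)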
--- /dev/null
+from cStringIO import StringIO
+import logging
+import json
+import requests
+from urlparse import urlparse
+
+from teuthology.orchestra.connection import split_user
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+# simple test to indicate if multi-region testing should occur
+def multi_region_enabled(ctx):
+ # this is populated by the radosgw-agent task, seems reasonable to
+ # use that as an indicator that we're testing multi-region sync
+ return 'radosgw_agent' in ctx
+
+def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False):
+ log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd))
+ testdir = teuthology.get_testdir(ctx)
+ pre = [
+ 'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir),
+        'radosgw-admin',
+ '--log-to-stderr',
+ '--format', 'json',
+ '-n', client,
+ ]
+ pre.extend(cmd)
+ log.info('rgwadmin: cmd=%s' % pre)
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ proc = remote.run(
+ args=pre,
+ check_status=check_status,
+ stdout=StringIO(),
+ stderr=StringIO(),
+ stdin=stdin,
+ )
+ r = proc.exitstatus
+ out = proc.stdout.getvalue()
+ j = None
+ if not r and out != '':
+ try:
+ j = json.loads(out)
+ log.info(' json result: %s' % j)
+ except ValueError:
+ j = out
+ log.info(' raw result: %s' % j)
+ return (r, j)
+
+def get_user_summary(out, user):
+ """Extract the summary for a given user"""
+ user_summary = None
+ for summary in out['summary']:
+ if summary.get('user') == user:
+ user_summary = summary
+
+ if not user_summary:
+ raise AssertionError('No summary info found for user: %s' % user)
+
+ return user_summary
+
+def get_user_successful_ops(out, user):
+ summary = out['summary']
+ if len(summary) == 0:
+ return 0
+ return get_user_summary(out, user)['total']['successful_ops']
+
+def get_zone_host_and_port(ctx, client, zone):
+ _, region_map = rgwadmin(ctx, client, check_status=True,
+ cmd=['-n', client, 'region-map', 'get'])
+ regions = region_map['regions']
+ for region in regions:
+ for zone_info in region['val']['zones']:
+ if zone_info['name'] == zone:
+ endpoint = urlparse(zone_info['endpoints'][0])
+ host, port = endpoint.hostname, endpoint.port
+ if port is None:
+ port = 80
+ return host, port
+ assert False, 'no endpoint for zone {zone} found'.format(zone=zone)
+
+def get_master_zone(ctx, client):
+ _, region_map = rgwadmin(ctx, client, check_status=True,
+ cmd=['-n', client, 'region-map', 'get'])
+ regions = region_map['regions']
+ for region in regions:
+ is_master = (region['val']['is_master'] == "true")
+ log.info('region={r} is_master={ism}'.format(r=region, ism=is_master))
+ if not is_master:
+ continue
+ master_zone = region['val']['master_zone']
+ log.info('master_zone=%s' % master_zone)
+ for zone_info in region['val']['zones']:
+ if zone_info['name'] == master_zone:
+ return master_zone
+ log.info('couldn\'t find master zone')
+ return None
+
+def get_master_client(ctx, clients):
+ master_zone = get_master_zone(ctx, clients[0]) # can use any client for this as long as system configured correctly
+ if not master_zone:
+ return None
+
+ for client in clients:
+ zone = zone_for_client(ctx, client)
+ if zone == master_zone:
+ return client
+
+ return None
+
+def get_zone_system_keys(ctx, client, zone):
+ _, zone_info = rgwadmin(ctx, client, check_status=True,
+ cmd=['-n', client,
+ 'zone', 'get', '--rgw-zone', zone])
+ system_key = zone_info['system_key']
+ return system_key['access_key'], system_key['secret_key']
+
+def zone_for_client(ctx, client):
+ ceph_config = ctx.ceph.conf.get('global', {})
+ ceph_config.update(ctx.ceph.conf.get('client', {}))
+ ceph_config.update(ctx.ceph.conf.get(client, {}))
+ return ceph_config.get('rgw zone')
+
+def region_for_client(ctx, client):
+ ceph_config = ctx.ceph.conf.get('global', {})
+ ceph_config.update(ctx.ceph.conf.get('client', {}))
+ ceph_config.update(ctx.ceph.conf.get(client, {}))
+ return ceph_config.get('rgw region')
+
+def radosgw_data_log_window(ctx, client):
+ ceph_config = ctx.ceph.conf.get('global', {})
+ ceph_config.update(ctx.ceph.conf.get('client', {}))
+ ceph_config.update(ctx.ceph.conf.get(client, {}))
+ return ceph_config.get('rgw data log window', 30)
+
+def radosgw_agent_sync_data(ctx, agent_host, agent_port, full=False):
+ log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port))
+ method = "full" if full else "incremental"
+ return requests.post('http://{addr}:{port}/data/{method}'.format(addr = agent_host, port = agent_port, method = method))
+
+def radosgw_agent_sync_metadata(ctx, agent_host, agent_port, full=False):
+ log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port))
+ method = "full" if full else "incremental"
+ return requests.post('http://{addr}:{port}/metadata/{method}'.format(addr = agent_host, port = agent_port, method = method))
+
+def radosgw_agent_sync_all(ctx, full=False, data=False):
+ if ctx.radosgw_agent.procs:
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ zone_for_client(ctx, agent_client)
+ sync_host, sync_port = get_sync_agent(ctx, agent_client)
+ log.debug('doing a sync via {host1}'.format(host1=sync_host))
+ radosgw_agent_sync_metadata(ctx, sync_host, sync_port, full)
+            if data:
+ radosgw_agent_sync_data(ctx, sync_host, sync_port, full)
+
+def host_for_role(ctx, role):
+ for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+ if role in roles:
+ _, host = split_user(target)
+ return host
+
+def get_sync_agent(ctx, source):
+ for task in ctx.config['tasks']:
+ if 'radosgw-agent' not in task:
+ continue
+ for client, conf in task['radosgw-agent'].iteritems():
+ if conf['src'] == source:
+ return host_for_role(ctx, source), conf.get('port', 8000)
+ return None, None
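+
+# Illustrative flow: after the radosgw-agent task has populated
+# ctx.radosgw_agent, a multi-region test can force a full metadata + data
+# sync and locate the master-zone client with (client names hypothetical):
+#
+#   radosgw_agent_sync_all(ctx, full=True, data=True)
+#   master = get_master_client(ctx, ['client.0', 'client.1'])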
--- /dev/null
+"""
+test_stress_watch task
+"""
+import contextlib
+import logging
+import proc_thrasher
+
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run test_stress_watch
+
+ The config should be as follows:
+
+ test_stress_watch:
+ clients: [client list]
+
+ example:
+
+ tasks:
+ - ceph:
+ - test_stress_watch:
+ clients: [client.0]
+ - interactive:
+ """
+ log.info('Beginning test_stress_watch...')
+ assert isinstance(config, dict), \
+ "please list clients to run on"
+ testwatch = {}
+
+ remotes = []
+
+ for role in config.get('clients', ['client.0']):
+ assert isinstance(role, basestring)
+ PREFIX = 'client.'
+ assert role.startswith(PREFIX)
+ id_ = role[len(PREFIX):]
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remotes.append(remote)
+
+        args = ['CEPH_CLIENT_ID={id_}'.format(id_=id_),
+ 'CEPH_ARGS="{flags}"'.format(flags=config.get('flags', '')),
+ 'daemon-helper',
+ 'kill',
+ 'multi_stress_watch foo foo'
+ ]
+
+ log.info("args are %s" % (args,))
+
+ proc = proc_thrasher.ProcThrasher({}, remote,
+ args=[run.Raw(i) for i in args],
+ logger=log.getChild('testwatch.{id}'.format(id=id_)),
+ stdin=run.PIPE,
+ wait=False
+ )
+ proc.start()
+ testwatch[id_] = proc
+
+ try:
+ yield
+ finally:
+ log.info('joining watch_notify_stress')
+ for i in testwatch.itervalues():
+ i.join()
--- /dev/null
+"""
+Workunit task -- Run ceph on sets of specific clients
+"""
+import logging
+import pipes
+import os
+
+from teuthology import misc
+from teuthology.orchestra.run import CommandFailedError
+from teuthology.parallel import parallel
+from teuthology.orchestra import run
+
+log = logging.getLogger(__name__)
+
+CLIENT_PREFIX = 'client.'
+
+
+def task(ctx, config):
+ """
+ Run ceph on all workunits found under the specified path.
+
+ For example::
+
+ tasks:
+ - ceph:
+ - ceph-fuse: [client.0]
+ - workunit:
+ clients:
+ client.0: [direct_io, xattrs.sh]
+ client.1: [snaps]
+ branch: foo
+
+    You can also run a list of workunits on all clients::
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ - workunit:
+ tag: v0.47
+ clients:
+ all: [direct_io, xattrs.sh, snaps]
+
+ If you have an "all" section it will run all the workunits
+ on each client simultaneously, AFTER running any workunits specified
+ for individual clients. (This prevents unintended simultaneous runs.)
+
+ To customize tests, you can specify environment variables as a dict. You
+    can also specify a time limit for each work unit (defaults to 3h)::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ - workunit:
+ sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6
+ clients:
+ all: [snaps]
+ env:
+ FOO: bar
+ BAZ: quux
+ timeout: 3h
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ assert isinstance(config, dict)
+ assert isinstance(config.get('clients'), dict), \
+ 'configuration must contain a dictionary of clients'
+
+ overrides = ctx.config.get('overrides', {})
+ misc.deep_merge(config, overrides.get('workunit', {}))
+
+ refspec = config.get('branch')
+ if refspec is None:
+ refspec = config.get('sha1')
+ if refspec is None:
+ refspec = config.get('tag')
+ if refspec is None:
+ refspec = 'HEAD'
+
+ timeout = config.get('timeout', '3h')
+
+ log.info('Pulling workunits from ref %s', refspec)
+
+ created_mountpoint = {}
+
+ if config.get('env') is not None:
+ assert isinstance(config['env'], dict), 'env must be a dictionary'
+ clients = config['clients']
+
+ # Create scratch dirs for any non-all workunits
+ log.info('Making a separate scratch dir for every client...')
+ for role in clients.iterkeys():
+ assert isinstance(role, basestring)
+ if role == "all":
+ continue
+
+ assert role.startswith(CLIENT_PREFIX)
+ created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir'))
+ created_mountpoint[role] = created_mnt_dir
+
+ # Execute any non-all workunits
+ with parallel() as p:
+ for role, tests in clients.iteritems():
+ if role != "all":
+ p.spawn(_run_tests, ctx, refspec, role, tests,
+ config.get('env'), timeout=timeout)
+
+ # Clean up dirs from any non-all workunits
+ for role, created in created_mountpoint.items():
+ _delete_dir(ctx, role, created)
+
+ # Execute any 'all' workunits
+ if 'all' in clients:
+ all_tasks = clients["all"]
+ _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'),
+ config.get('subdir'), timeout=timeout)
+
+
+def _delete_dir(ctx, role, created_mountpoint):
+ """
+ Delete file used by this role, and delete the directory that this
+ role appeared in.
+
+ :param ctx: Context
+ :param role: "role.#" where # is used for the role id.
+ """
+ testdir = misc.get_testdir(ctx)
+ id_ = role[len(CLIENT_PREFIX):]
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ # Is there any reason why this is not: join(mnt, role) ?
+ client = os.path.join(mnt, 'client.{id}'.format(id=id_))
+
+ # Remove the directory inside the mount where the workunit ran
+ remote.run(
+ args=[
+ 'sudo',
+ 'rm',
+ '-rf',
+ '--',
+ client,
+ ],
+ )
+ log.info("Deleted dir {dir}".format(dir=client))
+
+ # If the mount was an artificially created dir, delete that too
+ if created_mountpoint:
+ remote.run(
+ args=[
+ 'rmdir',
+ '--',
+ mnt,
+ ],
+ )
+ log.info("Deleted artificial mount point {dir}".format(dir=client))
+
+
+def _make_scratch_dir(ctx, role, subdir):
+ """
+ Make scratch directories for this role. This also makes the mount
+ point if that directory does not exist.
+
+ :param ctx: Context
+ :param role: "role.#" where # is used for the role id.
+ :param subdir: use this subdir (False if not used)
+ """
+ created_mountpoint = False
+ id_ = role[len(CLIENT_PREFIX):]
+ log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role))
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ dir_owner = remote.user
+ mnt = os.path.join(misc.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
+ # if neither kclient nor ceph-fuse are required for a workunit,
+ # mnt may not exist. Stat and create the directory if it doesn't.
+ try:
+ remote.run(
+ args=[
+ 'stat',
+ '--',
+ mnt,
+ ],
+ )
+ log.info('Did not need to create dir {dir}'.format(dir=mnt))
+ except CommandFailedError:
+ remote.run(
+ args=[
+ 'mkdir',
+ '--',
+ mnt,
+ ],
+ )
+ log.info('Created dir {dir}'.format(dir=mnt))
+ created_mountpoint = True
+
+ if not subdir:
+ subdir = 'client.{id}'.format(id=id_)
+
+ if created_mountpoint:
+ remote.run(
+ args=[
+ 'cd',
+ '--',
+ mnt,
+ run.Raw('&&'),
+ 'mkdir',
+ '--',
+ subdir,
+ ],
+ )
+ else:
+ remote.run(
+ args=[
+ # cd first so this will fail if the mount point does
+ # not exist; pure install -d will silently do the
+ # wrong thing
+ 'cd',
+ '--',
+ mnt,
+ run.Raw('&&'),
+ 'sudo',
+ 'install',
+ '-d',
+ '-m', '0755',
+ '--owner={user}'.format(user=dir_owner),
+ '--',
+ subdir,
+ ],
+ )
+
+ return created_mountpoint
+
+
+def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
+ """
+ Make a scratch directory for each client in the cluster, and then for each
+ test spawn _run_tests() for each role.
+
+ See run_tests() for parameter documentation.
+ """
+ client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
+ client_remotes = list()
+
+ created_mountpoint = {}
+ for client in client_generator:
+ (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
+ client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
+ created_mountpoint[client] = _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)
+
+ for unit in tests:
+ with parallel() as p:
+ for remote, role in client_remotes:
+ p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir,
+ timeout=timeout)
+
+ # cleanup the generated client directories
+ client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
+ for client in client_generator:
+ _delete_dir(ctx, 'client.{id}'.format(id=client), created_mountpoint[client])
+
+
+def _run_tests(ctx, refspec, role, tests, env, subdir=None, timeout=None):
+ """
+ Run the individual test. Create a scratch directory and then extract the
+ workunits from git. Make the executables, and then run the tests.
+ Clean up (remove files created) after the tests are finished.
+
+ :param ctx: Context
+ :param refspec: branch, sha1, or version tag used to identify this
+ build
+ :param tests: specific tests specified.
+ :param env: environment set in yaml file. Could be None.
+ :param subdir: subdirectory set in yaml file. Could be None
+ :param timeout: If present, use the 'timeout' command on the remote host
+ to limit execution time. Must be specified by a number
+ followed by 's' for seconds, 'm' for minutes, 'h' for
+ hours, or 'd' for days. If '0' or anything that evaluates
+ to False is passed, the 'timeout' command is not used.
+ """
+ testdir = misc.get_testdir(ctx)
+ assert isinstance(role, basestring)
+ assert role.startswith(CLIENT_PREFIX)
+ id_ = role[len(CLIENT_PREFIX):]
+ (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ # subdir so we can remove and recreate this a lot without sudo
+ if subdir is None:
+ scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
+ else:
+ scratch_tmp = os.path.join(mnt, subdir)
+ srcdir = '{tdir}/workunit.{role}'.format(tdir=testdir, role=role)
+
+ remote.run(
+ logger=log.getChild(role),
+ args=[
+ 'mkdir', '--', srcdir,
+ run.Raw('&&'),
+ 'git',
+ 'archive',
+ '--remote=git://git.ceph.com/ceph.git',
+ '%s:qa/workunits' % refspec,
+ run.Raw('|'),
+ 'tar',
+ '-C', srcdir,
+ '-x',
+ '-f-',
+ run.Raw('&&'),
+ 'cd', '--', srcdir,
+ run.Raw('&&'),
+ 'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
+ run.Raw('&&'),
+            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
+ run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)),
+ ],
+ )
+
+ workunits = sorted(misc.get_file(
+ remote,
+ '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)).split('\0'))
+ assert workunits
+
+ try:
+ assert isinstance(tests, list)
+ for spec in tests:
+ log.info('Running workunits matching %s on %s...', spec, role)
+ prefix = '{spec}/'.format(spec=spec)
+ to_run = [w for w in workunits if w == spec or w.startswith(prefix)]
+ if not to_run:
+ raise RuntimeError('Spec did not match any workunits: {spec!r}'.format(spec=spec))
+ for workunit in to_run:
+ log.info('Running workunit %s...', workunit)
+ args = [
+ 'mkdir', '-p', '--', scratch_tmp,
+ run.Raw('&&'),
+ 'cd', '--', scratch_tmp,
+ run.Raw('&&'),
+ run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'),
+ run.Raw('CEPH_REF={ref}'.format(ref=refspec)),
+ run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
+ run.Raw('CEPH_ID="{id}"'.format(id=id_)),
+ run.Raw('PATH=$PATH:/usr/sbin')
+ ]
+ if env is not None:
+ for var, val in env.iteritems():
+ quoted_val = pipes.quote(val)
+ env_arg = '{var}={val}'.format(var=var, val=quoted_val)
+ args.append(run.Raw(env_arg))
+ args.extend([
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir)])
+ if timeout and timeout != '0':
+ args.extend(['timeout', timeout])
+ args.extend([
+ '{srcdir}/{workunit}'.format(
+ srcdir=srcdir,
+ workunit=workunit,
+ ),
+ ])
+ remote.run(
+ logger=log.getChild(role),
+ args=args,
+ label="workunit test {workunit}".format(workunit=workunit)
+ )
+ remote.run(
+ logger=log.getChild(role),
+ args=['sudo', 'rm', '-rf', '--', scratch_tmp],
+ )
+ finally:
+ log.info('Stopping %s on %s...', tests, role)
+ remote.run(
+ logger=log.getChild(role),
+ args=[
+ 'rm', '-rf', '--', '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role), srcdir,
+ ],
+ )
--- /dev/null
+[tox]
+envlist = flake8
+skipsdist = True
+
+[testenv:flake8]
+deps=
+ flake8
+commands=flake8 --select=F ceph-qa-suite
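+
+# Run the lint environment locally with (assumes tox is installed):
+#   tox -e flake8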
+++ /dev/null
-overrides:
- rgw:
- ec-data-pool: true
- s3tests:
- slow_backend: true
+++ /dev/null
-overrides:
- rgw:
- ec-data-pool: false
+++ /dev/null
-tasks:
-- install:
-- ceph:
+++ /dev/null
-roles:
-- [osd.0, osd.1, osd.2, client.0, mon.a]
-- [osd.3, osd.4, osd.5, client.1, mon.b]
-- [osd.6, osd.7, osd.8, client.2, mon.c]
-- [osd.9, osd.10, osd.11, client.3, mon.d]
-- [osd.12, osd.13, osd.14, client.4, mon.e]
-- [osd.15, osd.16, osd.17, client.5]
-- [osd.18, osd.19, osd.20, client.6]
-- [osd.21, osd.22, osd.23, client.7]
-- [osd.24, osd.25, osd.26, client.8]
-- [osd.27, osd.28, osd.29, client.9]
-- [osd.30, osd.31, osd.32, client.10]
-- [osd.33, osd.34, osd.35, client.11]
-- [osd.36, osd.37, osd.38, client.12]
-- [osd.39, osd.40, osd.41, client.13]
-- [osd.42, osd.43, osd.44, client.14]
-- [osd.45, osd.46, osd.47, client.15]
-- [osd.48, osd.49, osd.50, client.16]
-- [osd.51, osd.52, osd.53, client.17]
-- [osd.54, osd.55, osd.56, client.18]
-- [osd.57, osd.58, osd.59, client.19]
-- [osd.60, osd.61, osd.62, client.20]
-- [osd.63, osd.64, osd.65, client.21]
-- [osd.66, osd.67, osd.68, client.22]
-- [osd.69, osd.70, osd.71, client.23]
-- [osd.72, osd.73, osd.74, client.24]
-- [osd.75, osd.76, osd.77, client.25]
-- [osd.78, osd.79, osd.80, client.26]
-- [osd.81, osd.82, osd.83, client.27]
-- [osd.84, osd.85, osd.86, client.28]
-- [osd.87, osd.88, osd.89, client.29]
-- [osd.90, osd.91, osd.92, client.30]
-- [osd.93, osd.94, osd.95, client.31]
-- [osd.96, osd.97, osd.98, client.32]
-- [osd.99, osd.100, osd.101, client.33]
-- [osd.102, osd.103, osd.104, client.34]
-- [osd.105, osd.106, osd.107, client.35]
-- [osd.108, osd.109, osd.110, client.36]
-- [osd.111, osd.112, osd.113, client.37]
-- [osd.114, osd.115, osd.116, client.38]
-- [osd.117, osd.118, osd.119, client.39]
-- [osd.120, osd.121, osd.122, client.40]
-- [osd.123, osd.124, osd.125, client.41]
-- [osd.126, osd.127, osd.128, client.42]
-- [osd.129, osd.130, osd.131, client.43]
-- [osd.132, osd.133, osd.134, client.44]
-- [osd.135, osd.136, osd.137, client.45]
-- [osd.138, osd.139, osd.140, client.46]
-- [osd.141, osd.142, osd.143, client.47]
-- [osd.144, osd.145, osd.146, client.48]
-- [osd.147, osd.148, osd.149, client.49]
-- [osd.150, osd.151, osd.152, client.50]
-#- [osd.153, osd.154, osd.155, client.51]
-#- [osd.156, osd.157, osd.158, client.52]
-#- [osd.159, osd.160, osd.161, client.53]
-#- [osd.162, osd.163, osd.164, client.54]
-#- [osd.165, osd.166, osd.167, client.55]
-#- [osd.168, osd.169, osd.170, client.56]
-#- [osd.171, osd.172, osd.173, client.57]
-#- [osd.174, osd.175, osd.176, client.58]
-#- [osd.177, osd.178, osd.179, client.59]
-#- [osd.180, osd.181, osd.182, client.60]
-#- [osd.183, osd.184, osd.185, client.61]
-#- [osd.186, osd.187, osd.188, client.62]
-#- [osd.189, osd.190, osd.191, client.63]
-#- [osd.192, osd.193, osd.194, client.64]
-#- [osd.195, osd.196, osd.197, client.65]
-#- [osd.198, osd.199, osd.200, client.66]
+++ /dev/null
-roles:
-- [osd.0, osd.1, osd.2, client.0, mon.a]
-- [osd.3, osd.4, osd.5, client.1, mon.b]
-- [osd.6, osd.7, osd.8, client.2, mon.c]
-- [osd.9, osd.10, osd.11, client.3, mon.d]
-- [osd.12, osd.13, osd.14, client.4, mon.e]
-- [osd.15, osd.16, osd.17, client.5]
-- [osd.18, osd.19, osd.20, client.6]
-- [osd.21, osd.22, osd.23, client.7]
-- [osd.24, osd.25, osd.26, client.8]
-- [osd.27, osd.28, osd.29, client.9]
-- [osd.30, osd.31, osd.32, client.10]
-- [osd.33, osd.34, osd.35, client.11]
-- [osd.36, osd.37, osd.38, client.12]
-- [osd.39, osd.40, osd.41, client.13]
-- [osd.42, osd.43, osd.44, client.14]
-- [osd.45, osd.46, osd.47, client.15]
-- [osd.48, osd.49, osd.50, client.16]
-- [osd.51, osd.52, osd.53, client.17]
-- [osd.54, osd.55, osd.56, client.18]
-- [osd.57, osd.58, osd.59, client.19]
-- [osd.60, osd.61, osd.62, client.20]
+++ /dev/null
-roles:
-- [osd.0, osd.1, osd.2, client.0, mon.a]
-- [osd.3, osd.4, osd.5, client.1, mon.b]
-- [osd.6, osd.7, osd.8, client.2, mon.c]
-- [osd.9, osd.10, osd.11, client.3, mon.d]
-- [osd.12, osd.13, osd.14, client.4, mon.e]
+++ /dev/null
-overrides:
- ceph:
- fs: btrfs
- conf:
- osd:
- osd sloppy crc: true
- osd op thread timeout: 60
+++ /dev/null
-overrides:
- ceph:
- fs: xfs
- conf:
- osd:
- osd sloppy crc: true
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
+++ /dev/null
-tasks:
-- rados:
- ops: 4000
- max_seconds: 3600
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
+++ /dev/null
-../../../distros/supported
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph-deploy:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.3
- - osd.4
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - suites/blogbench.sh
-exclude_arch: armv7l
+++ /dev/null
-overrides:
- ceph-deploy:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.4
- - osd.3
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - suites/dbench.sh
-exclude_arch: armv7l
+++ /dev/null
-overrides:
- ceph-deploy:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.3
- - osd.4
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - suites/fsstress.sh
-exclude_arch: armv7l
+++ /dev/null
-../../../distros/supported
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph-deploy:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.3
- - osd.4
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- workunit:
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-overrides:
- ceph-deploy:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.3
- - osd.4
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- workunit:
- clients:
- client.0:
- - rados/test_python.sh
-
+++ /dev/null
-overrides:
- ceph-deploy:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.3
- - osd.4
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- workunit:
- clients:
- all:
- - rados/load-gen-big.sh
-
+++ /dev/null
-../../../distros/supported
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph-deploy:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.3
- - osd.4
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd.sh
+++ /dev/null
-overrides:
- ceph-deploy:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.3
- - osd.4
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- workunit:
- clients:
- client.0:
- - rbd/run_cli_tests.sh
+++ /dev/null
-overrides:
- ceph-deploy:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.3
- - osd.4
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- workunit:
- clients:
- client.0:
- - cls/test_cls_rbd.sh
+++ /dev/null
-overrides:
- ceph-deploy:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.3
- - osd.4
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd_python.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- debug ms: 1
- osd:
- debug osd: 10
- mon:
- debug mon: 10
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - osd.3
- - osd.4
- - osd.5
-- - mon.c
- - osd.6
- - osd.7
- - osd.8
-- - client.0
-
-tasks:
-- install:
- extras: yes
-- ssh_keys:
-- ceph-deploy:
-- workunit:
- clients:
- client.0:
- - suites/blogbench.sh
-
+++ /dev/null
-../../../distros/supported
\ No newline at end of file
+++ /dev/null
-roles:
-- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1]
-- [samba.0, client.0, client.1]
+++ /dev/null
-../../../debug/mds_client.yaml
\ No newline at end of file
+++ /dev/null
-roles:
- - [mon.a, mds.a, osd.0, osd.1, client.0]
-
-tasks:
- - nop:
-
+++ /dev/null
-roles:
-- [mon.a, mds.a, mds.a-s]
-- [mon.b, mds.b, mds.b-s]
-- [mon.c, mds.c, mds.c-s]
-- [osd.0]
-- [osd.1]
-- [osd.2]
-- [client.0]
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- mds:
- mds thrash exports: 1
- mds debug subtrees: 1
- mds debug scatterstat: 1
- mds verify scatter: 1
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - suites/fsstress.sh
-
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-../../../../debug/mds_client.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- exec:
- client.0:
- - ceph mds set inline_data true --yes-i-really-mean-it
+++ /dev/null
-../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - kernel_untar_build.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- timeout: 6h
- clients:
- all:
- - fs/misc
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - fs/test_o_trunc.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- filestore flush min: 0
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/ffsb.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsx.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsync-tester.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/iogen.sh
-
+++ /dev/null
-tasks:
-- ceph-fuse: [client.0]
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- client:
- debug ms: 1
- debug client: 20
- mds:
- debug ms: 1
- debug mds: 20
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/pjd.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- client:
- ms_inject_delay_probability: 1
- ms_inject_delay_type: osd
- ms_inject_delay_max: 5
- client_oc_max_dirty_age: 1
-tasks:
-- ceph-fuse:
-- exec:
- client.0:
- - cd $TESTDIR/mnt.* && dd if=/dev/zero of=./foo count=100
- - sleep 2
- - cd $TESTDIR/mnt.* && truncate --size 0 ./foo
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all: [fs/misc/trivial_sync.sh]
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - libcephfs/test.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - libcephfs-java/test.sh
+++ /dev/null
-tasks:
--mds_creation_failure:
--ceph-fuse:
-- workunit:
- clients:
- all: [fs/misc/trivial_sync.sh]
-
+++ /dev/null
-roles:
-- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2]
-- [client.2]
-- [client.1]
-- [client.0]
+++ /dev/null
-roles:
-- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2]
-- [client.1]
-- [client.0]
+++ /dev/null
-../../../../debug/mds_client.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-tasks:
-- install:
-- ceph:
-- kclient:
+++ /dev/null
-# make sure we get the same MPI version on all hosts
-os_type: ubuntu
-os_version: "14.04"
-
-tasks:
-- pexec:
- clients:
- - cd $TESTDIR
- - wget http://ceph.com/qa/fsx-mpi.c
- - mpicc fsx-mpi.c -o fsx-mpi
- - rm fsx-mpi.c
- - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
-- ssh_keys:
-- mpi:
- exec: $TESTDIR/fsx-mpi 1MB -N 50000 -p 10000 -l 1048576
- workdir: $TESTDIR/gmnt
-- pexec:
- all:
- - rm $TESTDIR/gmnt
- - rm $TESTDIR/fsx-mpi
+++ /dev/null
-# make sure we get the same MPI version on all hosts
-os_type: ubuntu
-os_version: "14.04"
-
-tasks:
-- pexec:
- clients:
- - cd $TESTDIR
- - wget http://ceph.com/qa/ior.tbz2
- - tar xvfj ior.tbz2
- - cd ior
- - ./configure
- - make
- - make install DESTDIR=$TESTDIR/binary/
- - cd $TESTDIR/
- - rm ior.tbz2
- - rm -r ior
- - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
-- ssh_keys:
-- mpi:
- exec: $TESTDIR/binary/usr/local/bin/ior -e -w -r -W -b 10m -a POSIX -o $TESTDIR/gmnt/ior.testfile
-- pexec:
- all:
- - rm -f $TESTDIR/gmnt/ior.testfile
- - rm -f $TESTDIR/gmnt
- - rm -rf $TESTDIR/binary
+++ /dev/null
-# make sure we get the same MPI version on all hosts
-os_type: ubuntu
-os_version: "14.04"
-
-tasks:
-- pexec:
- clients:
- - cd $TESTDIR
- - wget http://ceph.com/qa/mdtest-1.9.3.tgz
- - mkdir mdtest-1.9.3
- - cd mdtest-1.9.3
- - tar xvfz $TESTDIR/mdtest-1.9.3.tgz
- - rm $TESTDIR/mdtest-1.9.3.tgz
- - MPI_CC=mpicc make
- - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt
-- ssh_keys:
-- mpi:
- exec: $TESTDIR/mdtest-1.9.3/mdtest -d $TESTDIR/gmnt -I 20 -z 5 -b 2 -R
-- pexec:
- all:
- - rm -f $TESTDIR/gmnt
- - rm -rf $TESTDIR/mdtest-1.9.3
- - rm -rf $TESTDIR/._mdtest-1.9.3
\ No newline at end of file
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- fs: btrfs
- conf:
- osd:
- osd op thread timeout: 60
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - snaps/snaptest-0.sh
- - snaps/snaptest-1.sh
- - snaps/snaptest-2.sh
+++ /dev/null
-tasks:
-- mds_thrash:
+++ /dev/null
-tasks:
-- install:
-- ceph:
+++ /dev/null
-roles:
-- [mon.a, mon.c, osd.0, osd.1, osd.2]
-- [mon.b, mds.a, osd.3, osd.4, osd.5]
-- [client.0, mds.b-s-a]
+++ /dev/null
-../../../../debug/mds_client.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 2500
- mds inject delay type: osd mds
- ms inject delay probability: .005
- ms inject delay max: 1
+++ /dev/null
-../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/pjd.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all: [fs/misc/trivial_sync.sh]
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-../../../../debug/mds_client.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- osd:
- filestore flush min: 0
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/ffsb.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- mds:
- mds inject traceless reply probability: .5
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-../../../../debug/mds_client.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- client:
- debug client: 1/20
- debug ms: 0/10
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - libcephfs/test.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- lockdep: true
+++ /dev/null
-overrides:
- install:
- ceph:
- flavor: notcmalloc
- ceph:
- valgrind:
- mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
- osd: [--tool=memcheck]
- mds: [--tool=memcheck]
- ceph-fuse:
- client.0:
- valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+++ /dev/null
-roles:
-- [mon.0, mds.0, osd.0, hadoop.master.0]
-- [mon.1, osd.1, hadoop.slave.0]
-- [mon.2, hadoop.slave.1, client.0]
-
+++ /dev/null
-tasks:
-- ssh_keys:
-- install:
-- ceph:
-- hadoop:
-- workunit:
- clients:
- client.0: [hadoop-internal-tests]
+++ /dev/null
-tasks:
-- ssh_keys:
-- install:
-- ceph:
-- hadoop:
-- workunit:
- clients:
- client.0: [hadoop-wordcount]
+++ /dev/null
-tasks:
-- install:
-- install:
- project: samba
- extra_packages: ['samba']
-- ceph:
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - direct_io
-
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - kernel_untar_build.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- mds:
- debug mds: 20
- debug ms: 1
-- kclient:
-- workunit:
- clients:
- all:
- - fs/misc
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - fs/test_o_trunc.sh
-
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- osd:
- filestore flush min: 0
-- kclient:
-- workunit:
- clients:
- all:
- - suites/ffsb.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/fsx.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/fsync-tester.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/pjd.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all: [fs/misc/trivial_sync.sh]
+++ /dev/null
-roles:
-- [mon.a, mds.a, osd.0, osd.1]
-- [mon.b, mon.c, osd.2, osd.3, client.0]
-- [client.1]
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- parallel:
- - user-workload
- - kclient-workload
-user-workload:
- sequential:
- - ceph-fuse: [client.0]
- - workunit:
- clients:
- client.0:
- - suites/iozone.sh
-kclient-workload:
- sequential:
- - kclient: [client.1]
- - workunit:
- clients:
- client.1:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- parallel:
- - user-workload
- - kclient-workload
-user-workload:
- sequential:
- - ceph-fuse: [client.0]
- - workunit:
- clients:
- client.0:
- - suites/blogbench.sh
-kclient-workload:
- sequential:
- - kclient: [client.1]
- - workunit:
- clients:
- client.1:
- - kernel_untar_build.sh
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-- thrashosds:
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- filestore flush min: 0
-tasks:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/ffsb.sh
+++ /dev/null
-tasks:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-
-tasks:
-- install:
-- ceph:
-- kclient: [client.0]
-- knfsd:
- client.0:
- options: [rw,no_root_squash,async]
+++ /dev/null
-../../../../clusters/extra-client.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- nfs:
- client.1:
- server: client.0
- options: [rw,hard,intr,nfsvers=3]
+++ /dev/null
-tasks:
-- nfs:
- client.1:
- server: client.0
- options: [rw,hard,intr,nfsvers=4]
+++ /dev/null
-tasks:
-- workunit:
- timeout: 6h
- clients:
- client.1:
- - kernel_untar_build.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.1:
- - fs/misc/chmod.sh
- - fs/misc/i_complete_vs_rename.sh
- - fs/misc/trivial_sync.sh
- #- fs/misc/multiple_rsync.sh
- #- fs/misc/xattrs.sh
-# Once we can run multiple_rsync.sh and xattrs.sh we can change to this
-# - misc
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.1:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.1:
- - suites/dbench-short.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- filestore flush min: 0
-tasks:
-- workunit:
- clients:
- client.1:
- - suites/ffsb.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.1:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.1:
- - suites/iozone.sh
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 500
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - rbd/concurrent.sh
-# Options for rbd/concurrent.sh (default values shown)
-# env:
-# RBD_CONCURRENT_ITER: 100
-# RBD_CONCURRENT_COUNT: 5
-# RBD_CONCURRENT_DELAY: 5
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - rbd/image_read.sh
-# Options for rbd/image_read.sh (default values shown)
-# env:
-# IMAGE_READ_LOCAL_FILES: 'false'
-# IMAGE_READ_FORMAT: '2'
-# IMAGE_READ_VERBOSE: 'true'
-# IMAGE_READ_PAGE_SIZE: '4096'
-# IMAGE_READ_OBJECT_ORDER: '22'
-# IMAGE_READ_TEST_CLONES: 'true'
-# IMAGE_READ_DOUBLE_ORDER: 'true'
-# IMAGE_READ_HALF_ORDER: 'false'
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - rbd/kernel.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - rbd/map-snapshot-io.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - rbd/map-unmap.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - rbd/simple_big.sh
-
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 500
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rbd:
- all:
-- workunit:
- clients:
- all:
- - kernel_untar_build.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rbd:
- all:
-- workunit:
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rbd:
- all:
- image_size: 20480
-- workunit:
- clients:
- all:
- - suites/ffsb.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rbd:
- all:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rbd:
- all:
- fs_type: btrfs
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rbd:
- all:
- fs_type: ext4
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rbd:
- all:
-- workunit:
- clients:
- all:
- - suites/fsx.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rbd:
- all:
- image_size: 20480
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rbd:
- all:
-- workunit:
- clients:
- all: [fs/misc/trivial_sync.sh]
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 500
+++ /dev/null
-roles:
-- [mon.a, mon.c, osd.0, osd.1, osd.2]
-- [mon.b, mds.a, osd.3, osd.4, osd.5]
-- [client.0]
-- [client.1]
-- [client.2]
-tasks:
-- install:
-- ceph:
-- rbd.xfstests:
- client.0:
- tests: 1-9 11-15 17 19-21 26-29 31-34 41 46-54 56 61 63-67 69-70 74-76 78-79 84-89 91
- test_image: 'test_image-0'
- scratch_image: 'scratch_image-0'
- client.1:
- tests: 92 100 103 105 108 110 116-121 124 126 129-132
- test_image: 'test_image-1'
- scratch_image: 'scratch_image-1'
- client.2:
- tests: 133-135 137-141 164-167 184 187-190 192 194 196 199 201 203 214-216 220-227 234 236-238 241 243-249 253 257-259 261-262 269 273 275 277-278
- test_image: 'test_image-2'
- scratch_image: 'scratch_image-2'
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-- thrashosds:
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
+++ /dev/null
-tasks:
-- rbd:
- all:
- image_size: 20480
-- workunit:
- clients:
- all:
- - suites/ffsb.sh
+++ /dev/null
-tasks:
-- rbd:
- all:
- image_size: 20480
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-roles:
-- [mon.a, mon.c, osd.0, osd.1, osd.2]
-- [mon.b, mds.a, osd.3, osd.4, osd.5]
-- [client.0]
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/fsx.sh
+++ /dev/null
-roles:
-- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2]
-- [client.1]
-- [client.0]
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- locktest: [client.0, client.1]
+++ /dev/null
-roles:
-- [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
-- [mds.a]
-- [client.0]
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- mds:
- mds log segment size: 16384
- mds log max segments: 1
-- restart:
- exec:
- client.0:
- - test-backtraces.py
+++ /dev/null
-roles:
-- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
-- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5]
-- [client.0]
-- [client.1]
+++ /dev/null
-roles:
-- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
-- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
-- [client.0]
-- [client.1]
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - fs/misc
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/fsync-tester.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/pjd.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- client:
- ms_inject_delay_probability: 1
- ms_inject_delay_type: osd
- ms_inject_delay_max: 5
- client_oc_max_dirty_age: 1
-- ceph-fuse:
-- exec:
- client.0:
- - dd if=/dev/zero of=./foo count=100
- - sleep 2
- - truncate --size 0 ./foo
+++ /dev/null
-overrides:
- ceph:
- conf:
- mds:
- mds thrash exports: 1
+++ /dev/null
-roles:
-- [mon.a, mds.a, osd.0, osd.1]
-- [mon.b, mon.c, osd.2, osd.3, client.0]
-- [client.1]
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-tasks:
-- install:
- branch: dumpling
-- ceph:
-- parallel:
- - user-workload
- - kclient-workload
-user-workload:
- sequential:
- - ceph-fuse: [client.0]
- - workunit:
- clients:
- client.0:
- - suites/iozone.sh
-kclient-workload:
- sequential:
- - kclient: [client.1]
- - workunit:
- clients:
- client.1:
- - suites/dbench.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-tasks:
-- install:
- branch: dumpling
-- ceph:
-- parallel:
- - user-workload
- - kclient-workload
-user-workload:
- sequential:
- - ceph-fuse: [client.0]
- - workunit:
- clients:
- client.0:
- - suites/blogbench.sh
-kclient-workload:
- sequential:
- - kclient: [client.1]
- - workunit:
- clients:
- client.1:
- - kernel_untar_build.sh
+++ /dev/null
-tasks:
-- ceph-fuse: [client.0]
-- samba:
- samba.0:
- ceph: "{testdir}/mnt.0"
-
+++ /dev/null
-tasks:
-- kclient: [client.0]
-- samba:
- samba.0:
- ceph: "{testdir}/mnt.0"
-
+++ /dev/null
-tasks:
-- samba:
+++ /dev/null
-tasks:
-- localdir: [client.0]
-- samba:
- samba.0:
- ceph: "{testdir}/mnt.0"
+++ /dev/null
-roles:
-- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
-- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5]
-- [client.0]
+++ /dev/null
-roles:
-- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
-- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
-- [client.0]
+++ /dev/null
-../../../../debug/mds_client.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- exec:
- client.0:
- - ceph mds set inline_data true --yes-i-really-mean-it
+++ /dev/null
-tasks:
-- ceph-fuse:
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-tasks:
-- kclient:
+++ /dev/null
-../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- client:
- fuse_default_permissions: 0
-tasks:
-- workunit:
- clients:
- all:
- - kernel_untar_build.sh
+++ /dev/null
-tasks:
-- workunit:
- timeout: 5h
- clients:
- all:
- - fs/misc
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - fs/test_o_trunc.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- filestore flush min: 0
-tasks:
-- workunit:
- clients:
- all:
- - suites/ffsb.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/fsx.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/fsync-tester.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/iogen.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- client:
- debug ms: 1
- debug client: 20
- mds:
- debug ms: 1
- debug mds: 20
-tasks:
-- workunit:
- clients:
- all:
- - suites/pjd.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- client:
- ms_inject_delay_probability: 1
- ms_inject_delay_type: osd
- ms_inject_delay_max: 5
- client_oc_max_dirty_age: 1
-tasks:
-- exec:
- client.0:
- - dd if=/dev/zero of=./foo count=100
- - sleep 2
- - truncate --size 0 ./foo
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all: [fs/misc/trivial_sync.sh]
+++ /dev/null
-roles:
-- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
-- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5]
-- [client.0]
+++ /dev/null
-roles:
-- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
-- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
-- [client.0]
+++ /dev/null
-../../../../debug/mds_client.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- exec:
- client.0:
- - ceph mds set inline_data true --yes-i-really-mean-it
+++ /dev/null
-../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - libcephfs/test.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - libcephfs-java/test.sh
+++ /dev/null
-tasks:
-- mds_creation_failure:
-- ceph-fuse:
-- workunit:
- clients:
- all: [fs/misc/trivial_sync.sh]
+++ /dev/null
-roles:
-- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2]
-- [mon.b, mds.b, mds.c, osd.3, osd.4, osd.5]
-- [client.0]
+++ /dev/null
-roles:
-- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
-- [mon.b, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
-- [client.0]
+++ /dev/null
-../../../../debug/mds_client.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-../../../../overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- client:
- debug client: 1/20
- debug ms: 0/10
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - libcephfs/test.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- lockdep: true
+++ /dev/null
-overrides:
- install:
- ceph:
- flavor: notcmalloc
- ceph:
- valgrind:
- mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
- osd: [--tool=memcheck]
- mds: [--tool=memcheck]
- ceph-fuse:
- client.0:
- valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+++ /dev/null
-roles:
-- [mon.0, mon.1, mon.2, mds.0, client.0]
-- [osd.0]
-- [osd.1]
-- [osd.2]
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/ext4.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/xfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- thrashosds:
- chance_down: 1.0
- powercycle: true
- timeout: 600
+++ /dev/null
-overrides:
- ceph:
- conf:
- client.0:
- admin socket: /var/run/ceph/ceph-$name.asok
-tasks:
-- radosbench:
- clients: [client.0]
- time: 60
-- admin_socket:
- client.0:
- objecter_requests:
- test: "http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - kernel_untar_build.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - fs/misc
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- filestore flush min: 0
- mds:
- debug ms: 1
- debug mds: 20
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/ffsb.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsx.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsync-tester.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/pjd.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- client:
- ms_inject_delay_probability: 1
- ms_inject_delay_type: osd
- ms_inject_delay_max: 5
- client_oc_max_dirty_age: 1
-tasks:
-- ceph-fuse:
-- exec:
- client.0:
- - dd if=/dev/zero of=./foo count=100
- - sleep 2
- - truncate --size 0 ./foo
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-tasks:
-- radosbench:
- clients: [client.0]
- time: 1800
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- op_weights:
- read: 45
- write: 45
- delete: 10
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
+++ /dev/null
-../../../../clusters/fixed-2.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 1500
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - reached quota
- - wrongly marked me down
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- client.0:
- - rados/test.sh
- - rados/test_pool_quota.sh
-
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- client.0:
- - cls
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- client.0:
- - rados/test_python.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- client.0:
- - rados/stress_watch.sh
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - rados/load-gen-big.sh
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - rados/load-gen-mix.sh
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - rados/load-gen-mostlyread.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- mon min osdmap epochs: 25
- paxos service trim min: 5
-tasks:
-- install:
-- ceph:
+++ /dev/null
-roles:
-- [mon.a, mon.c, osd.0, osd.1, osd.2]
-- [mon.b, mds.a, osd.3, osd.4, osd.5, client.0]
+++ /dev/null
-roles:
-- [mon.a, mon.b, mon.c, mon.d, mon.e, osd.0, osd.1, osd.2]
-- [mon.f, mon.g, mon.h, mon.i, mds.a, osd.3, osd.4, osd.5, client.0]
+++ /dev/null
-../../../../fs/xfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 2500
- ms inject delay type: mon
- ms inject delay probability: .005
- ms inject delay max: 1
- ms inject internal delays: .002
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 90
- thrash_delay: 1
- thrash_store: true
- thrash_many: true
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- mon client ping interval: 4
- mon client ping timeout: 12
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- thrash_many: true
- freeze_mon_duration: 20
- freeze_mon_probability: 10
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- paxos min: 10
- paxos trim min: 10
-tasks:
-- mon_thrash:
- revive_delay: 90
- thrash_delay: 1
- thrash_many: true
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- paxos min: 10
- paxos trim min: 10
-tasks:
-- mon_thrash:
- revive_delay: 90
- thrash_delay: 1
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - slow request
-tasks:
-- exec:
- client.0:
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
- - ceph_test_rados_delete_pools_parallel
+++ /dev/null
-tasks:
-- exec:
- client.0:
- - ceph_test_rados_delete_pools_parallel --debug_objecter 20 --debug_ms 1 --debug_rados 20 --debug_monc 20
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
-tasks:
-- workunit:
- clients:
- client.0:
- - mon/pool_ops.sh
- - mon/crush_ops.sh
- - mon/osd.sh
- - mon/caps.sh
-
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
+++ /dev/null
-roles:
-- [mon.a, mon.d, mon.g, mon.j, mon.m, mon.p, mon.s, osd.0]
-- [mon.b, mon.e, mon.h, mon.k, mon.n, mon.q, mon.t, mds.a]
-- [mon.c, mon.f, mon.i, mon.l, mon.o, mon.r, mon.u, osd.1]
+++ /dev/null
-roles:
-- [mon.a, mon.b, mon.c, osd.0, osd.1, mds.a]
+++ /dev/null
-roles:
-- [mon.a, mon.c, mon.e, osd.0]
-- [mon.b, mon.d, mon.f, osd.1, mds.a]
+++ /dev/null
-roles:
-- [mon.a, mon.d, mon.g, osd.0]
-- [mon.b, mon.e, mon.h, mds.a]
-- [mon.c, mon.f, mon.i, osd.1]
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 500
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - slow request
- - .*clock.*skew.*
- - clocks not synchronized
-- mon_clock_skew_check:
- expect-skew: false
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon.b:
- clock offset: 10
-tasks:
-- install:
-- ceph:
- wait-for-healthy: false
- log-whitelist:
- - slow request
- - .*clock.*skew.*
- - clocks not synchronized
-- mon_clock_skew_check:
- expect-skew: true
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- mon_recovery:
+++ /dev/null
-roles:
-- [mon.0, osd.0, osd.1, osd.2]
-- [osd.3, osd.4, osd.5]
-- [client.0]
-
-tasks:
-- install:
-- ceph:
-- ceph_objectstore_tool:
- objects: 20
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- debug mon: 20
- debug ms: 1
- debug paxos: 20
- mon warn on legacy crush tunables: false
- mon min osdmap epochs: 3
- osd:
- osd map cache size: 2
- osd map max advance: 1
- debug filestore: 20
- debug journal: 20
- debug ms: 1
- debug osd: 20
- log-whitelist:
- - osd_map_cache_size
- - slow request
- - scrub mismatch
- - ScrubResult
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - mon.b
- - mon.c
- - osd.2
- - client.0
-tasks:
-- install:
- branch: v0.80.8
-- print: '**** done installing firefly'
-- ceph:
- fs: xfs
-- print: '**** done ceph'
-- full_sequential:
- - ceph_manager.create_pool:
- args: ['toremove']
- kwargs:
- pg_num: 4096
- - sleep:
- duration: 30
- - ceph_manager.wait_for_clean: null
- - radosbench:
- clients: [client.0]
- time: 120
- size: 1
- pool: toremove
- create_pool: false
- - ceph_manager.remove_pool:
- args: ['toremove']
- - sleep:
- duration: 10
- - ceph.restart:
- daemons:
- - osd.0
- - osd.1
- - osd.2
- - sleep:
- duration: 30
- - ceph_manager.wait_for_clean: null
- - radosbench:
- clients: [client.0]
- time: 60
- size: 1
- - ceph_manager.create_pool:
- args: ['newpool']
- - loop:
- count: 100
- body:
- - ceph_manager.set_pool_property:
- args: ['newpool', 'min_size', 2]
- - ceph_manager.set_pool_property:
- args: ['newpool', 'min_size', 1]
- - sleep:
- duration: 30
- - ceph_manager.wait_for_clean: null
- - loop:
- count: 100
- body:
- - ceph_manager.set_pool_property:
- args: ['newpool', 'min_size', 2]
- - ceph_manager.set_pool_property:
- args: ['newpool', 'min_size', 1]
- - sleep:
- duration: 30
- - ceph_manager.wait_for_clean: null
- - sleep:
- duration: 30
- - install.upgrade:
- mon.a: null
- - ceph.restart:
- daemons:
- - osd.0
- - osd.1
- - osd.2
- - sleep:
- duration: 30
- - radosbench:
- clients: [client.0]
- time: 30
- size: 1
- - ceph_manager.wait_for_clean: null
+++ /dev/null
-roles:
-- [mon.a, mds.a, osd.0, osd.1, osd.2, client.0]
-
-overrides:
- ceph:
- fs: xfs
- conf:
- osd:
- filestore xfs extsize: true
-
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - rados/test_alloc_hint.sh
+++ /dev/null
-roles:
-- [mon.0, osd.0, osd.1, mds.a, client.0]
-tasks:
-- install:
-- ceph:
-- exec:
- client.0:
- - ceph_test_filejournal
+++ /dev/null
-roles:
-- [mon.0, osd.0, osd.1, mds.a, client.0]
-tasks:
-- install:
-- ceph:
- conf:
- global:
- journal aio: true
-- filestore_idempotent:
+++ /dev/null
-roles:
-- [mon.0, osd.0, osd.1, mds.a, client.0]
-tasks:
-- install:
-- ceph:
-- filestore_idempotent:
+++ /dev/null
-roles:
-- - mon.a
- - osd.0
- - osd.1
- - osd.2
- - client.0
-- - mds.a
- - osd.3
- - osd.4
- - osd.5
-tasks:
-- install:
-- ceph:
- conf:
- osd:
- osd debug reject backfill probability: .3
- osd min pg log entries: 25
- osd max pg log entries: 100
-- exec:
- client.0:
- - ceph osd pool create foo 64
- - rados -p foo bench 60 write -b 1024 --no-cleanup
- - ceph osd pool set foo size 3
- - ceph osd out 0 1
-- sleep:
- duration: 60
-- exec:
- client.0:
- - ceph osd in 0 1
-- sleep:
- duration: 60
+++ /dev/null
-roles:
-- [mon.0, osd.0, osd.1, mds.a, client.0]
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - osdc/stress_objectcacher.sh
+++ /dev/null
-roles:
-- [mon.0, osd.0, osd.1, mds.a, client.0]
-tasks:
-- install:
-- ceph:
-- exec:
- client.0:
- - mkdir $TESTDIR/ostest && cd $TESTDIR/ostest && ceph_test_objectstore
- - rm -rf $TESTDIR/ostest
+++ /dev/null
-roles:
-- - mon.a
- - osd.0
- - mds.a
- - osd.1
- - client.a
-tasks:
-- install:
-- ceph:
-- admin_socket:
- osd.0:
- version:
- git_version:
- help:
- config show:
- config set filestore_dump_file /tmp/foo:
- perf dump:
- perf schema:
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - client.0
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - had wrong client addr
- - had wrong cluster addr
- - must scrub before tier agent can activate
-- workunit:
- clients:
- all:
- - cephtool
- - mon/pool_ops.sh
+++ /dev/null
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
-- dump_stuck:
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - objects unfound and apparently lost
-- ec_lost_unfound:
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - objects unfound and apparently lost
-- rep_lost_unfound_delete:
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - objects unfound and apparently lost
-- lost_unfound:
+++ /dev/null
-roles:
-- - mon.0
- - mon.1
- - mon.2
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - client.0
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - mon/test_mon_config_key.py
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - osd.0
- - osd.1
- - mds.0
- - client.0
-tasks:
-- install:
-- ceph:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- workunit:
- clients:
- all:
- - mon/workloadgen.sh
- env:
- LOADGEN_NUM_OSDS: "5"
- VERBOSE: "1"
- DURATION: "600"
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- conf:
- osd:
- osd min pg log entries: 5
-- osd_backfill:
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- conf:
- osd:
- osd min pg log entries: 5
-- osd_recovery.test_incomplete_pgs:
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- conf:
- osd:
- osd min pg log entries: 5
-- osd_recovery:
+++ /dev/null
-roles:
-- - mon.0
- - mon.1
- - mon.2
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-tasks:
-- install:
-- ceph:
-    conf:
-      global:
-        osd pool default min size: 1
- log-whitelist:
- - objects unfound and apparently lost
-- peer:
+++ /dev/null
-roles:
-- - mon.a
- - osd.0
- - osd.1
- - client.0
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - had wrong client addr
- - had wrong cluster addr
-- workunit:
- clients:
- all:
- - rados/test_rados_tool.sh
+++ /dev/null
-roles:
-- - mon.0
- - mon.1
- - mon.2
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - client.0
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - had wrong client addr
-- rest-api: [client.0]
-- workunit:
- clients:
- all:
- - rest/test.py
+++ /dev/null
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
- - client.0
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
-- thrashosds:
- op_delay: 30
- clean_interval: 120
- chance_down: .5
-- workunit:
- clients:
- all:
- - rados/load-gen-mix-small.sh
+++ /dev/null
-roles:
-- - mon.a
- - mds.0
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
- - client.0
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - slow request
-- exec:
- client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 500
-- background_exec:
- mon.a:
- - while true
- - do sleep 30
- - echo forward
- - ceph osd tier cache-mode cache forward
- - sleep 10
- - ceph osd pool set cache cache_target_full_ratio .001
- - echo cache-try-flush-evict-all
- - rados -p cache cache-try-flush-evict-all
- - sleep 5
- - echo cache-flush-evict-all
- - rados -p cache cache-flush-evict-all
- - sleep 5
- - echo remove overlay
- - ceph osd tier remove-overlay base
- - sleep 20
- - echo add writeback overlay
- - ceph osd tier cache-mode cache writeback
- - ceph osd pool set cache cache_target_full_ratio .8
- - ceph osd tier set-overlay base cache
- - done
-- rados:
- clients: [client.0]
- pools: [base]
- max_seconds: 600
- ops: 400000
- objects: 10000
- size: 1024
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 500
+++ /dev/null
-../../../../clusters/fixed-2.yaml
\ No newline at end of file
+++ /dev/null
-openstack:
- machine:
- disk: 40 # GB
- ram: 8000 # MB
- cpus: 1
- volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/ext4.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/xfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 2500
- ms tcp read timeout: 5
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 2500
- ms inject delay type: osd
- ms inject delay probability: .005
- ms inject delay max: 1
- ms inject internal delays: .002
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- conf:
- osd:
- osd debug reject backfill probability: .3
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- mon min osdmap epochs: 2
- osd:
- osd map cache size: 1
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - osd_map_cache_size
-- thrashosds:
- timeout: 1800
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- chance_test_map_discontinuity: 0.5
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 3
- chance_pgpnum_fix: 1
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 2
- chance_pgpnum_fix: 1
+++ /dev/null
-overrides:
- ceph:
- conf:
- client.0:
- admin socket: /var/run/ceph/ceph-$name.asok
-tasks:
-- radosbench:
- clients: [client.0]
- time: 60
-- admin_socket:
- client.0:
- objecter_requests:
- test: "http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - must scrub before tier agent can activate
-tasks:
-- exec:
- client.0:
- - ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd
- m=1 k=2
- - ceph osd pool create base 4 erasure teuthologyprofile
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 5000
-- rados:
- clients: [client.0]
- pools: [base]
- ops: 4000
- objects: 10000
- size: 1024
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - must scrub before tier agent can activate
-tasks:
-- exec:
- client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
-- rados:
- clients: [client.0]
- pools: [base]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - must scrub before tier agent can activate
-tasks:
-- exec:
- client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 3600
-- rados:
- clients: [client.0]
- pools: [base]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- flush: 50
- try_flush: 50
- evict: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - must scrub before tier agent can activate
-tasks:
-- exec:
- client.0:
- - ceph osd pool create base 4
- - ceph osd pool create cache 4
- - ceph osd tier add base cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay base cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 3600
-- rados:
- clients: [client.0]
- pools: [base]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- flush: 50
- try_flush: 50
- evict: 50
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - shard.*missing
-tasks:
-- radosbench:
- clients: [client.0]
- time: 1800
- unique_pool: true
- ec_pool: true
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- ec_pool: true
- op_weights:
- read: 45
- write: 0
- append: 45
- delete: 10
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 400000
- max_seconds: 600
- max_in_flight: 64
- objects: 1024
- size: 16384
- ec_pool: true
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-tasks:
-- radosbench:
- clients: [client.0]
- time: 1800
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- op_weights:
- read: 45
- write: 45
- delete: 10
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 400000
- max_seconds: 600
- max_in_flight: 64
- objects: 1024
- size: 16384
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
+++ /dev/null
-tasks:
-- install:
-- ceph:
+++ /dev/null
-../../../../clusters/fixed-2.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-tasks:
-- mon_recovery:
+++ /dev/null
-overrides:
- ceph:
- conf:
- client:
- debug ms: 1
- debug objecter: 20
- debug rados: 20
- debug monc: 20
-tasks:
-- workunit:
- timeout: 6h
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - cls
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- lockdep: true
+++ /dev/null
-overrides:
- install:
- ceph:
- flavor: notcmalloc
- ceph:
- valgrind:
- mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
- osd: [--tool=memcheck]
- mds: [--tool=memcheck]
+++ /dev/null
-tasks:
-- install:
-- ceph:
+++ /dev/null
-tasks:
-- exec:
- client.0:
- - ceph osd pool create cache 4
- - ceph osd tier add rbd cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay rbd cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
+++ /dev/null
-../../../../clusters/fixed-1.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 500
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/copy.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/copy.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/import_export.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/run_cli_tests.sh
-
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - cls/test_cls_rbd.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_lock_fence.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd_python.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- client:
- rbd cache: false
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- client:
- rbd cache: true
+++ /dev/null
-tasks:
-- install:
-- ceph:
- conf:
- client:
- rbd cache: true
- rbd cache max dirty: 0
+++ /dev/null
-tasks:
-- exec:
- client.0:
- - ceph osd pool create cache 4
- - ceph osd tier add rbd cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay rbd cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-../basic/fs
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
- log-whitelist:
- - wrongly marked me down
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd.sh
- env:
- RBD_FEATURES: "1"
+++ /dev/null
-tasks:
-- rbd_fsx:
- clients: [client.0]
- ops: 5000
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- env:
- RBD_FEATURES: "1"
+++ /dev/null
-tasks:
-- qemu:
- all:
- test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/bonnie.sh
-exclude_arch: armv7l
+++ /dev/null
-tasks:
-- qemu:
- all:
- test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/fsstress.sh;h=firefly
-exclude_arch: armv7l
+++ /dev/null
-tasks:
-- qemu:
- all:
- test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/workunits/suites/iozone.sh
- image_size: 20480
-exclude_arch: armv7l
+++ /dev/null
-tasks:
-- qemu:
- all:
- type: block
- num_rbd: 2
- test: http://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa/run_xfstests_qemu.sh
-exclude_arch: armv7l
+++ /dev/null
-roles:
-- [mon.a, osd.0, osd.1, client.0]
-tasks:
-- install:
-- ceph:
-- cram:
- clients:
- client.0:
- - http://git.ceph.com/?p=ceph.git;a=blob_plain;hb=firefly;f=src/test/cli-integration/rbd/formatted-output.t
-
+++ /dev/null
-exclude_arch: armv7l
-roles:
-- [mon.a, osd.0, osd.1, client.0]
-tasks:
-- install:
-- ceph:
- conf:
- client:
- rbd cache: false
-- workunit:
- clients:
- all: [rbd/qemu-iotests.sh]
+++ /dev/null
-exclude_arch: armv7l
-roles:
-- [mon.a, osd.0, osd.1, client.0]
-tasks:
-- install:
-- ceph:
- conf:
- client:
- rbd cache: true
-- workunit:
- clients:
- all: [rbd/qemu-iotests.sh]
+++ /dev/null
-exclude_arch: armv7l
-roles:
-- [mon.a, osd.0, osd.1, client.0]
-tasks:
-- install:
-- ceph:
- conf:
- client:
- rbd cache: true
- rbd cache max dirty: 0
-- workunit:
- clients:
- all: [rbd/qemu-iotests.sh]
+++ /dev/null
-roles:
-- [mon.a, osd.0, osd.1, client.0]
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - mon/rbd_snaps_ops.sh
-
+++ /dev/null
-roles:
-- [mon.a, osd.0, osd.1, client.0]
-tasks:
-- install:
-- ceph:
- conf:
- client:
- rbd cache: false
-- workunit:
- clients:
- all: [rbd/read-flags.sh]
+++ /dev/null
-roles:
-- [mon.a, osd.0, osd.1, client.0]
-tasks:
-- install:
-- ceph:
- conf:
- client:
- rbd cache: true
-- workunit:
- clients:
- all: [rbd/read-flags.sh]
+++ /dev/null
-roles:
-- [mon.a, osd.0, osd.1, client.0]
-tasks:
-- install:
-- ceph:
- conf:
- client:
- rbd cache: true
- rbd cache max dirty: 0
-- workunit:
- clients:
- all: [rbd/read-flags.sh]
+++ /dev/null
-tasks:
-- install:
-- ceph:
+++ /dev/null
-../../../../clusters/fixed-2.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/xfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-tasks:
-- exec:
- client.0:
- - ceph osd pool create cache 4
- - ceph osd tier add rbd cache
- - ceph osd tier cache-mode cache writeback
- - ceph osd tier set-overlay rbd cache
- - ceph osd pool set cache hit_set_type bloom
- - ceph osd pool set cache hit_set_count 8
- - ceph osd pool set cache hit_set_period 60
- - ceph osd pool set cache target_max_objects 250
-- thrashosds:
- timeout: 1200
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-tasks:
-- thrashosds:
- timeout: 1200
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd.sh
- env:
- RBD_FEATURES: "1"
+++ /dev/null
-tasks:
-- rbd_fsx:
- clients: [client.0]
- ops: 2000
-overrides:
- ceph:
- conf:
- client:
- rbd cache: true
+++ /dev/null
-tasks:
-- rbd_fsx:
- clients: [client.0]
- ops: 2000
-overrides:
- ceph:
- conf:
- client:
- rbd cache: true
- rbd cache max dirty: 0
+++ /dev/null
-tasks:
-- rbd_fsx:
- clients: [client.0]
- ops: 2000
-overrides:
- ceph:
- conf:
- client:
- rbd cache: false
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
-
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
- - client.0
-
-tasks:
-- install:
-- ceph:
-- rest-api: [client.0]
-- workunit:
- clients:
- client.0:
- - rest/test.py
+++ /dev/null
-../../../../clusters/fixed-2.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/ext4.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/xfs.yaml
\ No newline at end of file
+++ /dev/null
-../../../rgw_pool_type
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rgw: [client.0]
-- workunit:
- clients:
- client.0:
- - rgw/s3_bucket_quota.pl
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rgw: [client.0]
-- workunit:
- clients:
- client.0:
- - rgw/s3_multipart_upload.pl
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rgw: [client.0]
-- s3readwrite:
- client.0:
- rgw_server: client.0
- readwrite:
- bucket: rwtest
- readers: 10
- writers: 3
- duration: 300
- files:
- num: 10
- size: 2000
- stddev: 500
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rgw: [client.0]
-- s3roundtrip:
- client.0:
- rgw_server: client.0
- roundtrip:
- bucket: rttest
- readers: 10
- writers: 3
- duration: 300
- files:
- num: 10
- size: 2000
- stddev: 500
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rgw: [client.0]
-- s3tests:
- client.0:
- rgw_server: client.0
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rgw: [client.0]
-- swift:
- client.0:
- rgw_server: client.0
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rgw: [client.0]
-- workunit:
- clients:
- client.0:
- - rgw/s3_user_quota.pl
+++ /dev/null
-roles:
-- [mon.a, osd.0, osd.1, client.0, client.1]
-tasks:
-- install:
-- ceph:
- conf:
- client:
- debug ms: 1
- rgw gc obj min wait: 15
- rgw data log window: 30
- osd:
- debug ms: 1
-        debug objclass: 20
- client.0:
- rgw region: region0
- rgw zone: r0z0
- rgw region root pool: .rgw.region.0
- rgw zone root pool: .rgw.zone.0
- rgw gc pool: .rgw.gc.0
- rgw user uid pool: .users.uid.0
- rgw user keys pool: .users.0
- rgw log data: True
- rgw log meta: True
- client.1:
- rgw region: region0
- rgw zone: r0z1
- rgw region root pool: .rgw.region.0
- rgw zone root pool: .rgw.zone.1
- rgw gc pool: .rgw.gc.1
- rgw user uid pool: .users.uid.1
- rgw user keys pool: .users.1
- rgw log data: False
- rgw log meta: False
-- rgw:
- regions:
- region0:
- api name: api1
- is master: True
- master zone: r0z0
- zones: [r0z0, r0z1]
- client.0:
- system user:
- name: client0-system-user
- access key: 0te6NH5mcdcq0Tc5i8i2
- secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
- client.1:
- system user:
- name: client1-system-user
- access key: 1te6NH5mcdcq0Tc5i8i3
- secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw
-- radosgw-agent:
- client.0:
- max-entries: 10
- src: client.0
- dest: client.1
-- radosgw-admin:
+++ /dev/null
-roles:
-- [mon.a, mds.a, osd.0, osd.1, client.0]
-- [mon.b, mon.c, osd.2, osd.3, client.1]
-tasks:
-- install:
-- ceph:
- conf:
- client:
- debug ms: 1
- rgw gc obj min wait: 15
- osd:
- debug ms: 1
-        debug objclass: 20
- client.0:
- rgw region: region0
- rgw zone: r0z1
- rgw region root pool: .rgw.region.0
- rgw zone root pool: .rgw.zone.0
- rgw gc pool: .rgw.gc.0
- rgw user uid pool: .users.uid.0
- rgw user keys pool: .users.0
- rgw log data: True
- rgw log meta: True
- client.1:
- rgw region: region1
- rgw zone: r1z1
- rgw region root pool: .rgw.region.1
- rgw zone root pool: .rgw.zone.1
- rgw gc pool: .rgw.gc.1
- rgw user uid pool: .users.uid.1
- rgw user keys pool: .users.1
- rgw log data: False
- rgw log meta: False
-- rgw:
- regions:
- region0:
- api name: api1
- is master: True
- master zone: r0z1
- zones: [r0z1]
- region1:
- api name: api1
- is master: False
- master zone: r1z1
- zones: [r1z1]
- client.0:
- system user:
- name: client0-system-user
- access key: 0te6NH5mcdcq0Tc5i8i2
- secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
- client.1:
- system user:
- name: client1-system-user
- access key: 1te6NH5mcdcq0Tc5i8i3
- secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw
-- radosgw-agent:
- client.0:
- src: client.0
- dest: client.1
- metadata-only: true
-- radosgw-admin:
+++ /dev/null
-roles:
-- [mon.a, mds.a, osd.0, client.0, osd.1]
-tasks:
-- install:
-- ceph:
- conf:
- client:
- debug ms: 1
- rgw gc obj min wait: 15
- osd:
- debug ms: 1
-        debug objclass: 20
-- rgw:
- client.0:
-- radosgw-admin:
+++ /dev/null
-overrides:
- s3readwrite:
- s3:
- user_id: s3readwrite-test-user
- display_name: test user for the s3readwrite tests
- email: tester@inktank
- access_key: 2te6NH5mcdcq0Tc5i8i4
- secret_key: Qy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXx
- readwrite:
- deterministic_file_names: True
- duration: 30
- bucket: testbucket
- files:
- num: 10
- size: 2000
- stddev: 500
-roles:
-- [mon.a, mds.a, osd.0, osd.1, client.0]
-- [mon.b, mon.c, osd.2, osd.3, client.1]
-
-tasks:
-- install:
-- ceph:
- conf:
- client.1:
- rgw region: default
- rgw zone: r1z1
- rgw region root pool: .rgw
- rgw zone root pool: .rgw
- rgw domain root: .rgw
- rgw gc pool: .rgw.gc
- rgw user uid pool: .users.uid
- rgw user keys pool: .users
-- rgw:
- client.0:
- system user:
- name: nr-system
- access key: 0te6NH5mcdcq0Tc5i8i2
- secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
-- s3readwrite:
- client.0:
- extra_args: ['--no-cleanup']
- s3:
- delete_user: False
- readwrite:
- writers: 1
- readers: 0
-- rgw:
- regions:
- default:
- api name: api1
- is master: true
- master zone: r1z1
- zones: [r1z1]
- client.1:
- system user:
- name: r2-system
- access key: 1te6NH5mcdcq0Tc5i8i3
- secret key: Py4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXw
-- s3readwrite:
- client.1:
- s3:
- create_user: False
- readwrite:
- writers: 0
- readers: 2
-
+++ /dev/null
-../../../rgw_pool_type/
\ No newline at end of file
+++ /dev/null
-../../../../clusters/fixed-2.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-../../../rgw_pool_type/
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
- flavor: notcmalloc
-- ceph:
-- rgw:
- client.0:
- valgrind: [--tool=memcheck]
-- s3tests:
- client.0:
- rgw_server: client.0
+++ /dev/null
-tasks:
-- install:
- flavor: notcmalloc
-- ceph:
- conf:
- client.0:
- rgw region: zero
- rgw zone: r0z1
- rgw region root pool: .rgw.region.0
- rgw zone root pool: .rgw.zone.0
- rgw gc pool: .rgw.gc.0
- rgw user uid pool: .users.uid.0
- rgw user keys pool: .users.0
- rgw log data: True
- rgw log meta: True
- client.1:
- rgw region: one
- rgw zone: r1z1
- rgw region root pool: .rgw.region.1
- rgw zone root pool: .rgw.zone.1
- rgw gc pool: .rgw.gc.1
- rgw user uid pool: .users.uid.1
- rgw user keys pool: .users.1
- rgw log data: False
- rgw log meta: False
-- rgw:
- default_idle_timeout: 300
- regions:
- zero:
- api name: api1
- is master: True
- master zone: r0z1
- zones: [r0z1]
- one:
- api name: api1
- is master: False
- master zone: r1z1
- zones: [r1z1]
- client.0:
- valgrind: [--tool=memcheck]
- system user:
- name: client0-system-user
- access key: 1te6NH5mcdcq0Tc5i8i2
- secret key: 1y4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
- client.1:
- valgrind: [--tool=memcheck]
- system user:
- name: client1-system-user
- access key: 0te6NH5mcdcq0Tc5i8i2
- secret key: Oy4IOauQoL18Gp2zM7lC1vLmoawgqcYPbYGcWfXv
-- radosgw-agent:
- client.0:
- src: client.0
- dest: client.1
- metadata-only: true
-- s3tests:
- client.0:
- idle_timeout: 300
- rgw_server: client.0
+++ /dev/null
-tasks:
-- install:
- flavor: notcmalloc
-- ceph:
-- rgw:
- client.0:
- valgrind: [--tool=memcheck]
-- swift:
- client.0:
- rgw_server: client.0
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- lockdep: true
- mon:
- lockdep: true
+++ /dev/null
-overrides:
- install:
- ceph:
- flavor: notcmalloc
- ceph:
- valgrind:
- mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
- osd: [--tool=memcheck]
- mds: [--tool=memcheck]
+++ /dev/null
-roles:
-- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1]
-- [samba.0, client.0, client.1]
+++ /dev/null
-../../../debug/mds_client.yaml
\ No newline at end of file
+++ /dev/null
-../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-# we currently can't install Samba on RHEL; need a gitbuilder and code updates
-os_type: ubuntu
-
-tasks:
-- install:
-- install:
- project: samba
- extra_packages: ['samba']
-- ceph:
+++ /dev/null
-tasks:
-- ceph-fuse: [client.0]
-- samba:
- samba.0:
- ceph: "{testdir}/mnt.0"
-
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-tasks:
-- kclient: [client.0]
-- samba:
- samba.0:
- ceph: "{testdir}/mnt.0"
-
+++ /dev/null
-tasks:
-- samba:
+++ /dev/null
-tasks:
-- localdir: [client.0]
-- samba:
- samba.0:
- ceph: "{testdir}/mnt.0"
+++ /dev/null
-tasks:
-- cifs-mount:
- client.1:
- share: ceph
-- workunit:
- clients:
- client.1:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- cifs-mount:
- client.1:
- share: ceph
-- workunit:
- clients:
- client.1:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- cifs-mount:
- client.1:
- share: ceph
-- workunit:
- clients:
- client.1:
- - kernel_untar_build.sh
-
+++ /dev/null
-tasks:
-- pexec:
- client.1:
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb
-# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon
-# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl
-# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse: [client.0]
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/pjd.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - direct_io
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/pjd.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - libcephfs/test.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - rados/test_python.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - rados/load-gen-mix.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd.sh
- env:
- RBD_FEATURES: "1"
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - rbd/import_export.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- env:
- RBD_FEATURES: "1"
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd_python.sh
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms die on skipped message: false
-tasks:
-- install:
-- ceph:
-- rbd:
- all:
- image_size: 20480
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-roles:
-- [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1]
-- [client.1]
-- [client.0]
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- locktest: [client.0, client.1]
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rgw: [client.0]
-- s3tests:
- client.0:
- rgw_server: client.0
+++ /dev/null
-roles:
-- [mon.a, mon.d, osd.0]
-- [mon.b, mon.e, mds.a]
-- [mon.c, mon.f, osd.1]
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- mon_recovery:
+++ /dev/null
-roles:
-- [mon.0, osd.0, osd.1, mds.a, client.0]
-tasks:
-- install:
-- ceph:
-- filestore_idempotent:
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- conf:
- osd:
- osd min pg log entries: 5
-- osd_backfill:
+++ /dev/null
-roles:
-- - mon.a
- - mds.0
- - osd.0
-- - osd.1
-- - osd.2
-- - osd.3
-- - osd.4
-- - client.0
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
-- thrashosds:
- op_delay: 30
- clean_interval: 120
- chance_down: .5
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - rados/load-gen-mix-small.sh
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-roles:
-- [mon.a, osd.0, osd.1, osd.2]
-- [mds.a, osd.3, osd.4, osd.5]
-- [client.0]
+++ /dev/null
-../../../../fs/xfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-- thrashosds:
+++ /dev/null
-overrides:
- ceph:
- conf:
- client.0:
- admin socket: /var/run/ceph/ceph-$name.asok
-tasks:
-- radosbench:
- clients: [client.0]
- time: 60
-- admin_socket:
- client.0:
- objecter_requests:
- test: "http://ceph.newdream.net/git/?p=ceph.git;a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
+++ /dev/null
-tasks:
-- rbd:
- all:
- image_size: 20480
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - libcephfs/test.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- mon_recovery:
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - cls
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- rgw:
- client.0:
- valgrind: [--tool=memcheck]
-- s3tests:
- default_idle_timeout: 300
- client.0:
- rgw_server: client.0
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- lockdep: true
+++ /dev/null
-overrides:
- ceph:
- valgrind:
- mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
- osd: [--tool=memcheck]
- mds: [--tool=memcheck]
+++ /dev/null
-../../../../clusters/fixed-3.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - snaps
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- kclient:
-- workunit:
- clients:
- all:
- - suites/fsx.sh
+++ /dev/null
-roles:
-- [mon.0, mds.a, osd.0]
-- [mon.1, osd.1]
-- [mon.2, osd.2]
-- [osd.3]
-- [osd.4]
-- [osd.5]
-- [osd.6]
-- [osd.7]
-- [osd.8]
-- [osd.9]
-- [osd.10]
-- [osd.11]
-- [osd.12]
-- [osd.13]
-- [osd.14]
-- [osd.15]
-- [client.0]
+++ /dev/null
-roles:
-- [mon.0, mds.a, osd.0, osd.1, osd.2]
-- [mon.1, mon.2, client.0]
+++ /dev/null
-roles:
-- [mon.0, mds.a, osd.0]
-- [mon.1, osd.1]
-- [mon.2, osd.2]
-- [osd.3]
-- [osd.4]
-- [osd.5]
-- [osd.6]
-- [osd.7]
-- [client.0]
+++ /dev/null
-../../../../fs/btrfs.yaml
\ No newline at end of file
+++ /dev/null
-../../../../fs/xfs.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-- thrashosds:
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-- thrashosds:
- op_delay: 1
- chance_down: 10
+++ /dev/null
-tasks:
-- install:
-- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
-- thrashosds:
- chance_down: 50
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/bonnie.sh
+++ /dev/null
-tasks:
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-tasks:
-- radosbench:
- clients: [client.0]
- time: 1800
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- op_weights:
- read: 45
- write: 45
- delete: 10
+++ /dev/null
-roles:
-- [mon.a, mon.c, osd.0, osd.1, osd.2]
-- [mon.b, mds.a, osd.3, osd.4, osd.5]
-- [client.0]
+++ /dev/null
-overrides:
- ceph:
- fs: btrfs
- conf:
- osd:
- osd op thread timeout: 60
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 500
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/bonnie.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/dbench-short.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/ffsb.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/fio.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/fsx.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/fsync-tester.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/iogen.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/iozone-sync.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/iozone.sh
+++ /dev/null
-tasks:
-- install:
-- ceph:
-- tgt:
-- iscsi:
-- workunit:
- clients:
- all:
- - suites/pjd.sh
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: bobtail
-- ceph:
-- install.upgrade:
- all:
- tag: v0.61.5
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.61.5
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.61.6
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - suites/dbench.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/iogen.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- conf:
- paxos service trim min: 5
- mon min osdmap epochs: 25
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: bobtail
-- ceph:
+++ /dev/null
-tasks:
-- install:
- tag: v0.61.1
-- ceph:
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- tag: v0.61.3
-- ceph.restart:
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- tag: v0.61.4
-- ceph.restart:
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- tag: v0.61.5
-- ceph.restart:
+++ /dev/null
-tasks:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
+++ /dev/null
-workload:
- rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- workunit:
- clients:
- client.0:
- - rados/test.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: bobtail
-- ceph:
-- install.upgrade:
- all:
- tag: v0.61.5
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.61.5
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.61.6
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- workunit:
- clients:
- client.0:
- - rados/test.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: bobtail
-- ceph:
-- install.upgrade:
- all:
- tag: v0.61.5
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.61.5
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.61.6
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - workunit:
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-
-
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- workunit:
- clients:
- client.0:
- - rbd/copy.sh
- env:
- RBD_CREATE_ARGS: --new-format
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- workunit:
- clients:
- client.0:
- - rbd/test_lock_fence.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: bobtail
-- ceph:
-- install.upgrade:
- all:
- tag: v0.61.5
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.61.5
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.61.6
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- rgw: [client.0]
- s3tests:
- client.0:
- rgw_server: client.0
-
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
- - ceph.restart: [rgw.client.0]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
- - ceph.restart: [rgw.client.0]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: cuttlefish
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [rgw.client.0]
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- swift:
- client.0:
- rgw_server: client.0
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- swift:
- client.0:
- rgw_server: client.0
-
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- log-whitelist:
- - scrub mismatch
- - ScrubResult
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
- - client.1
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- fs: xfs
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test.sh
- - cls
+++ /dev/null
-workload:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/load-gen-big.sh
+++ /dev/null
-workload:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd.sh
+++ /dev/null
-workload:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd_python.sh
+++ /dev/null
-tasks:
- - install.upgrade:
- mon.a:
- branch: emperor
- mon.b:
- branch: emperor
- - ceph.restart:
- - parallel:
- - workload2
- - upgrade-sequence
+++ /dev/null
-workload2:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test.sh
- - cls
+++ /dev/null
-workload2:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/load-gen-big.sh
+++ /dev/null
-workload2:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd.sh
+++ /dev/null
-workload2:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd_python.sh
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- mon.a:
- branch: emperor
- mon.b:
- branch: emperor
- - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- mon.a:
- branch: emperor
- mon.b:
- branch: emperor
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 60
- - ceph.restart: [osd.1]
- - sleep:
- duration: 60
- - ceph.restart: [osd.2]
- - sleep:
- duration: 60
- - ceph.restart: [osd.3]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
+++ /dev/null
-tasks:
-- rados:
- clients: [client.1]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-tasks:
- - workunit:
- branch: dumpling
- clients:
- client.1:
- - rados/load-gen-mix.sh
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- workunit:
- branch: dumpling
- clients:
- client.1:
- - rados/test.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.1:
- - cls/test_cls_rbd.sh
-
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
-- rgw: [client.1]
-- s3tests:
- client.1:
- rgw_server: client.1
+++ /dev/null
-tasks:
-# Uncomment the next line if you have not already included rgw_s3tests.yaml in your test.
-# - rgw: [client.1]
-- swift:
- client.1:
- rgw_server: client.1
+++ /dev/null
-../../../../distros/supported
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
- - client.1
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- log-whitelist:
- - scrub mismatch
- - ScrubResult
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- print: "**** done install"
-- ceph:
- fs: xfs
-- print: "**** done ceph"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel"
+++ /dev/null
-workload:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test-upgrade-firefly.sh
- - cls
-
+++ /dev/null
-workload:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/load-gen-big.sh
+++ /dev/null
-workload:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd.sh
+++ /dev/null
-workload:
- sequential:
- - workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd_python.sh
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- mon.a:
- mon.b:
- - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- mon.a:
- mon.b:
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 60
- - ceph.restart: [osd.1]
- - sleep:
- duration: 60
- - ceph.restart: [osd.2]
- - sleep:
- duration: 60
- - ceph.restart: [osd.3]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
+++ /dev/null
-tasks:
- - install.upgrade:
- client.0:
- - print: "**** done install.upgrade"
+++ /dev/null
-tasks:
- - rados:
- clients: [client.1]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-tasks:
- - workunit:
- clients:
- client.1:
- - rados/load-gen-mix.sh
+++ /dev/null
-tasks:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - workunit:
- clients:
- client.1:
- - rados/test.sh
+++ /dev/null
-tasks:
- - workunit:
- clients:
- client.1:
- - cls/test_cls_rbd.sh
-
+++ /dev/null
-tasks:
- - workunit:
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
- - rgw: [client.1]
- - s3tests:
- client.1:
- rgw_server: client.1
- branch: dumpling
+++ /dev/null
-tasks:
-# no need for rgw when we use +
-# - rgw: [client.1]
- - swift:
- client.1:
- rgw_server: client.1
+++ /dev/null
-../../../../distros/supported
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
-roles:
-- - mon.a
- - mon.b
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
- - mon.c
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- fs: xfs
+++ /dev/null
-tasks:
-- install.upgrade:
- osd.0:
-- ceph.restart:
- daemons: [osd.0, osd.1, osd.2]
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- thrash_primary_affinity: false
+++ /dev/null
-tasks:
-- ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test-upgrade-firefly.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - cls/test_cls_rbd.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- op_weights:
- read: 45
- write: 45
- delete: 10
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-tasks:
-- ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test-upgrade-firefly.sh
+++ /dev/null
-tasks:
-- radosbench:
- clients: [client.0]
- time: 1800
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- mon.c:
-- ceph.restart:
- daemons: [mon.c]
- wait-for-healthy: false
- wait-for-osds-up: true
-- ceph.wait_for_mon_quorum: [a, b, c]
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test-upgrade-firefly.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd_python.sh
+++ /dev/null
-tasks:
-- rgw:
- default_idle_timeout: 300
- client.0:
-- swift:
- client.0:
- rgw_server: client.0
-
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-../../../../distros/supported
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: cuttlefish
-- ceph:
-- install.upgrade:
- all:
- tag: v0.67.1
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.1
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.2
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.3
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.4
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.5
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.7
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - suites/dbench.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/iogen.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: cuttlefish
-- ceph:
-- install.upgrade:
- all:
- tag: v0.67.1
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.1
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.2
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.3
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.4
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.5
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.7
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- workunit:
- clients:
- client.0:
- - rados/test.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: cuttlefish
-- ceph:
-- install.upgrade:
- all:
- tag: v0.67.1
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.1
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.2
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.3
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.4
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.5
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.7
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - workunit:
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-
-
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- workunit:
- clients:
- client.0:
- - rbd/copy.sh
- env:
- RBD_CREATE_ARGS: --new-format
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- workunit:
- clients:
- client.0:
- - rbd/test_lock_fence.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: cuttlefish
-- ceph:
-- install.upgrade:
- all:
- tag: v0.67.1
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.1
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.2
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.3
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.4
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.5
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.67.7
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- rgw: [client.0]
- s3tests:
- client.0:
- rgw_server: client.0
-
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
- - ceph.restart: [rgw.client.0]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
- - ceph.restart: [rgw.client.0]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: dumpling
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [rgw.client.0]
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- swift:
- client.0:
- rgw_server: client.0
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- swift:
- client.0:
- rgw_server: client.0
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
-- install.upgrade:
- all:
- tag:
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.73
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.74
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.75
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- ceph-fuse:
-- workunit:
- clients:
- client.0:
- - suites/dbench.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- ceph-fuse:
-- workunit:
- clients:
- all:
- - suites/iogen.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
-- install.upgrade:
- all:
- tag:
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.73
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.74
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.75
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- workunit:
- clients:
- client.0:
- - rados/test.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
-- install.upgrade:
- all:
- tag:
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.73
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.74
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.75
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - workunit:
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-
-
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- workunit:
- clients:
- client.0:
- - rbd/copy.sh
- env:
- RBD_CREATE_ARGS: --new-format
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- workunit:
- clients:
- client.0:
- - rbd/test_lock_fence.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
-- install.upgrade:
- all:
- tag:
-- ceph.restart:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.73
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.74
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.75
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- rgw: [client.0]
- s3tests:
- client.0:
- rgw_server: client.0
-
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
- - ceph.restart: [rgw.client.0]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
- - ceph.restart: [rgw.client.0]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [rgw.client.0]
+++ /dev/null
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- swift:
- client.0:
- rgw_server: client.0
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- swift:
- client.0:
- rgw_server: client.0
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- - scrub mismatch
- - ScrubResult
- - osd_map_max_advance
- fs: xfs
- conf:
- osd:
- osd map max advance: 1000
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
-- - client.0
- - client.1
+++ /dev/null
-tasks:
-- install:
- tag: v0.80.10
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.80.4
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.80.5
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.80.6
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.80.8
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.80.9
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - suites/blogbench.sh
+++ /dev/null
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - workunit:
- clients:
- client.0:
- - cls/test_cls_rbd.sh
+++ /dev/null
-workload:
- sequential:
- - rgw: [client.0]
- - s3tests:
- client.0:
- force-branch: firefly-original
- rgw_server: client.0
+++ /dev/null
-workload:
- sequential:
- - rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
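The rados workload above exercises a mixed object workload; its op_weights are relative weights, not percentages. Assuming the generator picks each operation in proportion to its weight, the implied mix can be computed directly (illustrative arithmetic only):

weights = {'read': 100, 'write': 100, 'delete': 50,
           'snap_create': 50, 'snap_remove': 50, 'rollback': 50}
total = sum(weights.values())
for op, weight in sorted(weights.items()):
    # e.g. read and write each account for 25% of operations, the rest 12.5%
    print('%-12s %5.1f%%' % (op, 100.0 * weight / total))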
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- mon.a:
- branch: firefly
- mon.b:
- branch: firefly
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- mon.a:
- branch: firefly
- mon.b:
- branch: firefly
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
+++ /dev/null
-tasks:
-- install.upgrade:
- client.0:
+++ /dev/null
-tasks:
-- sequential:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - ceph-fuse: [client.0]
- - workunit:
- clients:
- client.0:
- - suites/dbench.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- sequential:
- - thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- - ceph-fuse: [client.0]
- - workunit:
- clients:
- client.0:
- - suites/iogen.sh
-
+++ /dev/null
-tasks:
-- sequential:
- - workunit:
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - workunit:
- clients:
- client.0:
- - cls/test_cls_rbd.sh
+++ /dev/null
-tasks:
-- sequential:
- - rgw: [client.1]
- - s3readwrite:
- client.0:
- rgw_server: client.1
- readwrite:
- bucket: rwtest
- readers: 10
- writers: 3
- duration: 300
- files:
- num: 10
- size: 2000
- stddev: 500
+++ /dev/null
-../../../../distros/supported
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- - osd_map_max_advance
- fs: xfs
- conf:
- osd:
- osd map max advance: 1000
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
-- - client.1
- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- log-whitelist:
- - scrub mismatch
- - ScrubResult
-- install.upgrade:
- all:
- tag: v0.80.1
-- ceph.restart:
-- exec:
- client.0:
- - ceph osd crush tunables firefly
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- branch: emperor
-- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- log-whitelist:
- - scrub mismatch
- - ScrubResult
-- install.upgrade:
- all:
- tag: v0.80.1
-- ceph.restart:
-- exec:
- client.0:
- - ceph osd crush tunables firefly
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- thrashosds:
- thrash_primary_affinity: false
-tasks:
-- install:
- tag: v0.67.11
-- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.80.1
-- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.80.2
-- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.80.3
-- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-tasks:
-- install:
- tag: v0.80
-- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - suites/blogbench.sh
+++ /dev/null
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - rados/load-gen-big.sh
+++ /dev/null
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - workunit:
- clients:
- client.0:
- - cls/test_cls_rbd.sh
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- mon.a:
- branch: firefly
- mon.b:
- branch: firefly
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
+++ /dev/null
-tasks:
-- install.upgrade:
- client.0:
+++ /dev/null
-tasks:
-- sequential:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - ceph-fuse: [client.0]
- - workunit:
- clients:
- client.0:
- - suites/dbench.sh
-
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- sequential:
- - thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- - ceph-fuse: [client.0]
- - workunit:
- clients:
- client.0:
- - suites/iogen.sh
-
+++ /dev/null
-tasks:
-- sequential:
- - workunit:
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - workunit:
- clients:
- client.0:
- - cls/test_cls_rbd.sh
+++ /dev/null
-tasks:
-- sequential:
- - rgw: [client.1]
- - s3tests:
- client.1:
- force-branch: firefly-original
- rgw_server: client.1
+++ /dev/null
-../../../../distros/supported
\ No newline at end of file
+++ /dev/null
-../../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-# this case tests issue #9419 "dumpling->firefly upgrade, sending setallochint?"
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- log-whitelist:
- - scrub mismatch
- - ScrubResult
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
-tasks:
-- install:
- branch: dumpling
-- print: "**** done install dumpling"
-- ceph:
- fs: xfs
-- print: "**** done ceph"
-- install.upgrade:
- client.0:
-- print: "**** done install.upgrade on clinet.0"
-- install.upgrade:
- mon.a:
- mon.b:
-- print: "**** done install.upgrade"
-- ceph.restart:
- #osd.2 is not upgraded
- daemons: [mon.a, mon.b, mon.c, osd.0, osd.1]
-- print: "**** done restart all"
-- workunit:
- branch: firefly
- clients:
- client.0:
- - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh"
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/load-gen-big.sh
-- print: "**** done rados/load-gen-big.sh"
-- workunit:
- branch: firefly
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh"
-- workunit:
- branch: firefly
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-- print: "**** done cls/test_cls_rbd.sh"
-- rgw: [client.0]
-- s3tests:
- client.0:
- force-branch: firefly
- rgw_server: client.0
-- print: "**** done s3tests"
+++ /dev/null
-../../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub
- - osd_map_max_advance
- fs: xfs
- conf:
- osd:
- osd map max advance: 1000
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
-- - client.1
-tasks:
-- install:
- tag: v0.80.4
-- print: "**** done v0.80.4 install"
-- ceph:
- fs: xfs
-- print: "**** done ceph xfs"
-- sequential:
- - workload
-- print: "**** done workload v0.80.4"
-- parallel:
- - workload1
- - upgrade-sequence1
-- print: "**** done parallel v0.80.5"
-- parallel:
- - workload2
- - upgrade-sequence2
-- print: "**** done parallel v0.80.7"
-- parallel:
- - workload3
- - upgrade-sequence3
-- print: "**** done parallel v0.80.8"
-- parallel:
- - workload4
- - upgrade-sequence4
-- print: "**** done parallel v0.80.9"
-- parallel:
- - workload_firefly
- - upgrade-sequence_firefly
-- print: "**** done parallel firefly branch"
-#######################
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh workload"
-workload1:
- sequential:
- - workunit:
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh workload1"
- - workunit:
- clients:
- client.0:
- - rados/test.sh
- - cls
- - print: "**** done rados/test.sh & cls workload1"
- - workunit:
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh workload1"
-upgrade-sequence1:
- sequential:
- - install.upgrade:
- mon.a:
- tag: v0.80.5
- mon.b:
- tag: v0.80.5
- client.1:
- tag: v0.80.5
- - print: "**** done v0.80.5 install.upgrade"
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
- - print: "**** done ceph.restart all 1 mon/mds/osd"
-workload2:
- sequential:
-# removed to fix #10176
-# - workunit:
-# clients:
-# client.0:
-# - rbd/import_export.sh
-# env:
-# RBD_CREATE_ARGS: --new-format
- - workunit:
- clients:
- client.0:
- - cls/test_cls_rbd.sh
- - print: "**** done cls/test_cls_rbd.sh workload2"
-upgrade-sequence2:
- sequential:
- - install.upgrade:
- mon.a:
- tag: v0.80.7
- mon.b:
- tag: v0.80.7
- client.1:
- tag: v0.80.7
- - print: "**** done v0.80.7 install.upgrade"
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - print: "**** done ceph.restart all 2 osd/mon/mds"
-workload3:
- sequential:
- - workunit:
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh workload3"
- - workunit:
- clients:
- client.0:
- - rados/test.sh
- - cls
- - print: "**** done rados/test.sh & cls workload3"
- - workunit:
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh workload3"
-upgrade-sequence3:
- sequential:
- - install.upgrade:
- mon.a:
- tag: v0.80.8
- mon.b:
- tag: v0.80.8
- client.1:
- tag: v0.80.8
- - print: "**** done v0.80.8 install.upgrade"
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
- - print: "**** done ceph.restart all mon/mds/osd upgrade-sequence3"
-workload4:
- sequential:
- - workunit:
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh workload4"
- - workunit:
- clients:
- client.0:
- - rados/test.sh
- - cls
- - print: "**** done rados/test.sh & cls workload4"
- - workunit:
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh workload4"
-upgrade-sequence4:
- sequential:
- - install.upgrade:
- mon.a:
- tag: v0.80.9
- mon.b:
- tag: v0.80.9
- client.1:
- tag: v0.80.9
- - print: "**** done v0.80.9 install.upgrade"
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
- - print: "**** done ceph.restart all 1 mon/mds/osd upgrade-sequence4"
-workload_firefly:
- sequential:
- - rgw: [client.0]
- - print: "**** done rgw workload_firefly"
- - s3tests:
- client.0:
- force-branch: firefly
- rgw_server: client.0
- - print: "**** done s3tests workload_firefly"
-upgrade-sequence_firefly:
- sequential:
- - install.upgrade:
- mon.a:
- branch: firefly
- mon.b:
- branch: firefly
- client.1:
- branch: firefly
- - print: "**** done branch: firefly install.upgrade"
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - print: "**** done ceph.restart all firefly current branch mds/osd/mon"
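The fragment above walks a ladder of point releases, starting from a v0.80.4 install and stepping through v0.80.5, v0.80.7, v0.80.8 and v0.80.9 before landing on the firefly branch, with a workloadN running in parallel with every upgrade-sequenceN. Purely as an illustration, and assuming YAML mapping order is preserved on load, a small script could recover that ladder from a fragment of this shape:

import sys
import yaml

def upgrade_ladder(frag):
    ladder = []
    # The initial install task fixes the starting version...
    for task in frag.get('tasks', []):
        if isinstance(task, dict) and 'install' in task:
            spec = task['install'] or {}
            ladder.append(spec.get('tag') or spec.get('branch'))
    # ...and each upgrade-sequence* section names the next hop.
    for key, section in frag.items():
        if not key.startswith('upgrade-sequence'):
            continue
        for step in section.get('sequential', []):
            spec = step.get('install.upgrade') if isinstance(step, dict) else None
            if spec:
                target = list(spec.values())[0] or {}
                ladder.append(target.get('tag') or target.get('branch'))
    return [version for version in ladder if version]

if __name__ == '__main__':
    with open(sys.argv[1]) as f:
        print(' -> '.join(upgrade_ladder(yaml.safe_load(f))))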
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: cuttlefish
-- ceph:
- fs: xfs
-- ceph-fuse:
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- client.0:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- client.0:
- - suites/iogen.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: dumpling
+++ /dev/null
-tasks:
-- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/iogen.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - kernel_untar_build.sh
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/tiobench.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: emperor
+++ /dev/null
-tasks:
-- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
+++ /dev/null
-tasks:
-- workunit:
- branch: emperor
- clients:
- client.0:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: emperor
- clients:
- all:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: emperor
- clients:
- client.0:
- - suites/iogen.sh
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
-
+++ /dev/null
-tasks:
-- install:
- branch: cuttlefish
-- ceph:
-
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- client.0:
- - rados/test.sh
- - cls
-
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- client.0:
- - rados/load-gen-mix.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- osd.0:
- branch: dumpling
- osd.2:
- branch: dumpling
-
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.2]
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- client.0:
- - rados/test.sh
- - cls
-
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- client.0:
- - rados/load-gen-big.sh
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
-
+++ /dev/null
-tasks:
-- install:
- branch: cuttlefish
-- ceph:
-
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- all:
- - cephtool/test.sh
- - mon/pool_ops.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- mon.a:
- branch: dumpling
+++ /dev/null
-tasks:
-- ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- all:
- - cephtool/test.sh
- - mon/pool_ops.sh
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - had wrong client addr
- - had wrong cluster addr
-tasks:
-- install.upgrade:
- mon.b:
- branch: dumpling
- client.0:
- branch: dumpling
-- ceph.restart:
- daemons:
- - mon.b
- - mon.c
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-- workunit:
- branch: dumpling
- clients:
- all:
- - cephtool/test.sh
- - mon/pool_ops.sh
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- fs: xfs
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-../rados/distro
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- fs: xfs
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- workunit:
- branch: dumpling
- clients:
- all:
- - rados/load-gen-big.sh
+++ /dev/null
-workload:
- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/load-gen-mix.sh
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 60
- - ceph.restart: [osd.1]
- - sleep:
- duration: 60
- - ceph.restart: [osd.2]
- - sleep:
- duration: 60
- - ceph.restart: [osd.3]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 60
- - ceph.restart: [osd.1]
- - sleep:
- duration: 60
- - ceph.restart: [osd.2]
- - sleep:
- duration: 60
- - ceph.restart: [osd.3]
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [osd.0]
- - sleep:
- duration: 60
- - ceph.restart: [osd.1]
- - sleep:
- duration: 60
- - ceph.restart: [osd.2]
- - sleep:
- duration: 60
- - ceph.restart: [osd.3]
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
+++ /dev/null
-os_type: centos
-os_version: "6.4"
+++ /dev/null
-os_type: debian
-os_version: "7.0"
+++ /dev/null
-os_type: fedora
-os_version: "18"
+++ /dev/null
-os_type: rhel
-os_version: "6.3"
+++ /dev/null
-os_type: ubuntu
-os_version: "12.04"
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
- - client.1
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- fs: xfs
-- parallel:
- - workload
- - upgrade-sequence
+++ /dev/null
-workload:
- sequential:
- - rgw: [client.0]
- - s3tests:
- # use older tests when we are running a mix
- client.0:
- force-branch: dumpling
- rgw_server: client.0
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- all:
- branch: emperor
- - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0]
+++ /dev/null
-tasks:
-- rgw: [client.1]
-- swift:
- client.1:
- rgw_server: client.1
+++ /dev/null
-../rados/distro
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
- - mon.c
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- fs: xfs
+++ /dev/null
-tasks:
-- install.upgrade:
- osd.0:
-- ceph.restart:
- daemons: [osd.0, osd.1, osd.2]
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
+++ /dev/null
-tasks:
-- ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-tasks:
-- radosbench:
- clients: [client.0]
- time: 1800
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- op_weights:
- read: 45
- write: 45
- delete: 10
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-tasks:
-- ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- mon.c: null
-- ceph.restart:
- daemons: [mon.c]
- wait-for-healthy: false
- wait-for-osds-up: true
-- ceph.wait_for_mon_quorum: [a, b, c]
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-../rados/distro
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: bobtail
-- ceph:
+++ /dev/null
-tasks:
-- workunit:
- branch: bobtail
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: dumpling
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test.sh
- - cls
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/load-gen-mix.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rados/test.sh
- - cls
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: cuttlefish
-- ceph:
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- client.0:
- - rados/test.sh
- - cls
+++ /dev/null
-tasks:
-- workunit:
- branch: cuttlefish
- clients:
- client.0:
- - rados/load-gen-mix.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: dumpling
+++ /dev/null
-tasks:
-- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a]
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rados/test.sh
- - cls
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: emperor
+++ /dev/null
-tasks:
-- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
+++ /dev/null
-tasks:
-- workunit:
- branch: emperor
- clients:
- client.0:
- - rados/test.sh
- - cls
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: bobtail
-- ceph:
+++ /dev/null
-tasks:
-- workunit:
- branch: bobtail
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: dumpling
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - cls/test_cls_rbd.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c]
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd_python.sh
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
-tasks:
-- install:
- branch: bobtail
-- ceph:
+++ /dev/null
-tasks:
-- workunit:
- branch: bobtail
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: dumpling
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a]
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - cls/test_cls_rbd.sh
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/test_librbd_python.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: emperor
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a]
+++ /dev/null
-tasks:
-- workunit:
- branch: emperor
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: bobtail
-- ceph:
-- rgw:
+++ /dev/null
-tasks:
-- s3tests:
- client.0:
- force-branch: bobtail
- rgw_server: client.0
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: dumpling
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c, rgw.client.0]
+++ /dev/null
-tasks:
-- s3readwrite:
- client.0:
- rgw_server: client.0
- readwrite:
- bucket: rwtest
- readers: 10
- writers: 3
- duration: 300
- files:
- num: 10
- size: 2000
- stddev: 500
+++ /dev/null
-tasks:
-- s3tests:
- client.0:
- force-branch: dumpling
- rgw_server: client.0
+++ /dev/null
-tasks:
-- swift:
- client.0:
- rgw_server: client.0
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3, rgw.client.0]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mds.a, mon.a, mon.b, mon.c, rgw.client.0]
+++ /dev/null
-tasks:
-- s3readwrite:
- client.0:
- rgw_server: client.0
- readwrite:
- bucket: rwtest
- readers: 10
- writers: 3
- duration: 300
- files:
- num: 10
- size: 2000
- stddev: 500
+++ /dev/null
-tasks:
-- s3tests:
- client.0:
- rgw_server: client.0
+++ /dev/null
-tasks:
-- swift:
- client.0:
- rgw_server: client.0
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
- - client.1
-- - client.0
-tasks:
-- install:
- branch: bobtail
-- ceph:
- conf:
- client:
- client mount timeout: 600
- rgw init timeout: 600
-- rgw: [client.0]
+++ /dev/null
-tasks:
-- s3readwrite:
- client.0:
- rgw_server: client.0
- readwrite:
- bucket: rwtest
- readers: 10
- writers: 3
- duration: 300
- files:
- num: 10
- size: 2000
- stddev: 500
+++ /dev/null
-tasks:
-- s3tests:
- client.0:
- rgw_server: client.0
- force-branch: bobtail
+++ /dev/null
-tasks:
-- swift:
- client.0:
- rgw_server: client.0
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: dumpling
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a, rgw.client.0]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0]
+++ /dev/null
-tasks:
-- s3readwrite:
- client.0:
- rgw_server: client.0
- readwrite:
- bucket: rwtest
- readers: 10
- writers: 3
- duration: 300
- files:
- num: 10
- size: 2000
- stddev: 500
+++ /dev/null
-tasks:
-- s3tests:
- client.0:
- force-branch: dumpling
- rgw_server: client.0
+++ /dev/null
-tasks:
-- swift:
- client.0:
- rgw_server: client.0
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: emperor
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3, mds.a, rgw.client.0]
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0]
+++ /dev/null
-tasks:
-- s3readwrite:
- client.0:
- rgw_server: client.0
- readwrite:
- bucket: rwtest
- readers: 10
- writers: 3
- duration: 300
- files:
- num: 10
- size: 2000
- stddev: 500
+++ /dev/null
-tasks:
-- s3tests:
- client.0:
- force-branch: emperor
- rgw_server: client.0
+++ /dev/null
-tasks:
-- swift:
- client.0:
- rgw_server: client.0
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- fs: xfs
-- ceph-fuse:
+++ /dev/null
-tasks:
-- workunit:
- clients:
- all:
- - suites/blogbench.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: emperor
+++ /dev/null
-tasks:
-- ceph.restart: [mds.a, mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
-
+++ /dev/null
-tasks:
-- workunit:
- branch: emperor
- clients:
- client.0:
- - suites/dbench.sh
+++ /dev/null
-../rados/distro
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- fs: xfs
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- all:
- - rados/load-gen-big.sh
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: emperor
+++ /dev/null
-tasks:
-- ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+++ /dev/null
-tasks:
-- workunit:
- branch: emperor
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-os_type: centos
-os_version: "6.4"
+++ /dev/null
-os_type: debian
-os_version: "7.0"
+++ /dev/null
-os_type: fedora
-os_version: "18"
+++ /dev/null
-os_type: rhel
-os_version: "6.3"
+++ /dev/null
-os_type: rhel
-os_version: "6.4"
+++ /dev/null
-os_type: ubuntu
-os_version: "12.04"
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- fs: xfs
+++ /dev/null
-tasks:
-- workunit:
- branch: dumpling
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: emperor
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a]
+++ /dev/null
-tasks:
-- workunit:
- branch: emperor
- clients:
- client.0:
- - cls/test_cls_rbd.sh
+++ /dev/null
-../rados/distro
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-tasks:
-- install:
- branch: dumpling
-- ceph:
- fs: xfs
-- rgw: [client.0]
+++ /dev/null
-tasks:
-- s3tests:
- client.0:
- rgw_server: client.0
- force-branch: dumpling
+++ /dev/null
-tasks:
-- install.upgrade:
- all:
- branch: emperor
+++ /dev/null
-tasks:
-- ceph.restart: [osd.0, osd.1, osd.2, osd.3, mon.a, mon.b, mon.c, mds.a, rgw.client.0]
+++ /dev/null
-tasks:
-- s3tests:
- client.0:
- rgw_server: client.0
+++ /dev/null
-../rados/distro
\ No newline at end of file
+++ /dev/null
-tasks:
-- cifs-mount:
- client.1:
- share: ceph
-- workunit:
- clients:
- client.1:
- - suites/dbench.sh
+++ /dev/null
-tasks:
-- cifs-mount:
- client.1:
- share: ceph
-- workunit:
- clients:
- client.1:
- - suites/fsstress.sh
+++ /dev/null
-tasks:
-- cifs-mount:
- client.1:
- share: ceph
-- workunit:
- clients:
- client.1:
- - kernel_untar_build.sh
-
+++ /dev/null
-tasks:
-- pexec:
- client.1:
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb
-# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon
-# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number
- - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl
-# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid
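The pexec fragment above is a long list of smbtorture invocations that differ only in the test name (two bench tests and scan-maxfid are commented out). For reference, lines of the same shape can be generated mechanically from a list of test names; only a subset is shown here.

tests = ['base.lock', 'base.fdpass', 'base.unlink', 'base.attr', 'base.trans2']
cmd = '/usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph {test}'
for test in tests:
    # Indentation matches the "client.1:" list in the fragment above.
    print('    - ' + cmd.format(test=test))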
+++ /dev/null
-import logging
-
-# Inherit teuthology's log level
-teuthology_log = logging.getLogger('teuthology')
-log = logging.getLogger(__name__)
-log.setLevel(teuthology_log.level)
+++ /dev/null
-"""
-Admin Socket task -- used in rados, powercycle, and smoke testing
-"""
-from cStringIO import StringIO
-
-import json
-import logging
-import os
-import time
-
-from teuthology.orchestra import run
-from teuthology import misc as teuthology
-from teuthology.parallel import parallel
-
-log = logging.getLogger(__name__)
-
-
-def task(ctx, config):
- """
- Run an admin socket command, make sure the output is json, and run
- a test program on it. The test program should read json from
- stdin. This task succeeds if the test program exits with status 0.
-
- To run the same test on all clients::
-
- tasks:
- - ceph:
- - rados:
- - admin_socket:
- all:
- dump_requests:
- test: http://example.com/script
-
- To restrict it to certain clients::
-
- tasks:
- - ceph:
- - rados: [client.1]
- - admin_socket:
- client.1:
- dump_requests:
- test: http://example.com/script
-
- If an admin socket command has arguments, they can be specified as
- a list::
-
- tasks:
- - ceph:
- - rados: [client.0]
- - admin_socket:
- client.0:
- dump_requests:
- test: http://example.com/script
- help:
- test: http://example.com/test_help_version
- args: [version]
-
- Note that there must be a ceph client with an admin socket running
- before this task is run. The tests are parallelized at the client
- level. Tests for a single client are run serially.
-
- :param ctx: Context
- :param config: Configuration
- """
- assert isinstance(config, dict), \
- 'admin_socket task requires a dict for configuration'
- teuthology.replace_all_with_clients(ctx.cluster, config)
-
- with parallel() as ptask:
- for client, tests in config.iteritems():
- ptask.spawn(_run_tests, ctx, client, tests)
-
-
-def _socket_command(ctx, remote, socket_path, command, args):
- """
- Run an admin socket command and return the result as a string.
-
- :param ctx: Context
- :param remote: Remote site
- :param socket_path: path to socket
- :param command: command to be run remotely
- :param args: command arguments
-
- :returns: output of command in json format
- """
- json_fp = StringIO()
- testdir = teuthology.get_testdir(ctx)
- max_tries = 60
- while True:
- proc = remote.run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'ceph',
- '--admin-daemon', socket_path,
- ] + command.split(' ') + args,
- stdout=json_fp,
- check_status=False,
- )
- if proc.exitstatus == 0:
- break
- assert max_tries > 0
- max_tries -= 1
- log.info('ceph cli returned an error, command not registered yet?')
- log.info('sleeping and retrying ...')
- time.sleep(1)
- out = json_fp.getvalue()
- json_fp.close()
- log.debug('admin socket command %s returned %s', command, out)
- return json.loads(out)
-
-def _run_tests(ctx, client, tests):
- """
- Create a temp directory and wait for a client socket to be created.
- For each test, copy the executable locally and run the test.
- Remove temp directory when finished.
-
- :param ctx: Context
- :param client: client machine to run the test
- :param tests: list of tests to run
- """
- testdir = teuthology.get_testdir(ctx)
- log.debug('Running admin socket tests on %s', client)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client)
- overrides = ctx.config.get('overrides', {}).get('admin_socket', {})
-
- try:
- tmp_dir = os.path.join(
- testdir,
- 'admin_socket_{client}'.format(client=client),
- )
- remote.run(
- args=[
- 'mkdir',
- '--',
- tmp_dir,
- run.Raw('&&'),
- # wait for client process to create the socket
- 'while', 'test', '!', '-e', socket_path, run.Raw(';'),
- 'do', 'sleep', '1', run.Raw(';'), 'done',
- ],
- )
-
- for command, config in tests.iteritems():
- if config is None:
- config = {}
- teuthology.deep_merge(config, overrides)
- log.debug('Testing %s with config %s', command, str(config))
-
- test_path = None
- if 'test' in config:
- url = config['test'].format(
- branch=config.get('branch', 'master')
- )
- test_path = os.path.join(tmp_dir, command)
- remote.run(
- args=[
- 'wget',
- '-q',
- '-O',
- test_path,
- '--',
- url,
- run.Raw('&&'),
- 'chmod',
- 'u=rx',
- '--',
- test_path,
- ],
- )
-
- args = config.get('args', [])
- assert isinstance(args, list), \
- 'admin socket command args must be a list'
- sock_out = _socket_command(ctx, remote, socket_path, command, args)
- if test_path is not None:
- remote.run(
- args=[
- test_path,
- ],
- stdin=json.dumps(sock_out),
- )
-
- finally:
- remote.run(
- args=[
- 'rm', '-rf', '--', tmp_dir,
- ],
- )
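As the task docstring above explains, each configured test is a program fetched over HTTP that reads the admin socket command's JSON output from stdin and signals success by exiting 0. A minimal example of such a test program might look like the following; the 'version' field it checks is only an assumed illustration of whatever the real script would inspect.

#!/usr/bin/env python
import json
import sys

# The admin_socket task pipes the command's JSON output into stdin.
data = json.load(sys.stdin)

# Illustrative check: require that the daemon reported a version string.
if not data.get('version'):
    sys.exit('no version in admin socket output: %r' % data)
sys.exit(0)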
+++ /dev/null
-<IfModule !version_module>
- LoadModule version_module {mod_path}/mod_version.so
-</IfModule>
-<IfModule !env_module>
- LoadModule env_module {mod_path}/mod_env.so
-</IfModule>
-<IfModule !rewrite_module>
- LoadModule rewrite_module {mod_path}/mod_rewrite.so
-</IfModule>
-<IfModule !fastcgi_module>
- LoadModule fastcgi_module {mod_path}/mod_fastcgi.so
-</IfModule>
-<IfModule !log_config_module>
- LoadModule log_config_module {mod_path}/mod_log_config.so
-</IfModule>
-
-Listen {port}
-ServerName {host}
-
-<IfVersion >= 2.4>
- <IfModule !unixd_module>
- LoadModule unixd_module {mod_path}/mod_unixd.so
- </IfModule>
- <IfModule !authz_core_module>
- LoadModule authz_core_module {mod_path}/mod_authz_core.so
- </IfModule>
- <IfModule !mpm_worker_module>
- LoadModule mpm_worker_module {mod_path}/mod_mpm_worker.so
- </IfModule>
- User {user}
- Group {group}
-</IfVersion>
-
-ServerRoot {testdir}/apache
-ErrorLog {testdir}/archive/apache.{client}/error.log
-LogFormat "%h l %u %t \"%r\" %>s %b \"{{Referer}}i\" \"%{{User-agent}}i\"" combined
-CustomLog {testdir}/archive/apache.{client}/access.log combined
-PidFile {testdir}/apache/tmp.{client}/apache.pid
-DocumentRoot {testdir}/apache/htdocs.{client}
-FastCgiIPCDir {testdir}/apache/tmp.{client}/fastcgi_sock
-FastCgiExternalServer {testdir}/apache/htdocs.{client}/rgw.fcgi -socket rgw_sock -idle-timeout {idle_timeout}
-RewriteEngine On
-
-RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /rgw.fcgi?page=$1&params=$2&%{{QUERY_STRING}} [E=HTTP_AUTHORIZATION:%{{HTTP:Authorization}},L]
-
-# Set fastcgi environment variables.
-# Note that this is separate from Unix environment variables!
-SetEnv RGW_LOG_LEVEL 20
-SetEnv RGW_SHOULD_LOG yes
-SetEnv RGW_PRINT_CONTINUE {print_continue}
-
-<Directory {testdir}/apache/htdocs.{client}>
- Options +ExecCGI
- AllowOverride All
- SetHandler fastcgi-script
-</Directory>
-
-AllowEncodedSlashes On
-ServerSignature Off
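The Apache configuration above is a Python string template: single-brace names such as {testdir} and {client} are filled in by the harness, while doubled braces ({{...}}) survive str.format() as literal braces for Apache's own %{...} syntax. A minimal rendering sketch follows; the file name and every substituted value are made up for illustration.

with open('apache.conf.template') as f:
    template = f.read()

conf = template.format(
    mod_path='/usr/lib/apache2/modules',
    host='localhost',
    port=7280,
    user='www-data',
    group='www-data',
    testdir='/home/ubuntu/cephtest',
    client='client.0',
    idle_timeout=30,
    print_continue='yes',
)
print(conf)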
+++ /dev/null
-"""
-Run an autotest test on the ceph cluster.
-"""
-import json
-import logging
-import os
-
-from teuthology import misc as teuthology
-from teuthology.parallel import parallel
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Run an autotest test on the ceph cluster.
-
- Only autotest client tests are supported.
-
- The config is a mapping from role name to list of tests to run on
- that client.
-
- For example::
-
- tasks:
- - ceph:
- - ceph-fuse: [client.0, client.1]
- - autotest:
- client.0: [dbench]
- client.1: [bonnie]
-
- You can also specify a list of tests to run on all clients::
-
- tasks:
- - ceph:
- - ceph-fuse:
- - autotest:
- all: [dbench]
- """
- assert isinstance(config, dict)
- config = teuthology.replace_all_with_clients(ctx.cluster, config)
- log.info('Setting up autotest...')
- testdir = teuthology.get_testdir(ctx)
- with parallel() as p:
- for role in config.iterkeys():
- (remote,) = ctx.cluster.only(role).remotes.keys()
- p.spawn(_download, testdir, remote)
-
- log.info('Making a separate scratch dir for every client...')
- for role in config.iterkeys():
- assert isinstance(role, basestring)
- PREFIX = 'client.'
- assert role.startswith(PREFIX)
- id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
- remote.run(
- args=[
- 'sudo',
- 'install',
- '-d',
- '-m', '0755',
- '--owner={user}'.format(user='ubuntu'), #TODO
- '--',
- scratch,
- ],
- )
-
- with parallel() as p:
- for role, tests in config.iteritems():
- (remote,) = ctx.cluster.only(role).remotes.keys()
- p.spawn(_run_tests, testdir, remote, role, tests)
-
-def _download(testdir, remote):
- """
- Download. Does not explicitly support multiple tasks in a single run.
- """
- remote.run(
- args=[
- # explicitly does not support multiple autotest tasks
- # in a single run; the result archival would conflict
- 'mkdir', '{tdir}/archive/autotest'.format(tdir=testdir),
- run.Raw('&&'),
- 'mkdir', '{tdir}/autotest'.format(tdir=testdir),
- run.Raw('&&'),
- 'wget',
- '-nv',
- '--no-check-certificate',
- 'https://github.com/ceph/autotest/tarball/ceph',
- '-O-',
- run.Raw('|'),
- 'tar',
- '-C', '{tdir}/autotest'.format(tdir=testdir),
- '-x',
- '-z',
- '-f-',
- '--strip-components=1',
- ],
- )
-
-def _run_tests(testdir, remote, role, tests):
- """
- Spawned to run test on remote site
- """
- assert isinstance(role, basestring)
- PREFIX = 'client.'
- assert role.startswith(PREFIX)
- id_ = role[len(PREFIX):]
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
-
- assert isinstance(tests, list)
- for idx, testname in enumerate(tests):
- log.info('Running autotest client test #%d: %s...', idx, testname)
-
- tag = 'client.{id}.num{idx}.{testname}'.format(
- idx=idx,
- testname=testname,
- id=id_,
- )
- control = '{tdir}/control.{tag}'.format(tdir=testdir, tag=tag)
- teuthology.write_file(
- remote=remote,
- path=control,
- data='import json; data=json.loads({data!r}); job.run_test(**data)'.format(
- data=json.dumps(dict(
- url=testname,
- dir=scratch,
- # TODO perhaps tag
- # results will be in {testdir}/autotest/client/results/dbench
- # or {testdir}/autotest/client/results/dbench.{tag}
- )),
- ),
- )
- remote.run(
- args=[
- '{tdir}/autotest/client/bin/autotest'.format(tdir=testdir),
- '--verbose',
- '--harness=simple',
- '--tag={tag}'.format(tag=tag),
- control,
- run.Raw('3>&1'),
- ],
- )
-
- remote.run(
- args=[
- 'rm', '-rf', '--', control,
- ],
- )
-
- remote.run(
- args=[
- 'mv',
- '--',
- '{tdir}/autotest/client/results/{tag}'.format(tdir=testdir, tag=tag),
- '{tdir}/archive/autotest/{tag}'.format(tdir=testdir, tag=tag),
- ],
- )
-
- remote.run(
- args=[
- 'rm', '-rf', '--', '{tdir}/autotest'.format(tdir=testdir),
- ],
- )
+++ /dev/null
-"""
-Run blktrace program through teuthology
-"""
-import contextlib
-import logging
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-blktrace = '/usr/sbin/blktrace'
-daemon_signal = 'term'
-
-@contextlib.contextmanager
-def setup(ctx, config):
- """
- Setup all the remotes
- """
- osds = ctx.cluster.only(teuthology.is_type('osd'))
- log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx))
-
- for remote, roles_for_host in osds.remotes.iteritems():
- log.info('Creating %s on %s' % (log_dir, remote.name))
- remote.run(
- args=['mkdir', '-p', '-m0755', '--', log_dir],
- wait=False,
- )
- yield
-
-@contextlib.contextmanager
-def execute(ctx, config):
- """
- Run the blktrace program on remote machines.
- """
- procs = []
- testdir = teuthology.get_testdir(ctx)
- log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir)
-
- osds = ctx.cluster.only(teuthology.is_type('osd'))
- for remote, roles_for_host in osds.remotes.iteritems():
- roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote]
- for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
- if roles_to_devs.get(id_):
- dev = roles_to_devs[id_]
- log.info("running blktrace on %s: %s" % (remote.name, dev))
-
- proc = remote.run(
- args=[
- 'cd',
- log_dir,
- run.Raw(';'),
- 'daemon-helper',
- daemon_signal,
- 'sudo',
- blktrace,
- '-o',
- dev.rsplit("/", 1)[1],
- '-d',
- dev,
- ],
- wait=False,
- stdin=run.PIPE,
- )
- procs.append(proc)
- try:
- yield
- finally:
- osds = ctx.cluster.only(teuthology.is_type('osd'))
- log.info('stopping blktrace processes')
- for proc in procs:
- proc.stdin.close()
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Usage:
- blktrace:
-
- Runs blktrace against the devices backing each osd.
- """
- if config is None:
- config = dict(('client.{id}'.format(id=id_), None)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
- elif isinstance(config, list):
- config = dict.fromkeys(config)
-
- with contextutil.nested(
- lambda: setup(ctx=ctx, config=config),
- lambda: execute(ctx=ctx, config=config),
- ):
- yield
-
+++ /dev/null
-[Boto]
-http_socket_timeout = {idle_timeout}
+++ /dev/null
-#!/usr/bin/env python
-
-import json
-import logging
-import requests
-
-log = logging.getLogger(__name__)
-
-
-class AuthenticatedHttpClient(requests.Session):
- """
- Client for the calamari REST API, principally exists to do
- authentication, but also helpfully prefixes
- URLs in requests with the API base URL and JSONizes
- POST data.
- """
- def __init__(self, api_url, username, password):
- super(AuthenticatedHttpClient, self).__init__()
- self._username = username
- self._password = password
- self._api_url = api_url
- self.headers = {
- 'Content-type': "application/json; charset=UTF-8"
- }
-
- def request(self, method, url, **kwargs):
- if not url.startswith('/'):
- url = self._api_url + url
- response = super(AuthenticatedHttpClient, self).request(method, url, **kwargs)
- if response.status_code >= 400:
- # For the benefit of test logs
- print "%s: %s" % (response.status_code, response.content)
- return response
-
- def post(self, url, data=None, **kwargs):
- if isinstance(data, dict):
- data = json.dumps(data)
- return super(AuthenticatedHttpClient, self).post(url, data, **kwargs)
-
- def patch(self, url, data=None, **kwargs):
- if isinstance(data, dict):
- data = json.dumps(data)
- return super(AuthenticatedHttpClient, self).patch(url, data, **kwargs)
-
- def login(self):
- """
- Authenticate with the Django auth system as
- it is exposed in the Calamari REST API.
- """
- log.info("Logging in as %s" % self._username)
- response = self.get("auth/login/")
- response.raise_for_status()
- self.headers['X-XSRF-TOKEN'] = response.cookies['XSRF-TOKEN']
-
- self.post("auth/login/", {
- 'next': "/",
- 'username': self._username,
- 'password': self._password
- })
- response.raise_for_status()
-
- # Check we're allowed in now.
- response = self.get("cluster")
- response.raise_for_status()
-
-if __name__ == "__main__":
-
- import argparse
-
- p = argparse.ArgumentParser()
- p.add_argument('-u', '--uri', default='http://mira035/api/v1/')
- p.add_argument('--user', default='admin')
- p.add_argument('--pass', dest='password', default='admin')
- args, remainder = p.parse_known_args()
-
- c = AuthenticatedHttpClient(args.uri, args.user, args.password)
- c.login()
- response = c.request('GET', ''.join(remainder)).json()
- print json.dumps(response, indent=2)
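Besides the command-line entry point above, the class is meant to be used as a thin wrapper around requests.Session: construct it with the API base URL and credentials, call login(), then issue requests with relative URLs. A short illustrative use follows; the endpoint and credentials are placeholders, and the 'name' field is read defensively since only 'id' is exercised by the tests that follow.

from http_client import AuthenticatedHttpClient

client = AuthenticatedHttpClient('http://calamari.example.com/api/v1/',
                                 'admin', 'admin')
client.login()

# Relative URLs are prefixed with the API base URL by request().
for cluster in client.get('cluster').json():
    print('%s %s' % (cluster['id'], cluster.get('name', '<unnamed>')))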
+++ /dev/null
-#!/usr/bin/env python
-
-import datetime
-import os
-import logging
-import logging.handlers
-import requests
-import uuid
-import unittest
-from http_client import AuthenticatedHttpClient
-
-log = logging.getLogger(__name__)
-log.addHandler(logging.StreamHandler())
-log.setLevel(logging.INFO)
-
-global base_uri
-global client
-base_uri = None
-server_uri = None
-client = None
-
-def setUpModule():
- global base_uri
- global server_uri
- global client
- try:
- base_uri = os.environ['CALAMARI_BASE_URI']
- except KeyError:
- log.error('Must define CALAMARI_BASE_URI')
- os._exit(1)
- if not base_uri.endswith('/'):
- base_uri += '/'
- if not base_uri.endswith('api/v1/'):
- base_uri += 'api/v1/'
- client = AuthenticatedHttpClient(base_uri, 'admin', 'admin')
- server_uri = base_uri.replace('api/v1/', '')
- client.login()
-
-class RestTest(unittest.TestCase):
- 'Base class for all tests here; get class\'s data'
-
- def setUp(self):
- # Called once for each test_* case. A bit wasteful, but we
- # really like using the simple class variable self.uri
- # to customize each derived TestCase
- method = getattr(self, 'method', 'GET')
- raw = self.uri.startswith('/')
- self.response = self.get_object(method, self.uri, raw=raw)
-
- def get_object(self, method, url, raw=False):
- global server_uri
- 'Return Python object decoded from JSON response to method/url'
- if not raw:
- return client.request(method, url).json()
- else:
- return requests.request(method, server_uri + url).json()
-
-class TestUserMe(RestTest):
-
- uri = 'user/me'
-
- def test_me(self):
- self.assertEqual(self.response['username'], 'admin')
-
-class TestCluster(RestTest):
-
- uri = 'cluster'
-
- def test_id(self):
- self.assertEqual(self.response[0]['id'], 1)
-
- def test_times(self):
- for time in (
- self.response[0]['cluster_update_time'],
- self.response[0]['cluster_update_attempt_time'],
- ):
- self.assertTrue(is_datetime(time))
-
- def test_api_base_url(self):
- api_base_url = self.response[0]['api_base_url']
- self.assertTrue(api_base_url.startswith('http'))
- self.assertIn('api/v0.1', api_base_url)
-
-class TestHealth(RestTest):
-
- uri = 'cluster/1/health'
-
- def test_cluster(self):
- self.assertEqual(self.response['cluster'], 1)
-
- def test_times(self):
- for time in (
- self.response['cluster_update_time'],
- self.response['added'],
- ):
- self.assertTrue(is_datetime(time))
-
- def test_report_and_overall_status(self):
- self.assertIn('report', self.response)
- self.assertIn('overall_status', self.response['report'])
-
-class TestHealthCounters(RestTest):
-
- uri = 'cluster/1/health_counters'
-
- def test_cluster(self):
- self.assertEqual(self.response['cluster'], 1)
-
- def test_time(self):
- self.assertTrue(is_datetime(self.response['cluster_update_time']))
-
- def test_existence(self):
- for section in ('pg', 'mon', 'osd'):
- for counter in ('warn', 'critical', 'ok'):
- count = self.response[section][counter]['count']
- self.assertIsInstance(count, int)
- self.assertIsInstance(self.response['pool']['total'], int)
-
- def test_mds_sum(self):
- count = self.response['mds']
- self.assertEqual(
- count['up_not_in'] + count['not_up_not_in'] + count['up_in'],
- count['total']
- )
-
-class TestSpace(RestTest):
-
- uri = 'cluster/1/space'
-
- def test_cluster(self):
- self.assertEqual(self.response['cluster'], 1)
-
- def test_times(self):
- for time in (
- self.response['cluster_update_time'],
- self.response['added'],
- ):
- self.assertTrue(is_datetime(time))
-
- def test_space(self):
- for size in ('free_bytes', 'used_bytes', 'capacity_bytes'):
- self.assertIsInstance(self.response['space'][size], int)
- self.assertGreater(self.response['space'][size], 0)
-
- def test_report(self):
- for size in ('total_used', 'total_space', 'total_avail'):
- self.assertIsInstance(self.response['report'][size], int)
- self.assertGreater(self.response['report'][size], 0)
-
-class TestOSD(RestTest):
-
- uri = 'cluster/1/osd'
-
- def test_cluster(self):
- self.assertEqual(self.response['cluster'], 1)
-
- def test_times(self):
- for time in (
- self.response['cluster_update_time'],
- self.response['added'],
- ):
- self.assertTrue(is_datetime(time))
-
- def test_osd_uuid(self):
- for osd in self.response['osds']:
- uuidobj = uuid.UUID(osd['uuid'])
- self.assertEqual(str(uuidobj), osd['uuid'])
-
- def test_osd_pools(self):
- for osd in self.response['osds']:
- if osd['up'] != 1:
- continue
- self.assertIsInstance(osd['pools'], list)
- self.assertIsInstance(osd['pools'][0], basestring)
-
- def test_osd_up_in(self):
- for osd in self.response['osds']:
- for flag in ('up', 'in'):
- self.assertIn(osd[flag], (0, 1))
-
- def test_osd_0(self):
- osd0 = self.get_object('GET', 'cluster/1/osd/0')['osd']
- for field in osd0.keys():
- if not field.startswith('cluster_update_time'):
- self.assertEqual(self.response['osds'][0][field], osd0[field])
-
-class TestPool(RestTest):
-
- uri = 'cluster/1/pool'
-
- def test_cluster(self):
- for pool in self.response:
- self.assertEqual(pool['cluster'], 1)
-
- def test_fields_are_ints(self):
- for pool in self.response:
- for field in ('id', 'used_objects', 'used_bytes'):
- self.assertIsInstance(pool[field], int)
-
- def test_name_is_str(self):
- for pool in self.response:
- self.assertIsInstance(pool['name'], basestring)
-
- def test_pool_0(self):
- poolid = self.response[0]['id']
- pool = self.get_object('GET', 'cluster/1/pool/{id}'.format(id=poolid))
- self.assertEqual(self.response[0], pool)
-
-class TestServer(RestTest):
-
- uri = 'cluster/1/server'
-
- def test_ipaddr(self):
- for server in self.response:
- octets = server['addr'].split('.')
- self.assertEqual(len(octets), 4)
- for octetstr in octets:
- octet = int(octetstr)
- self.assertIsInstance(octet, int)
- self.assertGreaterEqual(octet, 0)
- self.assertLessEqual(octet, 255)
-
- def test_hostname_name_strings(self):
- for server in self.response:
- for field in ('name', 'hostname'):
- self.assertIsInstance(server[field], basestring)
-
- def test_services(self):
- for server in self.response:
- self.assertIsInstance(server['services'], list)
- for service in server['services']:
- self.assertIn(service['type'], ('osd', 'mon', 'mds'))
-
-class TestGraphitePoolIOPS(RestTest):
-
- uri = '/graphite/render?format=json-array&' \
- 'target=ceph.cluster.ceph.pool.0.num_read&' \
- 'target=ceph.cluster.ceph.pool.0.num_write'
-
- def test_targets_contain_request(self):
- self.assertIn('targets', self.response)
- self.assertIn('ceph.cluster.ceph.pool.0.num_read',
- self.response['targets'])
- self.assertIn('ceph.cluster.ceph.pool.0.num_write',
- self.response['targets'])
-
- def test_datapoints(self):
- self.assertIn('datapoints', self.response)
- self.assertGreater(len(self.response['datapoints']), 0)
- data = self.response['datapoints'][0]
- self.assertEqual(len(data), 3)
- self.assertIsInstance(data[0], int)
- if data[1]:
- self.assertIsInstance(data[1], float)
- if data[2]:
- self.assertIsInstance(data[2], float)
-
-#
-# Utility functions
-#
-
-DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
-
-def is_datetime(time):
- datetime.datetime.strptime(time, DATETIME_FORMAT)
- return True
-
-if __name__ == '__main__':
- unittest.main()
+++ /dev/null
-"""
-Ceph cluster task.
-
-Handle the setup, starting, and clean-up of a Ceph cluster.
-"""
-from cStringIO import StringIO
-
-import argparse
-import contextlib
-import logging
-import os
-import json
-import time
-
-from ceph_manager import CephManager
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from teuthology.orchestra import run
-from teuthology.orchestra.daemon import DaemonGroup
-import ceph_client as cclient
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def ceph_log(ctx, config):
- """
- Create /var/log/ceph log directory that is open to everyone.
- Add valgrind and profiling-logger directories.
-
- :param ctx: Context
- :param config: Configuration
- """
- log.info('Making ceph log dir writeable by non-root...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'sudo',
- 'chmod',
- '777',
- '/var/log/ceph',
- ],
- wait=False,
- )
- )
- log.info('Disabling ceph logrotate...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'sudo',
- 'rm', '-f', '--',
- '/etc/logrotate.d/ceph',
- ],
- wait=False,
- )
- )
- log.info('Creating extra log directories...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'sudo',
- 'install', '-d', '-m0755', '--',
- '/var/log/ceph/valgrind',
- '/var/log/ceph/profiling-logger',
- ],
- wait=False,
- )
- )
-
- try:
- yield
-
- finally:
- pass
-
-
-def assign_devs(roles, devs):
- """
- Create a dictionary of devs indexed by roles
-
- :param roles: List of roles
- :param devs: Corresponding list of devices.
- :returns: Dictionary of devs indexed by roles.
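-
- For example (illustrative values)::
-
- assign_devs(['0', '1'], ['/dev/sdb', '/dev/sdc'])
- # -> {'0': '/dev/sdb', '1': '/dev/sdc'}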
- """
- return dict(zip(roles, devs))
-
-@contextlib.contextmanager
-def valgrind_post(ctx, config):
- """
- After the tests run, look through all the valgrind logs. Exceptions are raised
- if textual errors occurred in the logs, or if valgrind exceptions were detected in
- the logs.
-
- :param ctx: Context
- :param config: Configuration
- """
- try:
- yield
- finally:
- lookup_procs = list()
- log.info('Checking for errors in any valgrind logs...')
- for remote in ctx.cluster.remotes.iterkeys():
- #look at valgrind logs for each node
- proc = remote.run(
- args=[
- 'sudo',
- 'zgrep',
- '<kind>',
- run.Raw('/var/log/ceph/valgrind/*'),
- '/dev/null', # include a second file so that we always get a filename prefix on the output
- run.Raw('|'),
- 'sort',
- run.Raw('|'),
- 'uniq',
- ],
- wait=False,
- check_status=False,
- stdout=StringIO(),
- )
- lookup_procs.append((proc, remote))
-
- valgrind_exception = None
- for (proc, remote) in lookup_procs:
- proc.wait()
- out = proc.stdout.getvalue()
- for line in out.split('\n'):
- if line == '':
- continue
- try:
- (file, kind) = line.split(':')
- except Exception:
- log.error('failed to split line %s', line)
- raise
- log.debug('file %s kind %s', file, kind)
- if (file.find('mds') >= 0) and kind.find('Lost') > 0:
- continue
- log.error('saw valgrind issue %s in %s', kind, file)
- valgrind_exception = Exception('saw valgrind issues')
-
- if valgrind_exception is not None:
- raise valgrind_exception
-
-
-
-@contextlib.contextmanager
-def cluster(ctx, config):
- """
- Handle the creation and removal of a ceph cluster.
-
- On startup:
- Create directories needed for the cluster.
- Create remote journals for all osds.
- Create and set keyring.
- Copy the monmap to the test systems.
- Setup mon nodes.
- Setup mds nodes.
- Mkfs osd nodes.
- Add keyring information to monmaps
- Mkfs mon nodes.
-
- On exit:
- If errors occurred, extract a failure message and store in ctx.summary.
- Unmount all test files and temporary journaling files.
- Save the monitor information and archive all ceph logs.
- Cleanup the keyring setup, and remove all monitor map and data files left over.
-
- :param ctx: Context
- :param config: Configuration
- """
- if ctx.config.get('use_existing_cluster', False) is True:
- log.info("'use_existing_cluster' is true; skipping cluster creation")
- yield
- return
-
- testdir = teuthology.get_testdir(ctx)
- log.info('Creating ceph cluster...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'install', '-d', '-m0755', '--',
- '{tdir}/data'.format(tdir=testdir),
- ],
- wait=False,
- )
- )
-
- run.wait(
- ctx.cluster.run(
- args=[
- 'sudo',
- 'install', '-d', '-m0777', '--', '/var/run/ceph',
- ],
- wait=False,
- )
- )
-
-
- devs_to_clean = {}
- remote_to_roles_to_devs = {}
- remote_to_roles_to_journals = {}
- osds = ctx.cluster.only(teuthology.is_type('osd'))
- for remote, roles_for_host in osds.remotes.iteritems():
- devs = teuthology.get_scratch_devices(remote)
- roles_to_devs = {}
- roles_to_journals = {}
- if config.get('fs'):
- log.info('fs option selected, checking for scratch devs')
- log.info('found devs: %s' % (str(devs),))
- devs_id_map = teuthology.get_wwn_id_map(remote, devs)
- iddevs = devs_id_map.values()
- roles_to_devs = assign_devs(
- teuthology.roles_of_type(roles_for_host, 'osd'), iddevs
- )
- if len(roles_to_devs) < len(iddevs):
- iddevs = iddevs[len(roles_to_devs):]
- devs_to_clean[remote] = []
-
- if config.get('block_journal'):
- log.info('block journal enabled')
- roles_to_journals = assign_devs(
- teuthology.roles_of_type(roles_for_host, 'osd'), iddevs
- )
- log.info('journal map: %s', roles_to_journals)
-
- if config.get('tmpfs_journal'):
- log.info('tmpfs journal enabled')
- roles_to_journals = {}
- remote.run( args=[ 'sudo', 'mount', '-t', 'tmpfs', 'tmpfs', '/mnt' ] )
- for osd in teuthology.roles_of_type(roles_for_host, 'osd'):
- tmpfs = '/mnt/osd.%s' % osd
- roles_to_journals[osd] = tmpfs
- remote.run( args=[ 'truncate', '-s', '1500M', tmpfs ] )
- log.info('journal map: %s', roles_to_journals)
-
- log.info('dev map: %s' % (str(roles_to_devs),))
- remote_to_roles_to_devs[remote] = roles_to_devs
- remote_to_roles_to_journals[remote] = roles_to_journals
-
-
- log.info('Generating config...')
- remotes_and_roles = ctx.cluster.remotes.items()
- roles = [role_list for (remote, role_list) in remotes_and_roles]
- ips = [host for (host, port) in (remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
- conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips)
- for remote, roles_to_journals in remote_to_roles_to_journals.iteritems():
- for role, journal in roles_to_journals.iteritems():
- key = "osd." + str(role)
- if key not in conf:
- conf[key] = {}
- conf[key]['osd journal'] = journal
- for section, keys in config['conf'].iteritems():
- for key, value in keys.iteritems():
- log.info("[%s] %s = %s" % (section, key, value))
- if section not in conf:
- conf[section] = {}
- conf[section][key] = value
-
- if config.get('tmpfs_journal'):
- conf['journal dio'] = False
-
- ctx.ceph = argparse.Namespace()
- ctx.ceph.conf = conf
-
- keyring_path = config.get('keyring_path', '/etc/ceph/ceph.keyring')
-
- coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
-
- firstmon = teuthology.get_first_mon(ctx, config)
-
- log.info('Setting up %s...' % firstmon)
- ctx.cluster.only(firstmon).run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- '--create-keyring',
- keyring_path,
- ],
- )
- ctx.cluster.only(firstmon).run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- '--gen-key',
- '--name=mon.',
- keyring_path,
- ],
- )
- ctx.cluster.only(firstmon).run(
- args=[
- 'sudo',
- 'chmod',
- '0644',
- keyring_path,
- ],
- )
- (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
- fsid = teuthology.create_simple_monmap(
- ctx,
- remote=mon0_remote,
- conf=conf,
- )
- if 'global' not in conf:
- conf['global'] = {}
- conf['global']['fsid'] = fsid
-
- log.info('Writing ceph.conf for FSID %s...' % fsid)
- conf_path = config.get('conf_path', '/etc/ceph/ceph.conf')
- conf_fp = StringIO()
- conf.write(conf_fp)
- conf_fp.seek(0)
- writes = ctx.cluster.run(
- args=[
- 'sudo', 'mkdir', '-p', '/etc/ceph', run.Raw('&&'),
- 'sudo', 'chmod', '0755', '/etc/ceph', run.Raw('&&'),
- 'sudo', 'python',
- '-c',
- 'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
- conf_path,
- run.Raw('&&'),
- 'sudo', 'chmod', '0644', conf_path,
- ],
- stdin=run.PIPE,
- wait=False,
- )
- teuthology.feed_many_stdins_and_close(conf_fp, writes)
- run.wait(writes)
-
- log.info('Creating admin key on %s...' % firstmon)
- ctx.cluster.only(firstmon).run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- '--gen-key',
- '--name=client.admin',
- '--set-uid=0',
- '--cap', 'mon', 'allow *',
- '--cap', 'osd', 'allow *',
- '--cap', 'mds', 'allow',
- keyring_path,
- ],
- )
-
- log.info('Copying monmap to all nodes...')
- keyring = teuthology.get_file(
- remote=mon0_remote,
- path=keyring_path,
- )
- monmap = teuthology.get_file(
- remote=mon0_remote,
- path='{tdir}/monmap'.format(tdir=testdir),
- )
-
- for rem in ctx.cluster.remotes.iterkeys():
- # copy mon key and initial monmap
- log.info('Sending monmap to node {remote}'.format(remote=rem))
- teuthology.sudo_write_file(
- remote=rem,
- path=keyring_path,
- data=keyring,
- perms='0644'
- )
- teuthology.write_file(
- remote=rem,
- path='{tdir}/monmap'.format(tdir=testdir),
- data=monmap,
- )
-
- log.info('Setting up mon nodes...')
- mons = ctx.cluster.only(teuthology.is_type('mon'))
- run.wait(
- mons.run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'osdmaptool',
- '-c', conf_path,
- '--clobber',
- '--createsimple', '{num:d}'.format(
- num=teuthology.num_instances_of_type(ctx.cluster, 'osd'),
- ),
- '{tdir}/osdmap'.format(tdir=testdir),
- '--pg_bits', '2',
- '--pgp_bits', '4',
- ],
- wait=False,
- ),
- )
-
- log.info('Setting up mds nodes...')
- mdss = ctx.cluster.only(teuthology.is_type('mds'))
- for remote, roles_for_host in mdss.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, 'mds'):
- remote.run(
- args=[
- 'sudo',
- 'mkdir',
- '-p',
- '/var/lib/ceph/mds/ceph-{id}'.format(id=id_),
- run.Raw('&&'),
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- '--create-keyring',
- '--gen-key',
- '--name=mds.{id}'.format(id=id_),
- '/var/lib/ceph/mds/ceph-{id}/keyring'.format(id=id_),
- ],
- )
-
- cclient.create_keyring(ctx)
- log.info('Running mkfs on osd nodes...')
-
- ctx.disk_config = argparse.Namespace()
- ctx.disk_config.remote_to_roles_to_dev = remote_to_roles_to_devs
- ctx.disk_config.remote_to_roles_to_journals = remote_to_roles_to_journals
- ctx.disk_config.remote_to_roles_to_dev_mount_options = {}
- ctx.disk_config.remote_to_roles_to_dev_fstype = {}
-
- log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(r=str(ctx.disk_config.remote_to_roles_to_dev)))
- for remote, roles_for_host in osds.remotes.iteritems():
- roles_to_devs = remote_to_roles_to_devs[remote]
- roles_to_journals = remote_to_roles_to_journals[remote]
-
-
- for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
- remote.run(
- args=[
- 'sudo',
- 'mkdir',
- '-p',
- '/var/lib/ceph/osd/ceph-{id}'.format(id=id_),
- ])
- log.info(str(roles_to_journals))
- log.info(id_)
- if roles_to_devs.get(id_):
- dev = roles_to_devs[id_]
- fs = config.get('fs')
- package = None
- mkfs_options = config.get('mkfs_options')
- mount_options = config.get('mount_options')
- if fs == 'btrfs':
- #package = 'btrfs-tools'
- if mount_options is None:
- mount_options = ['noatime','user_subvol_rm_allowed']
- if mkfs_options is None:
- mkfs_options = ['-m', 'single',
- '-l', '32768',
- '-n', '32768']
- if fs == 'xfs':
- #package = 'xfsprogs'
- if mount_options is None:
- mount_options = ['noatime']
- if mkfs_options is None:
- mkfs_options = ['-f', '-i', 'size=2048']
- if fs == 'ext4' or fs == 'ext3':
- if mount_options is None:
- mount_options = ['noatime','user_xattr']
-
- if mount_options is None:
- mount_options = []
- if mkfs_options is None:
- mkfs_options = []
- mkfs = ['mkfs.%s' % fs] + mkfs_options
- log.info('%s on %s on %s' % (mkfs, dev, remote))
- if package is not None:
- remote.run(
- args=[
- 'sudo',
- 'apt-get', 'install', '-y', package
- ],
- stdout=StringIO(),
- )
-
- try:
- remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
- except run.CommandFailedError:
- # Newer btrfs-tools doesn't prompt for overwrite, use -f
- if '-f' not in mkfs_options:
- mkfs_options.append('-f')
- mkfs = ['mkfs.%s' % fs] + mkfs_options
- log.info('%s on %s on %s' % (mkfs, dev, remote))
- remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
-
- log.info('mount %s on %s -o %s' % (dev, remote,
- ','.join(mount_options)))
- remote.run(
- args=[
- 'sudo',
- 'mount',
- '-t', fs,
- '-o', ','.join(mount_options),
- dev,
- os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_)),
- ]
- )
- if remote not in ctx.disk_config.remote_to_roles_to_dev_mount_options:
- ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {}
- ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][id_] = mount_options
- if remote not in ctx.disk_config.remote_to_roles_to_dev_fstype:
- ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {}
- ctx.disk_config.remote_to_roles_to_dev_fstype[remote][id_] = fs
- devs_to_clean[remote].append(
- os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=id_))
- )
-
- for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
- remote.run(
- args=[
- 'sudo',
- 'MALLOC_CHECK_=3',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph-osd',
- '--mkfs',
- '--mkkey',
- '-i', id_,
- '--monmap', '{tdir}/monmap'.format(tdir=testdir),
- ],
- )
-
-
- log.info('Reading keys from all nodes...')
- keys_fp = StringIO()
- keys = []
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
- for type_ in ['mds','osd']:
- for id_ in teuthology.roles_of_type(roles_for_host, type_):
- data = teuthology.get_file(
- remote=remote,
- path='/var/lib/ceph/{type}/ceph-{id}/keyring'.format(
- type=type_,
- id=id_,
- ),
- sudo=True,
- )
- keys.append((type_, id_, data))
- keys_fp.write(data)
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
- for type_ in ['client']:
- for id_ in teuthology.roles_of_type(roles_for_host, type_):
- data = teuthology.get_file(
- remote=remote,
- path='/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
- )
- keys.append((type_, id_, data))
- keys_fp.write(data)
-
- log.info('Adding keys to all mons...')
- writes = mons.run(
- args=[
- 'sudo', 'tee', '-a',
- keyring_path,
- ],
- stdin=run.PIPE,
- wait=False,
- stdout=StringIO(),
- )
- keys_fp.seek(0)
- teuthology.feed_many_stdins_and_close(keys_fp, writes)
- run.wait(writes)
- for type_, id_, data in keys:
- run.wait(
- mons.run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- keyring_path,
- '--name={type}.{id}'.format(
- type=type_,
- id=id_,
- ),
- ] + list(teuthology.generate_caps(type_)),
- wait=False,
- ),
- )
-
- log.info('Running mkfs on mon nodes...')
- for remote, roles_for_host in mons.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
- remote.run(
- args=[
- 'sudo',
- 'mkdir',
- '-p',
- '/var/lib/ceph/mon/ceph-{id}'.format(id=id_),
- ],
- )
- remote.run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph-mon',
- '--mkfs',
- '-i', id_,
- '--monmap={tdir}/monmap'.format(tdir=testdir),
- '--osdmap={tdir}/osdmap'.format(tdir=testdir),
- '--keyring={kpath}'.format(kpath=keyring_path),
- ],
- )
-
-
- run.wait(
- mons.run(
- args=[
- 'rm',
- '--',
- '{tdir}/monmap'.format(tdir=testdir),
- '{tdir}/osdmap'.format(tdir=testdir),
- ],
- wait=False,
- ),
- )
-
- try:
- yield
- except Exception:
- # we need to know this below
- ctx.summary['success'] = False
- raise
- finally:
- (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
-
- log.info('Checking cluster log for badness...')
- def first_in_ceph_log(pattern, excludes):
- """
- Find the first occurrence of the pattern specified in the Ceph log.
- Returns None if none found.
-
- :param pattern: Pattern scanned for.
- :param excludes: Patterns to ignore.
- :return: First line of text (or None if not found)
- """
- args = [
- 'sudo',
- 'egrep', pattern,
- '/var/log/ceph/ceph.log',
- ]
- for exclude in excludes:
- args.extend([run.Raw('|'), 'egrep', '-v', exclude])
- args.extend([
- run.Raw('|'), 'head', '-n', '1',
- ])
- r = mon0_remote.run(
- stdout=StringIO(),
- args=args,
- )
- stdout = r.stdout.getvalue()
- if stdout != '':
- return stdout
- return None
-
- if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]',
- config['log_whitelist']) is not None:
- log.warning('Found errors (ERR|WRN|SEC) in cluster log')
- ctx.summary['success'] = False
- # use the most severe problem as the failure reason
- if 'failure_reason' not in ctx.summary:
- for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']:
- match = first_in_ceph_log(pattern, config['log_whitelist'])
- if match is not None:
- ctx.summary['failure_reason'] = \
- '"{match}" in cluster log'.format(
- match=match.rstrip('\n'),
- )
- break
-
- for remote, dirs in devs_to_clean.iteritems():
- for dir_ in dirs:
- log.info('Unmounting %s on %s' % (dir_, remote))
- remote.run(
- args=[
- 'sync',
- run.Raw('&&'),
- 'sudo',
- 'umount',
- '-f',
- dir_
- ]
- )
-
- if config.get('tmpfs_journal'):
- log.info('tmpfs journal enabled - unmounting tmpfs at /mnt')
- for remote, roles_for_host in osds.remotes.iteritems():
- remote.run(
- args=[ 'sudo', 'umount', '-f', '/mnt' ],
- check_status=False,
- )
-
- if ctx.archive is not None and \
- not (ctx.config.get('archive-on-error') and ctx.summary['success']):
- # archive mon data, too
- log.info('Archiving mon data...')
- path = os.path.join(ctx.archive, 'data')
- os.makedirs(path)
- for remote, roles in mons.remotes.iteritems():
- for role in roles:
- if role.startswith('mon.'):
- teuthology.pull_directory_tarball(
- remote,
- '/var/lib/ceph/mon',
- path + '/' + role + '.tgz')
-
- # and logs
- log.info('Compressing logs...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'sudo',
- 'find',
- '/var/log/ceph',
- '-name',
- '*.log',
- '-print0',
- run.Raw('|'),
- 'sudo',
- 'xargs',
- '-0',
- '--no-run-if-empty',
- '--',
- 'gzip',
- '--',
- ],
- wait=False,
- ),
- )
-
- log.info('Archiving logs...')
- path = os.path.join(ctx.archive, 'remote')
- os.makedirs(path)
- for remote in ctx.cluster.remotes.iterkeys():
- sub = os.path.join(path, remote.shortname)
- os.makedirs(sub)
- teuthology.pull_directory(remote, '/var/log/ceph',
- os.path.join(sub, 'log'))
-
-
- log.info('Cleaning ceph cluster...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'sudo',
- 'rm',
- '-rf',
- '--',
- conf_path,
- keyring_path,
- '{tdir}/data'.format(tdir=testdir),
- '{tdir}/monmap'.format(tdir=testdir),
- ],
- wait=False,
- ),
- )
-
-def get_all_pg_info(rem_site, testdir):
- """
- Get the results of a ceph pg dump
- """
- info = rem_site.run(args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'ceph', 'pg', 'dump',
- '--format', 'json'], stdout=StringIO())
- all_info = json.loads(info.stdout.getvalue())
- return all_info['pg_stats']
-
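-# Note: osd_scrub_pgs below relies on just two fields of each pg_stats entry:
-# 'state' (e.g. 'active+clean') and 'last_scrub_stamp', which is assumed to look
-# like 'YYYY-MM-DD HH:MM:SS.ffffff' and is truncated at the '.' before parsing.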
-def osd_scrub_pgs(ctx, config):
- """
- Scrub pgs when we exit.
-
- First make sure all pgs are active and clean.
- Next scrub all osds.
- Then periodically check until all pgs have scrub time stamps that
- indicate the last scrub completed. Time out if no progress is made
- here after two minutes.
- """
- retries = 12
- delays = 10
- vlist = ctx.cluster.remotes.values()
- testdir = teuthology.get_testdir(ctx)
- rem_site = ctx.cluster.remotes.keys()[0]
- all_clean = False
- for _ in range(0, retries):
- stats = get_all_pg_info(rem_site, testdir)
- states = [stat['state'] for stat in stats]
- if len(set(states)) == 1 and states[0] == 'active+clean':
- all_clean = True
- break
- log.info("Waiting for all osds to be active and clean.")
- time.sleep(delays)
- if not all_clean:
- log.info("Scrubbing terminated -- not all pgs were active and clean.")
- return
- check_time_now = time.localtime()
- time.sleep(1)
- for slists in vlist:
- for role in slists:
- if role.startswith('osd.'):
- log.info("Scrubbing osd {osd}".format(osd=role))
- rem_site.run(args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'ceph', 'osd', 'scrub', role])
- prev_good = 0
- gap_cnt = 0
- loop = True
- while loop:
- stats = get_all_pg_info(rem_site, testdir)
- timez = [stat['last_scrub_stamp'] for stat in stats]
- loop = False
- thiscnt = 0
- for tmval in timez:
- pgtm = time.strptime(tmval[0:tmval.find('.')], '%Y-%m-%d %H:%M:%S')
- if pgtm > check_time_now:
- thiscnt += 1
- else:
- loop = True
- if thiscnt > prev_good:
- prev_good = thiscnt
- gap_cnt = 0
- else:
- gap_cnt += 1
- if gap_cnt > retries:
- log.info('Exiting scrub checking -- not all pgs scrubbed.')
- return
- if loop:
- log.info('Still waiting for all pgs to be scrubbed.')
- time.sleep(delays)
-
-@contextlib.contextmanager
-def run_daemon(ctx, config, type_):
- """
- Run daemons for a role type. Handle the startup and termination of a daemon.
- On startup -- set coverages, cpu_profile, valgrind values for all remotes,
- and a max_mds value for one mds.
- On cleanup -- Stop all existing daemons of this type.
-
- :param ctx: Context
- :param config: Configuration
- :param type_: Role type
- """
- log.info('Starting %s daemons...' % type_)
- testdir = teuthology.get_testdir(ctx)
- daemons = ctx.cluster.only(teuthology.is_type(type_))
-
- # check whether any daemons of this type are configured
- if daemons is None:
- return
- coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
-
- daemon_signal = 'kill'
- if config.get('coverage') or config.get('valgrind') is not None:
- daemon_signal = 'term'
-
- num_active = 0
- for remote, roles_for_host in daemons.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, type_):
- name = '%s.%s' % (type_, id_)
-
- if not (id_.endswith('-s')) and (id_.find('-s-') == -1):
- num_active += 1
-
- run_cmd = [
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'daemon-helper',
- daemon_signal,
- ]
- run_cmd_tail = [
- 'ceph-%s' % (type_),
- '-f',
- '-i', id_]
-
- if type_ in config.get('cpu_profile', []):
- profile_path = '/var/log/ceph/profiling-logger/%s.%s.prof' % (type_, id_)
- run_cmd.extend([ 'env', 'CPUPROFILE=%s' % profile_path ])
-
- if config.get('valgrind') is not None:
- valgrind_args = None
- if type_ in config['valgrind']:
- valgrind_args = config['valgrind'][type_]
- if name in config['valgrind']:
- valgrind_args = config['valgrind'][name]
- run_cmd = teuthology.get_valgrind_args(testdir, name,
- run_cmd,
- valgrind_args)
-
- run_cmd.extend(run_cmd_tail)
-
- ctx.daemons.add_daemon(remote, type_, id_,
- args=run_cmd,
- logger=log.getChild(name),
- stdin=run.PIPE,
- wait=False,
- )
-
- if type_ == 'mds':
- firstmon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
-
- mon0_remote.run(args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph',
- 'mds', 'set_max_mds', str(num_active)])
-
- try:
- yield
- finally:
- teuthology.stop_daemons_of_type(ctx, type_)
-
-def healthy(ctx, config):
- """
- Wait for all osds to be up, and for 'ceph health' to report HEALTH_OK.
-
- :param ctx: Context
- :param config: Configuration
- """
- log.info('Waiting until ceph is healthy...')
- firstmon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
- teuthology.wait_until_osds_up(
- ctx,
- cluster=ctx.cluster,
- remote=mon0_remote
- )
- teuthology.wait_until_healthy(
- ctx,
- remote=mon0_remote,
- )
-
-def wait_for_osds_up(ctx, config):
- """
- Wait for all osds to come up.
-
- :param ctx: Context
- :param config: Configuration
- """
- log.info('Waiting until ceph osds are all up...')
- firstmon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
- teuthology.wait_until_osds_up(
- ctx,
- cluster=ctx.cluster,
- remote=mon0_remote
- )
-
-def wait_for_mon_quorum(ctx, config):
- """
- Check remote ceph quorum status until all of the listed monitors are in quorum.
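-
- For example (illustrative; the mon ids must match the job's roles)::
-
- tasks:
- - ceph.wait_for_mon_quorum: [a, b, c]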
-
- :param ctx: Context
- :param config: Configuration
- """
-
- assert isinstance(config, list)
- firstmon = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(firstmon).remotes.keys()
- while True:
- r = remote.run(
- args=[
- 'ceph',
- 'quorum_status',
- ],
- stdout=StringIO(),
- logger=log.getChild('quorum_status'),
- )
- j = json.loads(r.stdout.getvalue())
- q = j.get('quorum_names', [])
- log.debug('Quorum: %s', q)
- if sorted(q) == sorted(config):
- break
- time.sleep(1)
-
-
-@contextlib.contextmanager
-def restart(ctx, config):
- """
- restart ceph daemons
-
- For example::
- tasks:
- - ceph.restart: [all]
-
- For example::
- tasks:
- - ceph.restart: [osd.0, mon.1]
-
- or::
-
- tasks:
- - ceph.restart:
- daemons: [osd.0, mon.1]
- wait-for-healthy: false
- wait-for-osds-up: true
-
- :param ctx: Context
- :param config: Configuration
- """
- if config is None:
- config = {}
- if isinstance(config, list):
- config = { 'daemons': config }
- if 'daemons' not in config:
- config['daemons'] = []
- type_daemon = ['mon', 'osd', 'mds', 'rgw']
- for d in type_daemon:
- type_ = d
- for daemon in ctx.daemons.iter_daemons_of_role(type_):
- config['daemons'].append(type_ + '.' + daemon.id_)
-
- assert isinstance(config['daemons'], list)
- daemons = dict.fromkeys(config['daemons'])
- for i in daemons.keys():
- type_ = i.split('.', 1)[0]
- id_ = i.split('.', 1)[1]
- ctx.daemons.get_daemon(type_, id_).stop()
- ctx.daemons.get_daemon(type_, id_).restart()
-
- if config.get('wait-for-healthy', True):
- healthy(ctx=ctx, config=None)
- if config.get('wait-for-osds-up', False):
- wait_for_osds_up(ctx=ctx, config=None)
- yield
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Set up and tear down a Ceph cluster.
-
- For example::
-
- tasks:
- - ceph:
- - interactive:
-
- You can also specify what branch to run::
-
- tasks:
- - ceph:
- branch: foo
-
- Or a tag::
-
- tasks:
- - ceph:
- tag: v0.42.13
-
- Or a sha1::
-
- tasks:
- - ceph:
- sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed
-
- Or a local source dir::
-
- tasks:
- - ceph:
- path: /home/sage/ceph
-
- To capture code coverage data, use::
-
- tasks:
- - ceph:
- coverage: true
-
- To use btrfs, ext4, or xfs on the target's scratch disks, use::
-
- tasks:
- - ceph:
- fs: xfs
- mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1]
- mount_options: [nobarrier, inode64]
-
- Note, this will cause the task to check the /scratch_devs file on each node
- for available devices. If no such file is found, /dev/sdb will be used.
-
- To run some daemons under valgrind, include their names
- and the tool/args to use in a valgrind section::
-
- tasks:
- - ceph:
- valgrind:
- mds.1: --tool=memcheck
- osd.1: [--tool=memcheck, --leak-check=no]
-
- Those nodes which are using memcheck or valgrind will get
- checked for bad results.
-
- To adjust or modify config options, use::
-
- tasks:
- - ceph:
- conf:
- section:
- key: value
-
- For example::
-
- tasks:
- - ceph:
- conf:
- mds.0:
- some option: value
- other key: other value
- client.0:
- debug client: 10
- debug ms: 1
-
- By default, the cluster log is checked for errors and warnings,
- and the run marked failed if any appear. You can ignore log
- entries by giving a list of egrep-compatible regexes, for example::
-
- tasks:
- - ceph:
- log-whitelist: ['foo.*bar', 'bad message']
-
- :param ctx: Context
- :param config: Configuration
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- "task ceph only supports a dictionary for configuration"
-
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('ceph', {}))
-
- ctx.daemons = DaemonGroup()
-
- testdir = teuthology.get_testdir(ctx)
- if config.get('coverage'):
- coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
- log.info('Creating coverage directory...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'install', '-d', '-m0755', '--',
- coverage_dir,
- ],
- wait=False,
- )
- )
-
- with contextutil.nested(
- lambda: ceph_log(ctx=ctx, config=None),
- lambda: valgrind_post(ctx=ctx, config=config),
- lambda: cluster(ctx=ctx, config=dict(
- conf=config.get('conf', {}),
- fs=config.get('fs', None),
- mkfs_options=config.get('mkfs_options', None),
- mount_options=config.get('mount_options',None),
- block_journal=config.get('block_journal', None),
- tmpfs_journal=config.get('tmpfs_journal', None),
- log_whitelist=config.get('log-whitelist', []),
- cpu_profile=set(config.get('cpu_profile', [])),
- )),
- lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
- lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
- lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
- ):
- try:
- if config.get('wait-for-healthy', True):
- healthy(ctx=ctx, config=None)
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
- ctx.manager = CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
- yield
- finally:
- osd_scrub_pgs(ctx, config)
+++ /dev/null
-"""
-Set up client keyring
-"""
-import logging
-
-from teuthology import misc as teuthology
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-def create_keyring(ctx):
- """
- Set up client keyrings on remote sites
- """
- log.info('Setting up client nodes...')
- clients = ctx.cluster.only(teuthology.is_type('client'))
- testdir = teuthology.get_testdir(ctx)
- coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
- for remote, roles_for_host in clients.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
- client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
- remote.run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- '--create-keyring',
- '--gen-key',
- # TODO this --name= is not really obeyed, all unknown "types" are munged to "client"
- '--name=client.{id}'.format(id=id_),
- client_keyring,
- run.Raw('&&'),
- 'sudo',
- 'chmod',
- '0644',
- client_keyring,
- ],
- )
+++ /dev/null
-"""
-Execute ceph-deploy as a task
-"""
-from cStringIO import StringIO
-
-import contextlib
-import os
-import time
-import logging
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from teuthology.config import config as teuth_config
-from teuthology.task import install as install_fn
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def download_ceph_deploy(ctx, config):
- """
- Downloads ceph-deploy from the ceph.com git mirror and (by default)
- switches to the master branch. If the `ceph-deploy-branch` is specified, it
- will use that instead.
- """
- log.info('Downloading ceph-deploy...')
- testdir = teuthology.get_testdir(ctx)
- ceph_admin = teuthology.get_first_mon(ctx, config)
- default_cd_branch = {'ceph-deploy-branch': 'master'}
- ceph_deploy_branch = config.get(
- 'ceph-deploy',
- default_cd_branch).get('ceph-deploy-branch')
-
- ctx.cluster.only(ceph_admin).run(
- args=[
- 'git', 'clone', '-b', ceph_deploy_branch,
- teuth_config.ceph_git_base_url + 'ceph-deploy.git',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- ],
- )
- ctx.cluster.only(ceph_admin).run(
- args=[
- 'cd',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- run.Raw('&&'),
- './bootstrap',
- ],
- )
-
- try:
- yield
- finally:
- log.info('Removing ceph-deploy ...')
- ctx.cluster.only(ceph_admin).run(
- args=[
- 'rm',
- '-rf',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- ],
- )
-
-
-def is_healthy(ctx, config):
- """Wait until a Ceph cluster is healthy."""
- testdir = teuthology.get_testdir(ctx)
- ceph_admin = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
- max_tries = 90 # 90 tries * 10 secs --> 15 minutes
- tries = 0
- while True:
- tries += 1
- if tries >= max_tries:
- msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes"
- raise RuntimeError(msg)
-
- r = remote.run(
- args=[
- 'cd',
- '{tdir}'.format(tdir=testdir),
- run.Raw('&&'),
- 'sudo', 'ceph',
- 'health',
- ],
- stdout=StringIO(),
- logger=log.getChild('health'),
- )
- out = r.stdout.getvalue()
- log.debug('Ceph health: %s', out.rstrip('\n'))
- if out.split(None, 1)[0] == 'HEALTH_OK':
- break
- time.sleep(10)
-
-def get_nodes_using_roles(ctx, config, role):
- """Extract the names of nodes that match a given role from a cluster"""
- newl = []
- for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, role):
- rem = _remote
- if role == 'mon':
- req1 = str(rem).split('@')[-1]
- else:
- req = str(rem).split('.')[0]
- req1 = str(req).split('@')[1]
- newl.append(req1)
- return newl
-
-def get_dev_for_osd(ctx, config):
- """Get a list of all osd device names."""
- osd_devs = []
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
- host = remote.name.split('@')[-1]
- shortname = host.split('.')[0]
- devs = teuthology.get_scratch_devices(remote)
- num_osd_per_host = list(teuthology.roles_of_type(roles_for_host, 'osd'))
- num_osds = len(num_osd_per_host)
- assert num_osds <= len(devs), 'fewer disks than osds on ' + shortname
- for dev in devs[:num_osds]:
- dev_short = dev.split('/')[-1]
- osd_devs.append('{host}:{dev}'.format(host=shortname, dev=dev_short))
- return osd_devs
-
-def get_all_nodes(ctx, config):
- """Return a string of node names separated by blanks"""
- nodelist = []
- for t, k in ctx.config['targets'].iteritems():
- host = t.split('@')[-1]
- simple_host = host.split('.')[0]
- nodelist.append(simple_host)
- nodelist = " ".join(nodelist)
- return nodelist
-
-def execute_ceph_deploy(ctx, config, cmd):
- """Remotely execute a ceph_deploy command"""
- testdir = teuthology.get_testdir(ctx)
- ceph_admin = teuthology.get_first_mon(ctx, config)
- exec_cmd = cmd
- (remote,) = ctx.cluster.only(ceph_admin).remotes.iterkeys()
- proc = remote.run(
- args = [
- 'cd',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- run.Raw('&&'),
- run.Raw(exec_cmd),
- ],
- check_status=False,
- )
- exitstatus = proc.exitstatus
- return exitstatus
-
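-# Illustrative usage (the cmd strings are built by build_ceph_cluster below;
-# hostnames here are hypothetical):
-#   execute_ceph_deploy(ctx, config, './ceph-deploy new mon-host-1 mon-host-2')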
-
-@contextlib.contextmanager
-def build_ceph_cluster(ctx, config):
- """Build a ceph cluster"""
-
- try:
- log.info('Building ceph cluster using ceph-deploy...')
- testdir = teuthology.get_testdir(ctx)
- ceph_branch = None
- if config.get('branch') is not None:
- cbranch = config.get('branch')
- for var, val in cbranch.iteritems():
- if var == 'testing':
- ceph_branch = '--{var}'.format(var=var)
- else:
- ceph_branch = '--{var}={val}'.format(var=var, val=val)
- node_dev_list = []
- all_nodes = get_all_nodes(ctx, config)
- mds_nodes = get_nodes_using_roles(ctx, config, 'mds')
- mds_nodes = " ".join(mds_nodes)
- mon_node = get_nodes_using_roles(ctx, config, 'mon')
- mon_nodes = " ".join(mon_node)
- new_mon = './ceph-deploy new'+" "+mon_nodes
- install_nodes = './ceph-deploy install '+ceph_branch+" "+all_nodes
- purge_nodes = './ceph-deploy purge'+" "+all_nodes
- purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
- mon_hostname = mon_nodes.split(' ')[0]
- mon_hostname = str(mon_hostname)
- gather_keys = './ceph-deploy gatherkeys'+" "+mon_hostname
- deploy_mds = './ceph-deploy mds create'+" "+mds_nodes
- no_of_osds = 0
-
- if mon_nodes is None:
- raise RuntimeError("no monitor nodes in the config file")
-
- estatus_new = execute_ceph_deploy(ctx, config, new_mon)
- if estatus_new != 0:
- raise RuntimeError("ceph-deploy: new command failed")
-
- log.info('adding config inputs...')
- testdir = teuthology.get_testdir(ctx)
- conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
- first_mon = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(first_mon).remotes.keys()
-
- lines = None
- if config.get('conf') is not None:
- confp = config.get('conf')
- for section, keys in confp.iteritems():
- lines = '[{section}]\n'.format(section=section)
- teuthology.append_lines_to_file(remote, conf_path, lines,
- sudo=True)
- for key, value in keys.iteritems():
- log.info("[%s] %s = %s" % (section, key, value))
- lines = '{key} = {value}\n'.format(key=key, value=value)
- teuthology.append_lines_to_file(remote, conf_path, lines,
- sudo=True)
-
- estatus_install = execute_ceph_deploy(ctx, config, install_nodes)
- if estatus_install != 0:
- raise RuntimeError("ceph-deploy: Failed to install ceph")
-
- mon_create_nodes = './ceph-deploy mon create-initial'
- # If the following fails, that is OK; it might just be that the monitors
- # are taking way more than a minute/monitor to form quorum, so let's
- # try the next block, which will wait up to 15 minutes to gatherkeys.
- execute_ceph_deploy(ctx, config, mon_create_nodes)
-
- estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
- max_gather_tries = 90
- gather_tries = 0
- while (estatus_gather != 0):
- gather_tries += 1
- if gather_tries >= max_gather_tries:
- msg = 'ceph-deploy was not able to gatherkeys after 15 minutes'
- raise RuntimeError(msg)
- estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
- time.sleep(10)
-
- if mds_nodes:
- estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds)
- if estatus_mds != 0:
- raise RuntimeError("ceph-deploy: Failed to deploy mds")
-
- if config.get('test_mon_destroy') is not None:
- for d in range(1, len(mon_node)):
- mon_destroy_nodes = './ceph-deploy mon destroy'+" "+mon_node[d]
- estatus_mon_d = execute_ceph_deploy(ctx, config,
- mon_destroy_nodes)
- if estatus_mon_d != 0:
- raise RuntimeError("ceph-deploy: Failed to delete monitor")
-
- node_dev_list = get_dev_for_osd(ctx, config)
- for d in node_dev_list:
- osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d
- estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
- if estatus_osd == 0:
- log.info('successfully created osd')
- no_of_osds += 1
- else:
- zap_disk = './ceph-deploy disk zap'+" "+d
- execute_ceph_deploy(ctx, config, zap_disk)
- estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
- if estatus_osd == 0:
- log.info('successfully created osd')
- no_of_osds += 1
- else:
- raise RuntimeError("ceph-deploy: Failed to create osds")
-
- if config.get('wait-for-healthy', True) and no_of_osds >= 2:
- is_healthy(ctx=ctx, config=None)
-
- log.info('Setting up client nodes...')
- conf_path = '/etc/ceph/ceph.conf'
- admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
- conf_data = teuthology.get_file(
- remote=mon0_remote,
- path=conf_path,
- sudo=True,
- )
- admin_keyring = teuthology.get_file(
- remote=mon0_remote,
- path=admin_keyring_path,
- sudo=True,
- )
-
- clients = ctx.cluster.only(teuthology.is_type('client'))
- for remot, roles_for_host in clients.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
- client_keyring = \
- '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
- mon0_remote.run(
- args=[
- 'cd',
- '{tdir}'.format(tdir=testdir),
- run.Raw('&&'),
- 'sudo', 'bash', '-c',
- run.Raw('"'), 'ceph',
- 'auth',
- 'get-or-create',
- 'client.{id}'.format(id=id_),
- 'mds', 'allow',
- 'mon', 'allow *',
- 'osd', 'allow *',
- run.Raw('>'),
- client_keyring,
- run.Raw('"'),
- ],
- )
- key_data = teuthology.get_file(
- remote=mon0_remote,
- path=client_keyring,
- sudo=True,
- )
- teuthology.sudo_write_file(
- remote=remot,
- path=client_keyring,
- data=key_data,
- perms='0644'
- )
- teuthology.sudo_write_file(
- remote=remot,
- path=admin_keyring_path,
- data=admin_keyring,
- perms='0644'
- )
- teuthology.sudo_write_file(
- remote=remot,
- path=conf_path,
- data=conf_data,
- perms='0644'
- )
- else:
- raise RuntimeError(
- "The cluster is NOT operational due to insufficient OSDs")
- yield
-
- finally:
- log.info('Stopping ceph...')
- ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
- 'sudo', 'service', 'ceph', 'stop' ])
-
- # Are you really not running anymore?
- # try first with the init tooling
- # ignoring the status so this becomes informational only
- ctx.cluster.run(args=['sudo', 'status', 'ceph-all', run.Raw('||'),
- 'sudo', 'service', 'ceph', 'status'],
- check_status=False)
-
- # and now just check for the processes themselves, as if upstart/sysvinit
- # is lying to us. Ignore errors if the grep fails
- ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'),
- 'grep', '-v', 'grep', run.Raw('|'),
- 'grep', 'ceph'], check_status=False)
-
- if ctx.archive is not None:
- # archive mon data, too
- log.info('Archiving mon data...')
- path = os.path.join(ctx.archive, 'data')
- os.makedirs(path)
- mons = ctx.cluster.only(teuthology.is_type('mon'))
- for remote, roles in mons.remotes.iteritems():
- for role in roles:
- if role.startswith('mon.'):
- teuthology.pull_directory_tarball(
- remote,
- '/var/lib/ceph/mon',
- path + '/' + role + '.tgz')
-
- log.info('Compressing logs...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'sudo',
- 'find',
- '/var/log/ceph',
- '-name',
- '*.log',
- '-print0',
- run.Raw('|'),
- 'sudo',
- 'xargs',
- '-0',
- '--no-run-if-empty',
- '--',
- 'gzip',
- '--',
- ],
- wait=False,
- ),
- )
-
- log.info('Archiving logs...')
- path = os.path.join(ctx.archive, 'remote')
- os.makedirs(path)
- for remote in ctx.cluster.remotes.iterkeys():
- sub = os.path.join(path, remote.shortname)
- os.makedirs(sub)
- teuthology.pull_directory(remote, '/var/log/ceph',
- os.path.join(sub, 'log'))
-
- # Prevent these from being undefined if the try block fails
- all_nodes = get_all_nodes(ctx, config)
- purge_nodes = './ceph-deploy purge'+" "+all_nodes
- purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
-
- log.info('Purging package...')
- execute_ceph_deploy(ctx, config, purge_nodes)
- log.info('Purging data...')
- execute_ceph_deploy(ctx, config, purgedata_nodes)
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Set up and tear down a Ceph cluster.
-
- For example::
-
- tasks:
- - install:
- extras: yes
- - ssh_keys:
- - ceph-deploy:
- branch:
- stable: bobtail
- mon_initial_members: 1
-
- tasks:
- - install:
- extras: yes
- - ssh_keys:
- - ceph-deploy:
- branch:
- dev: master
- conf:
- mon:
- debug mon = 20
-
- tasks:
- - install:
- extras: yes
- - ssh_keys:
- - ceph-deploy:
- branch:
- testing:
- """
- if config is None:
- config = {}
-
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
-
- assert isinstance(config, dict), \
- "task ceph-deploy only supports a dictionary for configuration"
-
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
-
- if config.get('branch') is not None:
- assert isinstance(config['branch'], dict), 'branch must be a dictionary'
-
- with contextutil.nested(
- lambda: install_fn.ship_utilities(ctx=ctx, config=None),
- lambda: download_ceph_deploy(ctx=ctx, config=config),
- lambda: build_ceph_cluster(ctx=ctx, config=dict(
- conf=config.get('conf', {}),
- branch=config.get('branch',{}),
- mon_initial_members=config.get('mon_initial_members', None),
- test_mon_destroy=config.get('test_mon_destroy', None),
- )),
- ):
- yield
+++ /dev/null
-"""
-Ceph FUSE client task
-"""
-import contextlib
-import logging
-import os
-import time
-from cStringIO import StringIO
-
-from teuthology import misc
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Mount/unmount a ``ceph-fuse`` client.
-
- The config is optional and defaults to mounting on all clients. If
- a config is given, it is expected to be a list of clients to do
- this operation on. This lets you e.g. set up one client with
- ``ceph-fuse`` and another with ``kclient``.
-
- Example that mounts all clients::
-
- tasks:
- - ceph:
- - ceph-fuse:
- - interactive:
-
- Example that uses both ``kclient`` and ``ceph-fuse``::
-
- tasks:
- - ceph:
- - ceph-fuse: [client.0]
- - kclient: [client.1]
- - interactive:
-
- Example that enables valgrind::
-
- tasks:
- - ceph:
- - ceph-fuse:
- client.0:
- valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
- - interactive:
-
- :param ctx: Context
- :param config: Configuration
- """
- log.info('Mounting ceph-fuse clients...')
- fuse_daemons = {}
-
- testdir = misc.get_testdir(ctx)
-
- if config is None:
- config = dict(('client.{id}'.format(id=id_), None)
- for id_ in misc.all_roles_of_type(ctx.cluster, 'client'))
- elif isinstance(config, list):
- config = dict((name, None) for name in config)
-
- overrides = ctx.config.get('overrides', {})
- misc.deep_merge(config, overrides.get('ceph-fuse', {}))
-
- clients = list(misc.get_clients(ctx=ctx, roles=config.keys()))
-
- for id_, remote in clients:
- client_config = config.get("client.%s" % id_)
- if client_config is None:
- client_config = {}
- log.info("Client client.%s config is %s" % (id_, client_config))
-
- daemon_signal = 'kill'
- if client_config.get('coverage') or client_config.get('valgrind') is not None:
- daemon_signal = 'term'
-
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
- id=id_, remote=remote,mnt=mnt))
-
- remote.run(
- args=[
- 'mkdir',
- '--',
- mnt,
- ],
- )
-
- run_cmd=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'daemon-helper',
- daemon_signal,
- ]
- run_cmd_tail=[
- 'ceph-fuse',
- '-f',
- '--name', 'client.{id}'.format(id=id_),
- # TODO ceph-fuse doesn't understand dash dash '--',
- mnt,
- ]
-
- if client_config.get('valgrind') is not None:
- run_cmd = misc.get_valgrind_args(
- testdir,
- 'client.{id}'.format(id=id_),
- run_cmd,
- client_config.get('valgrind'),
- )
-
- run_cmd.extend(run_cmd_tail)
-
- proc = remote.run(
- args=run_cmd,
- logger=log.getChild('ceph-fuse.{id}'.format(id=id_)),
- stdin=run.PIPE,
- wait=False,
- )
- fuse_daemons[id_] = proc
-
- for id_, remote in clients:
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- wait_until_fuse_mounted(
- remote=remote,
- fuse=fuse_daemons[id_],
- mountpoint=mnt,
- )
- remote.run(args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],)
-
- try:
- yield
- finally:
- log.info('Unmounting ceph-fuse clients...')
- for id_, remote in clients:
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- try:
- remote.run(
- args=[
- 'sudo',
- 'fusermount',
- '-u',
- mnt,
- ],
- )
- except run.CommandFailedError:
- log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=remote.name))
- # abort the fuse mount, killing all hung processes
- remote.run(
- args=[
- 'if', 'test', '-e', '/sys/fs/fuse/connections/*/abort',
- run.Raw(';'), 'then',
- 'echo',
- '1',
- run.Raw('>'),
- run.Raw('/sys/fs/fuse/connections/*/abort'),
- run.Raw(';'), 'fi',
- ],
- )
- # make sure it's unmounted
- remote.run(
- args=[
- 'sudo',
- 'umount',
- '-l',
- '-f',
- mnt,
- ],
- )
-
- run.wait(fuse_daemons.itervalues())
-
- for id_, remote in clients:
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- remote.run(
- args=[
- 'rmdir',
- '--',
- mnt,
- ],
- )
-
-
-def wait_until_fuse_mounted(remote, fuse, mountpoint):
- while True:
- proc = remote.run(
- args=[
- 'stat',
- '--file-system',
- '--printf=%T\n',
- '--',
- mountpoint,
- ],
- stdout=StringIO(),
- )
- fstype = proc.stdout.getvalue().rstrip('\n')
- if fstype == 'fuseblk':
- break
- log.debug('ceph-fuse not yet mounted, got fs type {fstype!r}'.format(fstype=fstype))
-
- # it shouldn't have exited yet; exposes some trivial problems
- assert not fuse.poll()
-
- time.sleep(5)
- log.info('ceph-fuse is mounted on %s', mountpoint)
+++ /dev/null
-"""
-ceph manager -- Thrasher and CephManager objects
-"""
-from cStringIO import StringIO
-import random
-import time
-import gevent
-import json
-import logging
-import threading
-import os
-from teuthology import misc as teuthology
-from tasks.scrub import Scrubber
-from teuthology.orchestra.remote import Remote
-
-log = logging.getLogger(__name__)
-
-def make_admin_daemon_dir(ctx, remote):
- """
- Create /var/run/ceph directory on remote site.
-
- :param ctx: Context
- :param remote: Remote site
- """
- remote.run(
- args=[
- 'sudo',
- 'install', '-d', '-m0777', '--', '/var/run/ceph',
- ],
- )
-
-
-def mount_osd_data(ctx, remote, osd):
- """
- Mount a remote OSD
-
- :param ctx: Context
- :param remote: Remote site
- :param osd: Osd name
- """
- log.debug('Mounting data for osd.{o} on {r}'.format(o=osd, r=remote))
- if remote in ctx.disk_config.remote_to_roles_to_dev and osd in ctx.disk_config.remote_to_roles_to_dev[remote]:
- dev = ctx.disk_config.remote_to_roles_to_dev[remote][osd]
- mount_options = ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][osd]
- fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][osd]
- mnt = os.path.join('/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd))
-
- log.info('Mounting osd.{o}: dev: {n}, mountpoint: {p}, type: {t}, options: {v}'.format(
- o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options))
-
- remote.run(
- args=[
- 'sudo',
- 'mount',
- '-t', fstype,
- '-o', ','.join(mount_options),
- dev,
- mnt,
- ]
- )
-
-
-class Thrasher:
- """
- Object used to thrash Ceph
- """
- def __init__(self, manager, config, logger=None):
- self.ceph_manager = manager
- self.ceph_manager.wait_for_clean()
- osd_status = self.ceph_manager.get_osd_status()
- self.in_osds = osd_status['in']
- self.live_osds = osd_status['live']
- self.out_osds = osd_status['out']
- self.dead_osds = osd_status['dead']
- self.stopping = False
- self.logger = logger
- self.config = config
- self.revive_timeout = self.config.get("revive_timeout", 150)
- if self.config.get('powercycle'):
- self.revive_timeout += 120
- self.clean_wait = self.config.get('clean_wait', 0)
- self.minin = self.config.get("min_in", 3)
- self.chance_move_pg = self.config.get('chance_move_pg', 1.0)
-
- num_osds = self.in_osds + self.out_osds
- self.max_pgs = self.config.get("max_pgs_per_pool_osd", 1200) * num_osds
- if self.logger is not None:
- self.log = lambda x: self.logger.info(x)
- else:
- def tmp(x):
- """
- Implement log behavior
- """
- print x
- self.log = tmp
- if self.config is None:
- self.config = dict()
- # prevent monitor from auto-marking things out while thrasher runs
- # try both old and new tell syntax, in case we are testing old code
- try:
- manager.raw_cluster_cmd('--', 'tell', 'mon.*', 'injectargs',
- '--mon-osd-down-out-interval 0')
- except Exception:
- manager.raw_cluster_cmd('--', 'mon', 'tell', '*', 'injectargs',
- '--mon-osd-down-out-interval 0')
- self.thread = gevent.spawn(self.do_thrash)
- if self.config.get('powercycle') or not self.cmd_exists_on_osds("ceph-objectstore-tool"):
- self.ceph_objectstore_tool = False
- self.test_rm_past_intervals = False
- if self.config.get('powercycle'):
- self.log("Unable to test ceph-objectstore-tool, "
- "powercycle testing")
- else:
- self.log("Unable to test ceph-objectstore-tool, "
- "not available on all OSD nodes")
- else:
- self.ceph_objectstore_tool = \
- self.config.get('ceph_objectstore_tool', True)
- self.test_rm_past_intervals = \
- self.config.get('test_rm_past_intervals', True)
-
- def cmd_exists_on_osds(self, cmd):
- allremotes = self.ceph_manager.ctx.cluster.only(\
- teuthology.is_type('osd')).remotes.keys()
- allremotes = list(set(allremotes))
- for remote in allremotes:
- proc = remote.run(args=['type', cmd], wait=True,
- check_status=False, stdout=StringIO(),
- stderr=StringIO())
- if proc.exitstatus != 0:
- return False
- return True
-
- def kill_osd(self, osd=None, mark_down=False, mark_out=False):
- """
- :param osd: Osd to be killed.
- :mark_down: Mark down if true.
- :mark_out: Mark out if true.
- """
- if osd is None:
- osd = random.choice(self.live_osds)
- self.log("Killing osd %s, live_osds are %s" % (str(osd), str(self.live_osds)))
- self.live_osds.remove(osd)
- self.dead_osds.append(osd)
- self.ceph_manager.kill_osd(osd)
- if mark_down:
- self.ceph_manager.mark_down_osd(osd)
- if mark_out and osd in self.in_osds:
- self.out_osd(osd)
- if self.ceph_objectstore_tool:
- self.log("Testing ceph-objectstore-tool on down osd")
- (remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
- FSPATH = self.ceph_manager.get_filepath()
- JPATH = os.path.join(FSPATH, "journal")
- exp_osd = imp_osd = osd
- exp_remote = imp_remote = remote
- # If an older osd is available we'll move a pg from there
- if len(self.dead_osds) > 1 and random.random() < self.chance_move_pg:
- exp_osd = random.choice(self.dead_osds[:-1])
- (exp_remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=exp_osd)).remotes.iterkeys()
- if 'keyvaluestore_backend' in self.ceph_manager.ctx.ceph.conf['osd']:
- prefix = "sudo ceph-objectstore-tool --data-path {fpath} --journal-path {jpath} --type keyvaluestore-dev --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH)
- else:
- prefix = "sudo ceph-objectstore-tool --data-path {fpath} --journal-path {jpath} --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH)
- cmd = (prefix + "--op list-pgs").format(id=exp_osd)
- proc = exp_remote.run(args=cmd, wait=True,
- check_status=False, stdout=StringIO())
- if proc.exitstatus:
- raise Exception("ceph-objectstore-tool: exp list-pgs failure with status {ret}".format(ret=proc.exitstatus))
- pgs = proc.stdout.getvalue().split('\n')[:-1]
- if len(pgs) == 0:
- self.log("No PGs found for osd.{osd}".format(osd=exp_osd))
- return
- pg = random.choice(pgs)
- exp_path = os.path.join(os.path.join(teuthology.get_testdir(self.ceph_manager.ctx), "data"), "exp.{pg}.{id}".format(pg=pg, id=exp_osd))
- # export
- cmd = (prefix + "--op export --pgid {pg} --file {file}").format(id=exp_osd, pg=pg, file=exp_path)
- proc = exp_remote.run(args=cmd)
- if proc.exitstatus:
- raise Exception("ceph-objectstore-tool: export failure with status {ret}".format(ret=proc.exitstatus))
- # remove
- cmd = (prefix + "--op remove --pgid {pg}").format(id=exp_osd, pg=pg)
- proc = exp_remote.run(args=cmd)
- if proc.exitstatus:
- raise Exception("ceph-objectstore-tool: remove failure with status {ret}".format(ret=proc.exitstatus))
- # If there are at least 2 dead osds we might move the pg
- if exp_osd != imp_osd:
- # If pg isn't already on this osd, then we will move it there
- cmd = (prefix + "--op list-pgs").format(id=imp_osd)
- proc = imp_remote.run(args=cmd, wait=True,
- check_status=False, stdout=StringIO())
- if proc.exitstatus:
- raise Exception("ceph-objectstore-tool: imp list-pgs failure with status {ret}".format(ret=proc.exitstatus))
- pgs = proc.stdout.getvalue().split('\n')[:-1]
- if pg not in pgs:
- self.log("Moving pg {pg} from osd.{fosd} to osd.{tosd}".format(pg=pg, fosd=exp_osd, tosd=imp_osd))
- if imp_remote != exp_remote:
- # Copy export file to the other machine
- self.log("Transfer export file from {srem} to {trem}".format(srem=exp_remote, trem=imp_remote))
- tmpexport = Remote.get_file(exp_remote, exp_path)
- Remote.put_file(imp_remote, tmpexport, exp_path)
- os.remove(tmpexport)
- else:
- # Can't move the pg after all
- imp_osd = exp_osd
- imp_remote = exp_remote
- # import
- cmd = (prefix + "--op import --file {file}")
- cmd = cmd.format(id=imp_osd, file=exp_path)
- proc = imp_remote.run(args=cmd, wait=True, check_status=False)
- if proc.exitstatus == 10:
- self.log("Pool went away before processing an import"
- "...ignored")
- elif proc.exitstatus == 11:
- self.log("Attempt to import an incompatible export"
- "...ignored")
- elif proc.exitstatus:
- raise Exception("ceph-objectstore-tool: "
- "import failure with status {ret}".
- format(ret=proc.exitstatus))
- cmd = "rm -f {file}".format(file=exp_path)
- exp_remote.run(args=cmd)
- if imp_remote != exp_remote:
- imp_remote.run(args=cmd)
-
- def rm_past_intervals(self, osd=None):
- """
- :param osd: Osd to find pg to remove past intervals
- """
- if self.test_rm_past_intervals:
- if osd is None:
- osd = random.choice(self.dead_osds)
- self.log("Use ceph_objectstore_tool to remove past intervals")
- (remote,) = self.ceph_manager.ctx.\
- cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
- FSPATH = self.ceph_manager.get_filepath()
- JPATH = os.path.join(FSPATH, "journal")
- if ('keyvaluestore_backend' in
- self.ceph_manager.ctx.ceph.conf['osd']):
- prefix = ("sudo ceph-objectstore-tool "
- "--data-path {fpath} --journal-path {jpath} "
- "--type keyvaluestore-dev "
- "--log-file="
- "/var/log/ceph/objectstore_tool.\\$pid.log ".
- format(fpath=FSPATH, jpath=JPATH))
- else:
- prefix = ("sudo ceph-objectstore-tool "
- "--data-path {fpath} --journal-path {jpath} "
- "--log-file="
- "/var/log/ceph/objectstore_tool.\\$pid.log ".
- format(fpath=FSPATH, jpath=JPATH))
- cmd = (prefix + "--op list-pgs").format(id=osd)
- proc = remote.run(args=cmd, wait=True,
- check_status=False, stdout=StringIO())
- if proc.exitstatus:
- raise Exception("ceph_objectstore_tool: "
- "exp list-pgs failure with status {ret}".
- format(ret=proc.exitstatus))
- pgs = proc.stdout.getvalue().split('\n')[:-1]
- if len(pgs) == 0:
- self.log("No PGs found for osd.{osd}".format(osd=osd))
- return
- pg = random.choice(pgs)
- cmd = (prefix + "--op rm-past-intervals --pgid {pg}").\
- format(id=osd, pg=pg)
- proc = remote.run(args=cmd)
- if proc.exitstatus:
- raise Exception("ceph_objectstore_tool: "
- "rm-past-intervals failure with status {ret}".
- format(ret=proc.exitstatus))
-
- def blackhole_kill_osd(self, osd=None):
- """
- If all else fails, kill the osd.
- :param osd: Osd to be killed.
- """
- if osd is None:
- osd = random.choice(self.live_osds)
- self.log("Blackholing and then killing osd %s, live_osds are %s" % (str(osd), str(self.live_osds)))
- self.live_osds.remove(osd)
- self.dead_osds.append(osd)
- self.ceph_manager.blackhole_kill_osd(osd)
-
- def revive_osd(self, osd=None):
- """
- Revive the osd.
- :param osd: Osd to be revived.
- """
- if osd is None:
- osd = random.choice(self.dead_osds)
- self.log("Reviving osd %s" % (str(osd),))
- self.live_osds.append(osd)
- self.dead_osds.remove(osd)
- self.ceph_manager.revive_osd(osd, self.revive_timeout)
-
- def out_osd(self, osd=None):
- """
- Mark the osd out
- :param osd: Osd to be marked.
- """
- if osd is None:
- osd = random.choice(self.in_osds)
- self.log("Removing osd %s, in_osds are: %s" % (str(osd), str(self.in_osds)))
- self.ceph_manager.mark_out_osd(osd)
- self.in_osds.remove(osd)
- self.out_osds.append(osd)
-
- def in_osd(self, osd=None):
- """
- Mark the osd in, reviving it first if it is currently dead.
- :param osd: Osd to be marked.
- """
- if osd is None:
- osd = random.choice(self.out_osds)
- if osd in self.dead_osds:
- return self.revive_osd(osd)
- self.log("Adding osd %s" % (str(osd),))
- self.out_osds.remove(osd)
- self.in_osds.append(osd)
- self.ceph_manager.mark_in_osd(osd)
- self.log("Added osd %s"%(str(osd),))
-
- def reweight_osd(self, osd=None):
- """
- Reweight an osd that is in
- :param osd: Osd to be marked.
- """
- if osd is None:
- osd = random.choice(self.in_osds)
- val = random.uniform(.1, 1.0)
- self.log("Reweighting osd %s to %s" % (str(osd), str(val)))
- self.ceph_manager.raw_cluster_cmd('osd', 'reweight', str(osd), str(val))
-
- def primary_affinity(self, osd=None):
- """
- Set a random primary-affinity value on an osd that is in.
- :param osd: Osd to adjust; a random in osd if None.
- """
- if osd is None:
- osd = random.choice(self.in_osds)
- if random.random() >= .5:
- pa = random.random()
- elif random.random() >= .5:
- pa = 1
- else:
- pa = 0
- self.log('Setting osd %s primary_affinity to %f' % (str(osd), pa))
- self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity', str(osd), str(pa))
-
- def all_up(self):
- """
- Make sure all osds are up and not out.
- """
- while len(self.dead_osds) > 0:
- self.log("reviving osd")
- self.revive_osd()
- while len(self.out_osds) > 0:
- self.log("inning osd")
- self.in_osd()
-
- def do_join(self):
- """
- Break out of this Ceph loop
- """
- self.stopping = True
- self.thread.get()
-
- def grow_pool(self):
- """
- Increase the size of the pool
- """
- pool = self.ceph_manager.get_pool()
- self.log("Growing pool %s"%(pool,))
- self.ceph_manager.expand_pool(pool, self.config.get('pool_grow_by', 10), self.max_pgs)
-
- def fix_pgp_num(self):
- """
- Fix number of pgs in pool.
- """
- pool = self.ceph_manager.get_pool()
- self.log("fixing pg num pool %s"%(pool,))
- self.ceph_manager.set_pool_pgpnum(pool)
-
- def test_pool_min_size(self):
- """
- Kill and revive all osds except one.
- """
- self.log("test_pool_min_size")
- self.all_up()
- self.ceph_manager.wait_for_recovery(
- timeout=self.config.get('timeout')
- )
- the_one = random.choice(self.in_osds)
- self.log("Killing everyone but %s", the_one)
- to_kill = filter(lambda x: x != the_one, self.in_osds)
- [self.kill_osd(i) for i in to_kill]
- [self.out_osd(i) for i in to_kill]
- time.sleep(self.config.get("test_pool_min_size_time", 10))
- self.log("Killing %s" % (the_one,))
- self.kill_osd(the_one)
- self.out_osd(the_one)
- self.log("Reviving everyone but %s" % (the_one,))
- [self.revive_osd(i) for i in to_kill]
- [self.in_osd(i) for i in to_kill]
- self.log("Revived everyone but %s" % (the_one,))
- self.log("Waiting for clean")
- self.ceph_manager.wait_for_recovery(
- timeout=self.config.get('timeout')
- )
-
- def inject_pause(self, conf_key, duration, check_after, should_be_down):
- """
- Inject a pause via conf_key on a random live osd for the given
- duration. If should_be_down, verify the osd is reported down while
- paused and is back up once the pause expires.
- """
- the_one = random.choice(self.live_osds)
- self.log("inject_pause on {osd}".format(osd = the_one))
- self.log(
- "Testing {key} pause injection for duration {duration}".format(
- key = conf_key,
- duration = duration
- ))
- self.log(
- "Checking after {after}, should_be_down={shouldbedown}".format(
- after = check_after,
- shouldbedown = should_be_down
- ))
- self.ceph_manager.set_config(the_one, **{conf_key:duration})
- if not should_be_down:
- return
- time.sleep(check_after)
- status = self.ceph_manager.get_osd_status()
- assert the_one in status['down']
- time.sleep(duration - check_after + 20)
- status = self.ceph_manager.get_osd_status()
- assert the_one not in status['down']
-
- def test_backfill_full(self):
- """
- Test backfills stopping when the replica fills up.
-
- First, use osd_backfill_full_ratio to simulate a now full
- osd by setting it to 0 on all of the OSDs.
-
- Second, on a random subset, set
- osd_debug_skip_full_check_in_backfill_reservation to force
- the more complicated check in do_scan to be exercised.
-
- Then, verify that all backfills stop.
- """
- self.log("injecting osd_backfill_full_ratio = 0")
- for i in self.live_osds:
- self.ceph_manager.set_config(
- i,
- osd_debug_skip_full_check_in_backfill_reservation = random.choice(
- ['false', 'true']),
- osd_backfill_full_ratio = 0)
- for i in range(30):
- status = self.ceph_manager.compile_pg_status()
- if 'backfill' not in status.keys():
- break
- self.log(
- "waiting for {still_going} backfills".format(
- still_going=status.get('backfill')))
- time.sleep(1)
- assert('backfill' not in self.ceph_manager.compile_pg_status().keys())
- for i in self.live_osds:
- self.ceph_manager.set_config(
- i,
- osd_debug_skip_full_check_in_backfill_reservation = \
- 'false',
- osd_backfill_full_ratio = 0.85)
-
- def test_map_discontinuity(self):
- """
- 1) Allows the osds to recover
- 2) kills an osd
- 3) allows the remaining osds to recover
- 4) waits for some time
- 5) revives the osd
- This sequence should cause the revived osd to have to handle
- a map gap since the mons would have trimmed
- """
- while len(self.in_osds) < (self.minin + 1):
- self.in_osd()
- self.log("Waiting for recovery")
- self.ceph_manager.wait_for_all_up(
- timeout=self.config.get('timeout')
- )
- # now we wait 20s for the pg status to change, if it takes longer,
- # the test *should* fail!
- time.sleep(20)
- self.ceph_manager.wait_for_clean(
- timeout=self.config.get('timeout')
- )
-
- # now we wait 20s for the backfill replicas to hear about the clean
- time.sleep(20)
- self.log("Recovered, killing an osd")
- self.kill_osd(mark_down=True, mark_out=True)
- self.log("Waiting for clean again")
- self.ceph_manager.wait_for_clean(
- timeout=self.config.get('timeout')
- )
- self.log("Waiting for trim")
- time.sleep(int(self.config.get("map_discontinuity_sleep_time", 40)))
- self.revive_osd()
-
- def choose_action(self):
- """
- Random action selector.
- """
- chance_down = self.config.get('chance_down', 0.4)
- chance_test_min_size = self.config.get('chance_test_min_size', 0)
- chance_test_backfill_full = self.config.get('chance_test_backfill_full', 0)
- if isinstance(chance_down, int):
- chance_down = float(chance_down) / 100
- minin = self.minin
- minout = self.config.get("min_out", 0)
- minlive = self.config.get("min_live", 2)
- mindead = self.config.get("min_dead", 0)
-
- self.log('choose_action: min_in %d min_out %d min_live %d min_dead %d' %
- (minin, minout, minlive, mindead))
- actions = []
- if len(self.in_osds) > minin:
- actions.append((self.out_osd, 1.0,))
- if len(self.live_osds) > minlive and chance_down > 0:
- actions.append((self.kill_osd, chance_down,))
- if len(self.dead_osds) > 1:
- actions.append((self.rm_past_intervals, 1.0,))
- if len(self.out_osds) > minout:
- actions.append((self.in_osd, 1.7,))
- if len(self.dead_osds) > mindead:
- actions.append((self.revive_osd, 1.0,))
- if self.config.get('thrash_primary_affinity', True):
- actions.append((self.primary_affinity, 1.0,))
- actions.append((self.reweight_osd, self.config.get('reweight_osd',.5),))
- actions.append((self.grow_pool, self.config.get('chance_pgnum_grow', 0),))
- actions.append((self.fix_pgp_num, self.config.get('chance_pgpnum_fix', 0),))
- actions.append((self.test_pool_min_size, chance_test_min_size,))
- actions.append((self.test_backfill_full, chance_test_backfill_full,))
- for key in ['heartbeat_inject_failure', 'filestore_inject_stall']:
- for scenario in [
- (lambda: self.inject_pause(key,
- self.config.get('pause_short', 3),
- 0,
- False),
- self.config.get('chance_inject_pause_short', 1),),
- (lambda: self.inject_pause(key,
- self.config.get('pause_long', 80),
- self.config.get('pause_check_after', 70),
- True),
- self.config.get('chance_inject_pause_long', 0),)]:
- actions.append(scenario)
-
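- # Weighted random selection: draw a value in [0, total) and walk the
- # action list, subtracting each weight until the draw falls within the
- # current action's slice.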
- total = sum([y for (x, y) in actions])
- val = random.uniform(0, total)
- for (action, prob) in actions:
- if val < prob:
- return action
- val -= prob
- return None
-
- def do_thrash(self):
- """
- Loop to select random actions to thrash ceph manager with.
- """
- cleanint = self.config.get("clean_interval", 60)
- scrubint = self.config.get("scrub_interval", -1)
- maxdead = self.config.get("max_dead", 0)
- delay = self.config.get("op_delay", 5)
- self.log("starting do_thrash")
- while not self.stopping:
- self.log(" ".join([str(x) for x in ["in_osds: ", self.in_osds, " out_osds: ", self.out_osds,
- "dead_osds: ", self.dead_osds, "live_osds: ",
- self.live_osds]]))
- if random.uniform(0, 1) < (float(delay) / cleanint):
- while len(self.dead_osds) > maxdead:
- self.revive_osd()
- for osd in self.in_osds:
- self.ceph_manager.raw_cluster_cmd('osd', 'reweight',
- str(osd), str(1))
- if random.uniform(0, 1) < float(
- self.config.get('chance_test_map_discontinuity', 0)):
- self.test_map_discontinuity()
- else:
- self.ceph_manager.wait_for_recovery(
- timeout=self.config.get('timeout')
- )
- time.sleep(self.clean_wait)
- if scrubint > 0:
- if random.uniform(0, 1) < (float(delay) / scrubint):
- self.log('Scrubbing while thrashing being performed')
- Scrubber(self.ceph_manager, self.config)
- self.choose_action()()
- time.sleep(delay)
- self.all_up()
-
-class CephManager:
- """
- Ceph manager object.
- Contains several local functions that form a bulk of this module.
- """
-
- REPLICATED_POOL = 1
- ERASURE_CODED_POOL = 3
-
- def __init__(self, controller, ctx=None, config=None, logger=None):
- self.lock = threading.RLock()
- self.ctx = ctx
- self.config = config
- self.controller = controller
- self.next_pool_id = 0
- self.created_erasure_pool = False
- if (logger):
- self.log = lambda x: logger.info(x)
- else:
- def tmp(x):
- """
- implement log behavior.
- """
- print x
- self.log = tmp
- if self.config is None:
- self.config = dict()
- pools = self.list_pools()
- self.pools = {}
- for pool in pools:
- self.pools[pool] = self.get_pool_property(pool, 'pg_num')
-
- def raw_cluster_cmd(self, *args):
- """
- Run a ceph command against the cluster and return its stdout.
- """
- testdir = teuthology.get_testdir(self.ctx)
- ceph_args = [
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'ceph',
- ]
- ceph_args.extend(args)
- proc = self.controller.run(
- args=ceph_args,
- stdout=StringIO(),
- )
- return proc.stdout.getvalue()
-
- def raw_cluster_cmd_result(self, *args):
- """
- Run a ceph command against the cluster and return its exit status.
- """
- testdir = teuthology.get_testdir(self.ctx)
- ceph_args = [
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'ceph',
- ]
- ceph_args.extend(args)
- proc = self.controller.run(
- args=ceph_args,
- check_status=False,
- )
- return proc.exitstatus
-
- def do_rados(self, remote, cmd):
- """
- Execute a remote rados command.
- """
- testdir = teuthology.get_testdir(self.ctx)
- pre = [
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rados',
- ]
- pre.extend(cmd)
- proc = remote.run(
- args=pre,
- wait=True,
- )
- return proc
-
- def rados_write_objects(
- self, pool, num_objects, size, timelimit, threads, cleanup=False):
- """
- Write rados objects
- Threads not used yet.
- """
- args = [
- '-p', pool,
- '--num-objects', num_objects,
- '-b', size,
- 'bench', timelimit,
- 'write'
- ]
- if not cleanup:
- args.append('--no-cleanup')
- return self.do_rados(self.controller, map(str, args))
-
- def do_put(self, pool, obj, fname):
- """
- Implement rados put operation
- """
- return self.do_rados(
- self.controller,
- [
- '-p',
- pool,
- 'put',
- obj,
- fname
- ]
- )
-
- def do_get(self, pool, obj, fname='/dev/null'):
- """
- Implement rados get operation
- """
- return self.do_rados(
- self.controller,
- [
- '-p',
- pool,
- 'stat',
- obj,
- fname
- ]
- )
-
- def osd_admin_socket(self, osdnum, command, check_status=True):
- """
- Remotely start up ceph specifying the admin socket
- :param command: a list of words to use as the command to the admin socket
- """
- testdir = teuthology.get_testdir(self.ctx)
- remote = None
- for _remote, roles_for_host in self.ctx.cluster.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
- if int(id_) == int(osdnum):
- remote = _remote
- assert remote is not None
- args = [
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'ceph',
- '--admin-daemon',
- '/var/run/ceph/ceph-osd.{id}.asok'.format(id=osdnum),
- ]
- args.extend(command)
- return remote.run(
- args=args,
- stdout=StringIO(),
- wait=True,
- check_status=check_status
- )
-
- def get_pgid(self, pool, pgnum):
- """
- :param pool: pool name
- :param pgnum: pg number
- :returns: a string representing this pg.
- """
- poolnum = self.get_pool_num(pool)
- pg_str = "{poolnum}.{pgnum}".format(
- poolnum=poolnum,
- pgnum=pgnum)
- return pg_str
-
- def get_pg_replica(self, pool, pgnum):
- """
- get replica for pool, pgnum (e.g. (data, 0)->0)
- """
- output = self.raw_cluster_cmd("pg", "dump", '--format=json')
- j = json.loads('\n'.join(output.split('\n')[1:]))
- pg_str = self.get_pgid(pool, pgnum)
- for pg in j['pg_stats']:
- if pg['pgid'] == pg_str:
- return int(pg['acting'][-1])
- assert False
-
- def get_pg_primary(self, pool, pgnum):
- """
- get primary for pool, pgnum (e.g. (data, 0)->0)
- """
- output = self.raw_cluster_cmd("pg", "dump", '--format=json')
- j = json.loads('\n'.join(output.split('\n')[1:]))
- pg_str = self.get_pgid(pool, pgnum)
- for pg in j['pg_stats']:
- if pg['pgid'] == pg_str:
- return int(pg['acting'][0])
- assert False
-
- def get_pool_num(self, pool):
- """
- get number for pool (e.g., data -> 2)
- """
- out = self.raw_cluster_cmd('osd', 'dump', '--format=json')
- j = json.loads('\n'.join(out.split('\n')[1:]))
- for i in j['pools']:
- if i['pool_name'] == pool:
- return int(i['pool'])
- assert False
-
- def list_pools(self):
- """
- list all pool names
- """
- out = self.raw_cluster_cmd('osd', 'dump', '--format=json')
- j = json.loads('\n'.join(out.split('\n')[1:]))
- self.log(j['pools'])
- return [str(i['pool_name']) for i in j['pools']]
-
- def clear_pools(self):
- """
- remove all pools
- """
- [self.remove_pool(i) for i in self.list_pools()]
-
- def kick_recovery_wq(self, osdnum):
- """
- Run kick_recovery_wq on cluster.
- """
- return self.raw_cluster_cmd(
- 'tell', "osd.%d" % (int(osdnum),),
- 'debug',
- 'kick_recovery_wq',
- '0')
-
- def wait_run_admin_socket(self, osdnum, args=['version'], timeout=300):
- """
- If the osd_admin_socket call succeeds, return. Otherwise wait
- five seconds and try again.
- """
- tries = 0
- while True:
- proc = self.osd_admin_socket(
- osdnum, args,
- check_status=False)
- if proc.exitstatus == 0:
- break
- else:
- tries += 1
- if (tries * 5) > timeout:
- raise Exception('timed out waiting for admin_socket to appear after osd.{o} restart'.format(o=osdnum))
- self.log(
- "waiting on admin_socket for {osdnum}, {command}".format(
- osdnum=osdnum,
- command=args))
- time.sleep(5)
-
- def get_pool_dump(self, pool):
- """
- get the osd dump part of a pool
- """
- osd_dump = self.get_osd_dump_json()
- for i in osd_dump['pools']:
- if i['pool_name'] == pool:
- return i
- assert False
-
- def set_config(self, osdnum, **argdict):
- """
- :param osdnum: osd number
- :param argdict: dictionary containing values to set.
- """
- for k, v in argdict.iteritems():
- self.wait_run_admin_socket(
- osdnum,
- ['config', 'set', str(k), str(v)])
-
- def raw_cluster_status(self):
- """
- Get status from cluster
- """
- status = self.raw_cluster_cmd('status', '--format=json-pretty')
- return json.loads(status)
-
- def raw_osd_status(self):
- """
- Get osd status from cluster
- """
- return self.raw_cluster_cmd('osd', 'dump')
-
- def get_osd_status(self):
- """
- Get osd statuses sorted by states that the osds are in.
- """
- osd_lines = filter(
- lambda x: x.startswith('osd.') and (("up" in x) or ("down" in x)),
- self.raw_osd_status().split('\n'))
- self.log(osd_lines)
- in_osds = [int(i[4:].split()[0]) for i in filter(
- lambda x: " in " in x,
- osd_lines)]
- out_osds = [int(i[4:].split()[0]) for i in filter(
- lambda x: " out " in x,
- osd_lines)]
- up_osds = [int(i[4:].split()[0]) for i in filter(
- lambda x: " up " in x,
- osd_lines)]
- down_osds = [int(i[4:].split()[0]) for i in filter(
- lambda x: " down " in x,
- osd_lines)]
- dead_osds = [int(x.id_) for x in
- filter(lambda x: not x.running(), self.ctx.daemons.iter_daemons_of_role('osd'))]
- live_osds = [int(x.id_) for x in
- filter(lambda x: x.running(), self.ctx.daemons.iter_daemons_of_role('osd'))]
- return { 'in' : in_osds, 'out' : out_osds, 'up' : up_osds,
- 'down' : down_osds, 'dead' : dead_osds, 'live' : live_osds,
- 'raw' : osd_lines}
-
- def get_num_pgs(self):
- """
- Check cluster status for the number of pgs
- """
- status = self.raw_cluster_status()
- self.log(status)
- return status['pgmap']['num_pgs']
-
- def create_pool_with_unique_name(self, pg_num=16, ec_pool=False, ec_m=1, ec_k=2):
- """
- Create a pool named unique_pool_X where X is unique.
- """
- name = ""
- with self.lock:
- name = "unique_pool_%s" % (str(self.next_pool_id),)
- self.next_pool_id += 1
- self.create_pool(
- name,
- pg_num,
- ec_pool=ec_pool,
- ec_m=ec_m,
- ec_k=ec_k)
- return name
-
- def create_pool(self, pool_name, pg_num=16, ec_pool=False, ec_m=1, ec_k=2):
- """
- Create a pool named from the pool_name parameter.
- :param pool_name: name of the pool being created.
- :param pg_num: initial number of pgs.
- """
- with self.lock:
- assert isinstance(pool_name, str)
- assert isinstance(pg_num, int)
- assert pool_name not in self.pools
- self.log("creating pool_name %s"%(pool_name,))
- if ec_pool and not self.created_erasure_pool:
- self.created_erasure_pool = True
- self.raw_cluster_cmd('osd', 'erasure-code-profile', 'set', 'teuthologyprofile', 'ruleset-failure-domain=osd', 'm='+str(ec_m), 'k='+str(ec_k))
-
- if ec_pool:
- self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num), str(pg_num), 'erasure', 'teuthologyprofile')
- else:
- self.raw_cluster_cmd('osd', 'pool', 'create', pool_name, str(pg_num))
- self.pools[pool_name] = pg_num
-
- def remove_pool(self, pool_name):
- """
- Remove the indicated pool
- :param pool_name: Pool to be removed
- """
- with self.lock:
- assert isinstance(pool_name, str)
- assert pool_name in self.pools
- self.log("removing pool_name %s" % (pool_name,))
- del self.pools[pool_name]
- self.do_rados(
- self.controller,
- ['rmpool', pool_name, pool_name, "--yes-i-really-really-mean-it"]
- )
-
- def get_pool(self):
- """
- Pick a random pool
- """
- with self.lock:
- return random.choice(self.pools.keys())
-
- def get_pool_pg_num(self, pool_name):
- """
- Return the number of pgs in the pool specified.
- """
- with self.lock:
- assert isinstance(pool_name, str)
- if pool_name in self.pools:
- return self.pools[pool_name]
- return 0
-
- def get_pool_property(self, pool_name, prop):
- """
- :param pool_name: pool
- :param prop: property to be checked.
- :returns: property as an int value.
- """
- with self.lock:
- assert isinstance(pool_name, str)
- assert isinstance(prop, str)
- output = self.raw_cluster_cmd(
- 'osd',
- 'pool',
- 'get',
- pool_name,
- prop)
- return int(output.split()[1])
-
- def set_pool_property(self, pool_name, prop, val):
- """
- :param pool_name: pool
- :param prop: property to be set.
- :param val: value to set.
-
- This routine retries if set operation fails.
- """
- with self.lock:
- assert isinstance(pool_name, str)
- assert isinstance(prop, str)
- assert isinstance(val, int)
- tries = 0
- while True:
- r = self.raw_cluster_cmd_result(
- 'osd',
- 'pool',
- 'set',
- pool_name,
- prop,
- str(val))
- if r != 11: # EAGAIN
- break
- tries += 1
- if tries > 50:
- raise Exception('timed out getting EAGAIN when setting pool property %s %s = %s' % (pool_name, prop, val))
- self.log('got EAGAIN setting pool property, waiting a few seconds...')
- time.sleep(2)
-
- def expand_pool(self, pool_name, by, max_pgs):
- """
- Increase the number of pgs in a pool
- """
- with self.lock:
- assert isinstance(pool_name, str)
- assert isinstance(by, int)
- assert pool_name in self.pools
- if self.get_num_creating() > 0:
- return
- if (self.pools[pool_name] + by) > max_pgs:
- return
- self.log("increase pool size by %d"%(by,))
- new_pg_num = self.pools[pool_name] + by
- self.set_pool_property(pool_name, "pg_num", new_pg_num)
- self.pools[pool_name] = new_pg_num
-
- def set_pool_pgpnum(self, pool_name):
- """
- Set pgpnum property of pool_name pool.
- """
- with self.lock:
- assert isinstance(pool_name, str)
- assert pool_name in self.pools
- if self.get_num_creating() > 0:
- return
- self.set_pool_property(pool_name, 'pgp_num', self.pools[pool_name])
-
- def list_pg_missing(self, pgid):
- """
- return list of missing pgs with the id specified
- """
- r = None
- offset = {}
- while True:
- out = self.raw_cluster_cmd('--', 'pg', pgid, 'list_missing',
- json.dumps(offset))
- j = json.loads(out)
- if r is None:
- r = j
- else:
- r['objects'].extend(j['objects'])
- if 'more' not in j:
- break
- if j['more'] == 0:
- break
- offset = j['objects'][-1]['oid']
- if 'more' in r:
- del r['more']
- return r
-
- def get_pg_stats(self):
- """
- Dump the cluster and get pg stats
- """
- out = self.raw_cluster_cmd('pg', 'dump', '--format=json')
- j = json.loads('\n'.join(out.split('\n')[1:]))
- return j['pg_stats']
-
- def compile_pg_status(self):
- """
- Return a histogram of pg state values
- """
- ret = {}
- j = self.get_pg_stats()
- for pg in j:
- for status in pg['state'].split('+'):
- if status not in ret:
- ret[status] = 0
- ret[status] += 1
- return ret
-
- def pg_scrubbing(self, pool, pgnum):
- """
- pg scrubbing wrapper
- """
- pgstr = self.get_pgid(pool, pgnum)
- stats = self.get_single_pg_stats(pgstr)
- return 'scrub' in stats['state']
-
- def pg_repairing(self, pool, pgnum):
- """
- pg repairing wrapper
- """
- pgstr = self.get_pgid(pool, pgnum)
- stats = self.get_single_pg_stats(pgstr)
- return 'repair' in stats['state']
-
- def pg_inconsistent(self, pool, pgnum):
- """
- pg inconsistent wrapper
- """
- pgstr = self.get_pgid(pool, pgnum)
- stats = self.get_single_pg_stats(pgstr)
- return 'inconsistent' in stats['state']
-
- def get_last_scrub_stamp(self, pool, pgnum):
- """
- Get the timestamp of the last scrub.
- """
- stats = self.get_single_pg_stats(self.get_pgid(pool, pgnum))
- return stats["last_scrub_stamp"]
-
- def do_pg_scrub(self, pool, pgnum, stype):
- """
- Scrub pg and wait for scrubbing to finish
- """
- init = self.get_last_scrub_stamp(pool, pgnum)
- self.raw_cluster_cmd('pg', stype, self.get_pgid(pool, pgnum))
- while init == self.get_last_scrub_stamp(pool, pgnum):
- self.log("waiting for scrub type %s"%(stype,))
- time.sleep(10)
-
- def get_single_pg_stats(self, pgid):
- """
- Return pg for the pgid specified.
- """
- all_stats = self.get_pg_stats()
-
- for pg in all_stats:
- if pg['pgid'] == pgid:
- return pg
-
- return None
-
- def get_osd_dump_json(self):
- """
- osd dump --format=json converted to a python object
- :returns: the python object
- """
- out = self.raw_cluster_cmd('osd', 'dump', '--format=json')
- return json.loads('\n'.join(out.split('\n')[1:]))
-
- def get_osd_dump(self):
- """
- Dump osds
- :returns: all osds
- """
- out = self.raw_cluster_cmd('osd', 'dump', '--format=json')
- j = json.loads('\n'.join(out.split('\n')[1:]))
- return j['osds']
-
- def get_stuck_pgs(self, type_, threshold):
- """
- :returns: stuck pg information from the cluster
- """
- out = self.raw_cluster_cmd('pg', 'dump_stuck', type_, str(threshold),
- '--format=json')
- return json.loads(out)
-
- def get_num_unfound_objects(self):
- """
- Check cluster status to get the number of unfound objects
- """
- status = self.raw_cluster_status()
- self.log(status)
- return status['pgmap'].get('unfound_objects', 0)
-
- def get_num_creating(self):
- """
- Find the number of pgs in creating mode.
- """
- pgs = self.get_pg_stats()
- num = 0
- for pg in pgs:
- if 'creating' in pg['state']:
- num += 1
- return num
-
- def get_num_active_clean(self):
- """
- Find the number of active and clean pgs.
- """
- pgs = self.get_pg_stats()
- num = 0
- for pg in pgs:
- if pg['state'].count('active') and pg['state'].count('clean') and not pg['state'].count('stale'):
- num += 1
- return num
-
- def get_num_active_recovered(self):
- """
- Find the number of active and recovered pgs.
- """
- pgs = self.get_pg_stats()
- num = 0
- for pg in pgs:
- if pg['state'].count('active') and not pg['state'].count('recover') and not pg['state'].count('backfill') and not pg['state'].count('stale'):
- num += 1
- return num
-
- def get_is_making_recovery_progress(self):
- """
- Return whether there is recovery progress discernible in the
- raw cluster status
- """
- status = self.raw_cluster_status()
- kps = status['pgmap'].get('recovering_keys_per_sec', 0)
- bps = status['pgmap'].get('recovering_bytes_per_sec', 0)
- ops = status['pgmap'].get('recovering_objects_per_sec', 0)
- return kps > 0 or bps > 0 or ops > 0
-
- def get_num_active(self):
- """
- Find the number of active pgs.
- """
- pgs = self.get_pg_stats()
- num = 0
- for pg in pgs:
- if pg['state'].count('active') and not pg['state'].count('stale'):
- num += 1
- return num
-
- def get_num_down(self):
- """
- Find the number of pgs that are down.
- """
- pgs = self.get_pg_stats()
- num = 0
- for pg in pgs:
- if (pg['state'].count('down') and not pg['state'].count('stale')) or \
- (pg['state'].count('incomplete') and not pg['state'].count('stale')):
- num += 1
- return num
-
- def get_num_active_down(self):
- """
- Find the number of pgs that are either active or down.
- """
- pgs = self.get_pg_stats()
- num = 0
- for pg in pgs:
- if (pg['state'].count('active') and not pg['state'].count('stale')) or \
- (pg['state'].count('down') and not pg['state'].count('stale')) or \
- (pg['state'].count('incomplete') and not pg['state'].count('stale')):
- num += 1
- return num
-
- def is_clean(self):
- """
- True if all pgs are clean
- """
- return self.get_num_active_clean() == self.get_num_pgs()
-
- def is_recovered(self):
- """
- True if all pgs have recovered
- """
- return self.get_num_active_recovered() == self.get_num_pgs()
-
- def is_active_or_down(self):
- """
- True if all pgs are active or down
- """
- return self.get_num_active_down() == self.get_num_pgs()
-
- def wait_for_clean(self, timeout=None):
- """
- Wait until all pgs report active+clean.
- """
- self.log("waiting for clean")
- start = time.time()
- num_active_clean = self.get_num_active_clean()
- while not self.is_clean():
- if timeout is not None:
- if self.get_is_making_recovery_progress():
- self.log("making progress, resetting timeout")
- start = time.time()
- else:
- self.log("no progress seen, keeping timeout for now")
- assert time.time() - start < timeout, \
- 'failed to become clean before timeout expired'
- cur_active_clean = self.get_num_active_clean()
- if cur_active_clean != num_active_clean:
- start = time.time()
- num_active_clean = cur_active_clean
- time.sleep(3)
- self.log("clean!")
-
- def are_all_osds_up(self):
- """
- Returns true if all osds are up.
- """
- x = self.get_osd_dump()
- return (len(x) == \
- sum([(y['up'] > 0) for y in x]))
-
- def wait_for_all_up(self, timeout=None):
- """
- When this exits, either the timeout has expired, or all
- osds are up.
- """
- self.log("waiting for all up")
- start = time.time()
- while not self.are_all_osds_up():
- if timeout is not None:
- assert time.time() - start < timeout, \
- 'timeout expired in wait_for_all_up'
- time.sleep(3)
- self.log("all up!")
-
- def wait_for_recovery(self, timeout=None):
- """
- Check peering. When this exits, we have recovered.
- """
- self.log("waiting for recovery to complete")
- start = time.time()
- num_active_recovered = self.get_num_active_recovered()
- while not self.is_recovered():
- if timeout is not None:
- if self.get_is_making_recovery_progress():
- self.log("making progress, resetting timeout")
- start = time.time()
- else:
- self.log("no progress seen, keeping timeout for now")
- assert time.time() - start < timeout, \
- 'failed to recover before timeout expired'
- cur_active_recovered = self.get_num_active_recovered()
- if cur_active_recovered != num_active_recovered:
- start = time.time()
- num_active_recovered = cur_active_recovered
- time.sleep(3)
- self.log("recovered!")
-
- def wait_for_active(self, timeout=None):
- """
- Check peering. When this exits, we are definitely active.
- """
- self.log("waiting for peering to complete")
- start = time.time()
- num_active = self.get_num_active()
- while not self.is_active():
- if timeout is not None:
- assert time.time() - start < timeout, \
- 'failed to recover before timeout expired'
- cur_active = self.get_num_active()
- if cur_active != num_active:
- start = time.time()
- num_active = cur_active
- time.sleep(3)
- self.log("active!")
-
- def wait_for_active_or_down(self, timeout=None):
- """
- Check peering. When this exits, we are definitely either
- active or down.
- """
- self.log("waiting for peering to complete or become blocked")
- start = time.time()
- num_active_down = self.get_num_active_down()
- while not self.is_active_or_down():
- if timeout is not None:
- assert time.time() - start < timeout, \
- 'failed to recover before timeout expired'
- cur_active_down = self.get_num_active_down()
- if cur_active_down != num_active_down:
- start = time.time()
- num_active_down = cur_active_down
- time.sleep(3)
- self.log("active or down!")
-
- def osd_is_up(self, osd):
- """
- Wrapper for osd check
- """
- osds = self.get_osd_dump()
- return osds[osd]['up'] > 0
-
- def wait_till_osd_is_up(self, osd, timeout=None):
- """
- Loop waiting for osd.
- """
- self.log('waiting for osd.%d to be up' % osd)
- start = time.time()
- while not self.osd_is_up(osd):
- if timeout is not None:
- assert time.time() - start < timeout, \
- 'osd.%d failed to come up before timeout expired' % osd
- time.sleep(3)
- self.log('osd.%d is up' % osd)
-
- def is_active(self):
- """
- Wrapper to check if active
- """
- return self.get_num_active() == self.get_num_pgs()
-
- def wait_till_active(self, timeout=None):
- """
- Wait until osds are active.
- """
- self.log("waiting till active")
- start = time.time()
- while not self.is_active():
- if timeout is not None:
- assert time.time() - start < timeout, \
- 'failed to become active before timeout expired'
- time.sleep(3)
- self.log("active!")
-
- def mark_out_osd(self, osd):
- """
- Wrapper to mark osd out.
- """
- self.raw_cluster_cmd('osd', 'out', str(osd))
-
- def kill_osd(self, osd):
- """
- Kill osds by either power cycling (if indicated by the config)
- or by stopping.
- """
- if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
- self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name))
- assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
- remote.console.power_off()
- else:
- self.ctx.daemons.get_daemon('osd', osd).stop()
-
- def blackhole_kill_osd(self, osd):
- """
- Stop osd if nothing else works.
- """
- self.raw_cluster_cmd('--', 'tell', 'osd.%d' % osd,
- 'injectargs', '--filestore-blackhole')
- time.sleep(2)
- self.ctx.daemons.get_daemon('osd', osd).stop()
-
- def revive_osd(self, osd, timeout=150):
- """
- Revive osds by either power cycling (if indicated by the config)
- or by restarting.
- """
- if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
- self.log('revive_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name))
- assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
- remote.console.power_on()
- if not remote.console.check_status(300):
- raise Exception('Failed to revive osd.{o} via ipmi'.format(o=osd))
- teuthology.reconnect(self.ctx, 60, [remote])
- mount_osd_data(self.ctx, remote, str(osd))
- make_admin_daemon_dir(self.ctx, remote)
- self.ctx.daemons.get_daemon('osd', osd).reset()
- self.ctx.daemons.get_daemon('osd', osd).restart()
- # wait for dump_ops_in_flight; this command doesn't appear
- # until after the signal handler is installed and it is safe
- # to stop the osd again without making valgrind leak checks
- # unhappy. see #5924.
- self.wait_run_admin_socket(osd,
- args=['dump_ops_in_flight'],
- timeout=timeout)
-
- def mark_down_osd(self, osd):
- """
- Cluster command wrapper
- """
- self.raw_cluster_cmd('osd', 'down', str(osd))
-
- def mark_in_osd(self, osd):
- """
- Cluster command wrapper
- """
- self.raw_cluster_cmd('osd', 'in', str(osd))
-
-
- ## monitors
-
- def signal_mon(self, mon, sig):
- """
- Wrapper to local get_daemon call
- """
- self.ctx.daemons.get_daemon('mon', mon).signal(sig)
-
- def kill_mon(self, mon):
- """
- Kill the monitor by either power cycling (if the config says so),
- or by doing a stop.
- """
- if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys()
- self.log('kill_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name))
- assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
- remote.console.power_off()
- else:
- self.ctx.daemons.get_daemon('mon', mon).stop()
-
- def revive_mon(self, mon):
- """
- Restart by either power cycling (if the config says so),
- or by doing a normal restart.
- """
- if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys()
- self.log('revive_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name))
- assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
- remote.console.power_on()
- make_admin_daemon_dir(self.ctx, remote)
- self.ctx.daemons.get_daemon('mon', mon).restart()
-
- def get_mon_status(self, mon):
- """
- Extract all the monitor status information from the cluster
- """
- addr = self.ctx.ceph.conf['mon.%s' % mon]['mon addr']
- out = self.raw_cluster_cmd('-m', addr, 'mon_status')
- return json.loads(out)
-
- def get_mon_quorum(self):
- """
- Extract monitor quorum information from the cluster
- """
- out = self.raw_cluster_cmd('quorum_status')
- j = json.loads(out)
- self.log('quorum_status is %s' % out)
- return j['quorum']
-
- def wait_for_mon_quorum_size(self, size, timeout=300):
- """
- Loop until quorum size is reached.
- """
- self.log('waiting for quorum size %d' % size)
- start = time.time()
- while not len(self.get_mon_quorum()) == size:
- if timeout is not None:
- assert time.time() - start < timeout, \
- 'failed to reach quorum size %d before timeout expired' % size
- time.sleep(3)
- self.log("quorum is size %d" % size)
-
- def get_mon_health(self, debug=False):
- """
- Extract all the monitor health information.
- """
- out = self.raw_cluster_cmd('health', '--format=json')
- if debug:
- self.log('health:\n{h}'.format(h=out))
- return json.loads(out)
-
- ## metadata servers
-
- def kill_mds(self, mds):
- """
- Powercycle if set in config, otherwise just stop.
- """
- if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys()
- self.log('kill_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name))
- assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
- remote.console.power_off()
- else:
- self.ctx.daemons.get_daemon('mds', mds).stop()
-
- def kill_mds_by_rank(self, rank):
- """
- kill_mds wrapper to kill based on rank passed.
- """
- status = self.get_mds_status_by_rank(rank)
- self.kill_mds(status['name'])
-
- def revive_mds(self, mds, standby_for_rank=None):
- """
- Revive mds -- do an ipmi powercycle (if indicated by the config)
- and then restart (using --hot-standby if specified).
- """
- if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys()
- self.log('revive_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name))
- assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
- remote.console.power_on()
- make_admin_daemon_dir(self.ctx, remote)
- args = []
- if standby_for_rank:
- args.extend(['--hot-standby', standby_for_rank])
- self.ctx.daemons.get_daemon('mds', mds).restart(*args)
-
- def revive_mds_by_rank(self, rank, standby_for_rank=None):
- """
- revive_mds wrapper to revive based on rank passed.
- """
- status = self.get_mds_status_by_rank(rank)
- self.revive_mds(status['name'], standby_for_rank)
-
- def get_mds_status(self, mds):
- """
- Run cluster commands for the mds in order to get mds information
- """
- out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
- j = json.loads(' '.join(out.splitlines()[1:]))
- # collate; for dup ids, larger gid wins.
- for info in j['info'].itervalues():
- if info['name'] == mds:
- return info
- return None
-
- def get_mds_status_by_rank(self, rank):
- """
- Run cluster commands for the mds in order to get mds information
- check rank.
- """
- out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
- j = json.loads(' '.join(out.splitlines()[1:]))
- # collate; for dup ids, larger gid wins.
- for info in j['info'].itervalues():
- if info['rank'] == rank:
- return info
- return None
-
- def get_mds_status_all(self):
- """
- Run cluster command to extract all the mds status.
- """
- out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
- j = json.loads(' '.join(out.splitlines()[1:]))
- return j
-
- def get_filepath(self):
- """
- Return path to osd data with {id} needing to be replaced
- """
- return "/var/lib/ceph/osd/ceph-{id}"
-
-def utility_task(name):
- """
- Generate ceph_manager subtask corresponding to ceph_manager
- method name
- """
- def task(ctx, config):
- if config is None:
- config = {}
- args = config.get('args', [])
- kwargs = config.get('kwargs', {})
- fn = getattr(ctx.manager, name)
- fn(*args, **kwargs)
- return task
-
-revive_osd = utility_task("revive_osd")
-kill_osd = utility_task("kill_osd")
-create_pool = utility_task("create_pool")
-remove_pool = utility_task("remove_pool")
-wait_for_clean = utility_task("wait_for_clean")
-set_pool_property = utility_task("set_pool_property")
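-# These wrappers expose CephManager methods as teuthology subtasks. A
-# minimal sketch of how a suite fragment might invoke one (hypothetical
-# yaml; assumes the usual dotted task-name lookup for tasks/ceph_manager.py):
-#
-#   tasks:
-#   - ceph_manager.wait_for_clean:
-#       kwargs:
-#         timeout: 300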
+++ /dev/null
-"""
-ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility
-"""
-from cStringIO import StringIO
-import contextlib
-import logging
-import ceph_manager
-from teuthology import misc as teuthology
-import time
-import os
-import string
-from teuthology.orchestra import run
-import sys
-import tempfile
-import json
-from util.rados import (rados, create_replicated_pool, create_ec_pool)
-# from util.rados import (rados, create_ec_pool,
-# create_replicated_pool,
-# create_cache_pool)
-
-log = logging.getLogger(__name__)
-
-# Should get cluster name "ceph" from somewhere
-# and normal path from osd_data and osd_journal in conf
-FSPATH = "/var/lib/ceph/osd/ceph-{id}"
-JPATH = "/var/lib/ceph/osd/ceph-{id}/journal"
-
-
-def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR,
- BASE_NAME, DATALINECOUNT):
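- """
- Create NUM_OBJECTS files under DATADIR on the local host, each holding
- DATALINECOUNT copies of a line that names the object.
- """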
- objects = range(1, NUM_OBJECTS + 1)
- for i in objects:
- NAME = BASE_NAME + "{num}".format(num=i)
- LOCALNAME = os.path.join(DATADIR, NAME)
-
- dataline = range(DATALINECOUNT)
- fd = open(LOCALNAME, "w")
- data = "This is the data for " + NAME + "\n"
- for _ in dataline:
- fd.write(data)
- fd.close()
-
-
-def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
- BASE_NAME, DATALINECOUNT):
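- """
- Create the same NUM_OBJECTS data files under DATADIR on the given remote.
- """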
-
- objects = range(1, NUM_OBJECTS + 1)
- for i in objects:
- NAME = BASE_NAME + "{num}".format(num=i)
- DDNAME = os.path.join(DATADIR, NAME)
-
- remote.run(args=['rm', '-f', DDNAME])
-
- dataline = range(DATALINECOUNT)
- data = "This is the data for " + NAME + "\n"
- DATA = ""
- for _ in dataline:
- DATA += data
- teuthology.write_file(remote, DDNAME, DATA)
-
-
-def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR,
- BASE_NAME, DATALINECOUNT, POOL, db, ec):
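- """
- Put NUM_OBJECTS objects into POOL and decorate them with xattrs, an
- omap header and omap keys (omap is skipped for ec pools), recording
- everything written in db for later verification. Returns the number
- of errors encountered.
- """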
- ERRORS = 0
- log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS))
-
- objects = range(1, NUM_OBJECTS + 1)
- for i in objects:
- NAME = BASE_NAME + "{num}".format(num=i)
- DDNAME = os.path.join(DATADIR, NAME)
-
- proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME],
- wait=False)
- # proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME])
- ret = proc.wait()
- if ret != 0:
- log.critical("Rados put failed with status {ret}".
- format(ret=proc.exitstatus))
- sys.exit(1)
-
- db[NAME] = {}
-
- keys = range(i)
- db[NAME]["xattr"] = {}
- for k in keys:
- if k == 0:
- continue
- mykey = "key{i}-{k}".format(i=i, k=k)
- myval = "val{i}-{k}".format(i=i, k=k)
- proc = remote.run(args=['rados', '-p', POOL, 'setxattr',
- NAME, mykey, myval])
- ret = proc.wait()
- if ret != 0:
- log.error("setxattr failed with {ret}".format(ret=ret))
- ERRORS += 1
- db[NAME]["xattr"][mykey] = myval
-
- # Erasure coded pools don't support omap
- if ec:
- continue
-
- # Create omap header in all objects but REPobject1
- if i != 1:
- myhdr = "hdr{i}".format(i=i)
- proc = remote.run(args=['rados', '-p', POOL, 'setomapheader',
- NAME, myhdr])
- ret = proc.wait()
- if ret != 0:
- log.critical("setomapheader failed with {ret}".format(ret=ret))
- ERRORS += 1
- db[NAME]["omapheader"] = myhdr
-
- db[NAME]["omap"] = {}
- for k in keys:
- if k == 0:
- continue
- mykey = "okey{i}-{k}".format(i=i, k=k)
- myval = "oval{i}-{k}".format(i=i, k=k)
- proc = remote.run(args=['rados', '-p', POOL, 'setomapval',
- NAME, mykey, myval])
- ret = proc.wait()
- if ret != 0:
- log.critical("setomapval failed with {ret}".format(ret=ret))
- db[NAME]["omap"][mykey] = myval
-
- return ERRORS
-
-
-def get_lines(filename):
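- """
- Return the non-empty lines of filename, then unlink it.
- """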
- tmpfd = open(filename, "r")
- line = True
- lines = []
- while line:
- line = tmpfd.readline().rstrip('\n')
- if line:
- lines += [line]
- tmpfd.close()
- os.unlink(filename)
- return lines
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run ceph_objectstore_tool test
-
- The config should be as follows::
-
- ceph_objectstore_tool:
- objects: 20 # <number of objects>
- pgnum: 12
- """
-
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'ceph_objectstore_tool task only accepts a dict for configuration'
-
- log.info('Beginning ceph_objectstore_tool...')
-
- log.debug(config)
- log.debug(ctx)
- clients = ctx.cluster.only(teuthology.is_type('client'))
- assert len(clients.remotes) > 0, 'Must specify at least 1 client'
- (cli_remote, _) = clients.remotes.popitem()
- log.debug(cli_remote)
-
- # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
- # client = clients.popitem()
- # log.info(client)
- osds = ctx.cluster.only(teuthology.is_type('osd'))
- log.info("OSDS")
- log.info(osds)
- log.info(osds.remotes)
-
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- config=config,
- logger=log.getChild('ceph_manager'),
- )
- ctx.manager = manager
-
- while (len(manager.get_osd_status()['up']) !=
- len(manager.get_osd_status()['raw'])):
- time.sleep(10)
- while (len(manager.get_osd_status()['in']) !=
- len(manager.get_osd_status()['up'])):
- time.sleep(10)
- manager.raw_cluster_cmd('osd', 'set', 'noout')
- manager.raw_cluster_cmd('osd', 'set', 'nodown')
-
- PGNUM = config.get('pgnum', 12)
- log.info("pgnum: {num}".format(num=PGNUM))
-
- ERRORS = 0
-
- REP_POOL = "rep_pool"
- REP_NAME = "REPobject"
- create_replicated_pool(cli_remote, REP_POOL, PGNUM)
- ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)
-
- EC_POOL = "ec_pool"
- EC_NAME = "ECobject"
- create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
- ERRORS += test_objectstore(ctx, config, cli_remote,
- EC_POOL, EC_NAME, ec=True)
-
- if ERRORS == 0:
- log.info("TEST PASSED")
- else:
- log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))
-
- assert ERRORS == 0
-
- try:
- yield
- finally:
- log.info('Ending ceph_objectstore_tool')
-
-
-def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
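- """
- Stop all osds, then exercise ceph-objectstore-tool on each one:
- --op list, get/set-bytes (replicated pools only), attr operations,
- and per-pg --op info and pg log checks. Returns the error count.
- """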
- manager = ctx.manager
-
- osds = ctx.cluster.only(teuthology.is_type('osd'))
-
- TEUTHDIR = teuthology.get_testdir(ctx)
- DATADIR = os.path.join(TEUTHDIR, "data")
- DATALINECOUNT = 10000
- ERRORS = 0
- NUM_OBJECTS = config.get('objects', 10)
- log.info("objects: {num}".format(num=NUM_OBJECTS))
-
- pool_dump = manager.get_pool_dump(REP_POOL)
- REPID = pool_dump['pool']
-
- log.debug("repid={num}".format(num=REPID))
-
- db = {}
-
- LOCALDIR = tempfile.mkdtemp("cod")
-
- cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR,
- REP_NAME, DATALINECOUNT)
- allremote = []
- allremote.append(cli_remote)
- allremote += osds.remotes.keys()
- allremote = list(set(allremote))
- for remote in allremote:
- cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
- REP_NAME, DATALINECOUNT)
-
- ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR,
- REP_NAME, DATALINECOUNT, REP_POOL, db, ec)
-
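- # Build a map from osd id to the pgs it holds (adding an sN shard
- # suffix for erasure-coded pools), based on each pg's acting set.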
- pgs = {}
- for stats in manager.get_pg_stats():
- if stats["pgid"].find(str(REPID) + ".") != 0:
- continue
- if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL:
- for osd in stats["acting"]:
- pgs.setdefault(osd, []).append(stats["pgid"])
- elif pool_dump["type"] == ceph_manager.CephManager.ERASURE_CODED_POOL:
- shard = 0
- for osd in stats["acting"]:
- pgs.setdefault(osd, []).append("{pgid}s{shard}".
- format(pgid=stats["pgid"],
- shard=shard))
- shard += 1
- else:
- raise Exception("{pool} has an unexpected type {type}".
- format(pool=REP_POOL, type=pool_dump["type"]))
-
- log.info(pgs)
- log.info(db)
-
- for osd in manager.get_osd_status()['up']:
- manager.kill_osd(osd)
- time.sleep(5)
-
- pgswithobjects = set()
- objsinpg = {}
-
- # Test --op list and generate json for all objects
- log.info("Test --op list by generating json for all objects")
- prefix = ("sudo ceph-objectstore-tool "
- "--data-path {fpath} "
- "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH)
- for remote in osds.remotes.iterkeys():
- log.debug(remote)
- log.debug(osds.remotes[remote])
- for role in osds.remotes[remote]:
- if string.find(role, "osd.") != 0:
- continue
- osdid = int(role.split('.')[1])
- log.info("process osd.{id} on {remote}".
- format(id=osdid, remote=remote))
- cmd = (prefix + "--op list").format(id=osdid)
- proc = remote.run(args=cmd.split(), check_status=False,
- stdout=StringIO())
- if proc.exitstatus != 0:
- log.error("Bad exit status {ret} from --op list request".
- format(ret=proc.exitstatus))
- ERRORS += 1
- else:
- for pgline in proc.stdout.getvalue().splitlines():
- if not pgline:
- continue
- (pg, obj) = json.loads(pgline)
- name = obj['oid']
- if name in db:
- pgswithobjects.add(pg)
- objsinpg.setdefault(pg, []).append(name)
- db[name].setdefault("pg2json",
- {})[pg] = json.dumps(obj)
-
- log.info(db)
- log.info(pgswithobjects)
- log.info(objsinpg)
-
- if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL:
- # Test get-bytes
- log.info("Test get-bytes and set-bytes")
- for basename in db.keys():
- file = os.path.join(DATADIR, basename)
- GETNAME = os.path.join(DATADIR, "get")
- SETNAME = os.path.join(DATADIR, "set")
-
- for remote in osds.remotes.iterkeys():
- for role in osds.remotes[remote]:
- if string.find(role, "osd.") != 0:
- continue
- osdid = int(role.split('.')[1])
- if osdid not in pgs:
- continue
-
- for pg, JSON in db[basename]["pg2json"].iteritems():
- if pg in pgs[osdid]:
- cmd = ((prefix + "--pgid {pg}").
- format(id=osdid, pg=pg).split())
- cmd.append(run.Raw("'{json}'".format(json=JSON)))
- cmd += ("get-bytes {fname}".
- format(fname=GETNAME).split())
- proc = remote.run(args=cmd, check_status=False)
- if proc.exitstatus != 0:
- remote.run(args="rm -f {getfile}".
- format(getfile=GETNAME).split())
- log.error("Bad exit status {ret}".
- format(ret=proc.exitstatus))
- ERRORS += 1
- continue
- cmd = ("diff -q {file} {getfile}".
- format(file=file, getfile=GETNAME))
- proc = remote.run(args=cmd.split())
- if proc.exitstatus != 0:
- log.error("Data from get-bytes differ")
- # log.debug("Got:")
- # cat_file(logging.DEBUG, GETNAME)
- # log.debug("Expected:")
- # cat_file(logging.DEBUG, file)
- ERRORS += 1
- remote.run(args="rm -f {getfile}".
- format(getfile=GETNAME).split())
-
- data = ("put-bytes going into {file}\n".
- format(file=file))
- teuthology.write_file(remote, SETNAME, data)
- cmd = ((prefix + "--pgid {pg}").
- format(id=osdid, pg=pg).split())
- cmd.append(run.Raw("'{json}'".format(json=JSON)))
- cmd += ("set-bytes {fname}".
- format(fname=SETNAME).split())
- proc = remote.run(args=cmd, check_status=False)
- proc.wait()
- if proc.exitstatus != 0:
- log.info("set-bytes failed for object {obj} "
- "in pg {pg} osd.{id} ret={ret}".
- format(obj=basename, pg=pg,
- id=osdid, ret=proc.exitstatus))
- ERRORS += 1
-
- cmd = ((prefix + "--pgid {pg}").
- format(id=osdid, pg=pg).split())
- cmd.append(run.Raw("'{json}'".format(json=JSON)))
- cmd += "get-bytes -".split()
- proc = remote.run(args=cmd, check_status=False,
- stdout=StringIO())
- proc.wait()
- if proc.exitstatus != 0:
- log.error("get-bytes after "
- "set-bytes ret={ret}".
- format(ret=proc.exitstatus))
- ERRORS += 1
- else:
- if data != proc.stdout.getvalue():
- log.error("Data inconsistent after "
- "set-bytes, got:")
- log.error(proc.stdout.getvalue())
- ERRORS += 1
-
- cmd = ((prefix + "--pgid {pg}").
- format(id=osdid, pg=pg).split())
- cmd.append(run.Raw("'{json}'".format(json=JSON)))
- cmd += ("set-bytes {fname}".
- format(fname=file).split())
- proc = remote.run(args=cmd, check_status=False)
- proc.wait()
- if proc.exitstatus != 0:
- log.info("set-bytes failed for object {obj} "
- "in pg {pg} osd.{id} ret={ret}".
- format(obj=basename, pg=pg,
- id=osdid, ret=proc.exitstatus))
- ERRORS += 1
-
- log.info("Test list-attrs get-attr")
- for basename in db.keys():
- file = os.path.join(DATADIR, basename)
- GETNAME = os.path.join(DATADIR, "get")
- SETNAME = os.path.join(DATADIR, "set")
-
- for remote in osds.remotes.iterkeys():
- for role in osds.remotes[remote]:
- if string.find(role, "osd.") != 0:
- continue
- osdid = int(role.split('.')[1])
- if osdid not in pgs:
- continue
-
- for pg, JSON in db[basename]["pg2json"].iteritems():
- if pg in pgs[osdid]:
- cmd = ((prefix + "--pgid {pg}").
- format(id=osdid, pg=pg).split())
- cmd.append(run.Raw("'{json}'".format(json=JSON)))
- cmd += ["list-attrs"]
- proc = remote.run(args=cmd, check_status=False,
- stdout=StringIO(), stderr=StringIO())
- proc.wait()
- if proc.exitstatus != 0:
- log.error("Bad exit status {ret}".
- format(ret=proc.exitstatus))
- ERRORS += 1
- continue
- keys = proc.stdout.getvalue().split()
- values = dict(db[basename]["xattr"])
-
- for key in keys:
- if (key == "_" or
- key == "snapset" or
- key == "hinfo_key"):
- continue
- key = key.strip("_")
- if key not in values:
- log.error("The key {key} should be present".
- format(key=key))
- ERRORS += 1
- continue
- exp = values.pop(key)
- cmd = ((prefix + "--pgid {pg}").
- format(id=osdid, pg=pg).split())
- cmd.append(run.Raw("'{json}'".format(json=JSON)))
- cmd += ("get-attr {key}".
- format(key="_" + key).split())
- proc = remote.run(args=cmd, check_status=False,
- stdout=StringIO())
- proc.wait()
- if proc.exitstatus != 0:
- log.error("get-attr failed with {ret}".
- format(ret=proc.exitstatus))
- ERRORS += 1
- continue
- val = proc.stdout.getvalue()
- if exp != val:
- log.error("For key {key} got value {got} "
- "instead of {expected}".
- format(key=key, got=val,
- expected=exp))
- ERRORS += 1
- if "hinfo_key" in keys:
- cmd_prefix = prefix.format(id=osdid)
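- # Round-trip check on hinfo_key: capture its current value (base64-encoded),
- # overwrite it with a placeholder via set-attr, then restore the original
- # value and verify get-attr returns it unchanged.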
- cmd = """
- expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64)
- echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} -
- test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder
- echo $expected | base64 --decode | \
- {prefix} --pgid {pg} '{json}' set-attr {key} -
- test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected
- """.format(prefix=cmd_prefix, pg=pg, json=JSON,
- key="hinfo_key")
- log.debug(cmd)
- proc = remote.run(args=['bash', '-e', '-x',
- '-c', cmd],
- check_status=False,
- stdout=StringIO(),
- stderr=StringIO())
- proc.wait()
- if proc.exitstatus != 0:
- log.error("failed with " +
- str(proc.exitstatus))
- log.error(proc.stdout.getvalue() + " " +
- proc.stderr.getvalue())
- ERRORS += 1
-
- if len(values) != 0:
- log.error("Not all keys found, remaining keys:")
- log.error(values)
-
- log.info("Test pg info")
- for remote in osds.remotes.iterkeys():
- for role in osds.remotes[remote]:
- if string.find(role, "osd.") != 0:
- continue
- osdid = int(role.split('.')[1])
- if osdid not in pgs:
- continue
-
- for pg in pgs[osdid]:
- cmd = ((prefix + "--op info --pgid {pg}").
- format(id=osdid, pg=pg).split())
- proc = remote.run(args=cmd, check_status=False,
- stdout=StringIO())
- proc.wait()
- if proc.exitstatus != 0:
- log.error("Failure of --op info command with {ret}".
- format(ret=proc.exitstatus))
- ERRORS += 1
- continue
- info = proc.stdout.getvalue()
- if not str(pg) in info:
- log.error("Bad data from info: {info}".format(info=info))
- ERRORS += 1
-
- log.info("Test pg logging")
- for remote in osds.remotes.iterkeys():
- for role in osds.remotes[remote]:
- if string.find(role, "osd.") != 0:
- continue
- osdid = int(role.split('.')[1])
- if osdid not in pgs:
- continue
-
- for pg in pgs[osdid]:
- cmd = ((prefix + "--op log --pgid {pg}").
- format(id=osdid, pg=pg).split())
- proc = remote.run(args=cmd, check_status=False,
- stdout=StringIO())
- proc.wait()
- if proc.exitstatus != 0:
- log.error("Getting log failed for pg {pg} "
- "from osd.{id} with {ret}".
- format(pg=pg, id=osdid, ret=proc.exitstatus))
- ERRORS += 1
- continue
- HASOBJ = pg in pgswithobjects
- MODOBJ = "modify" in proc.stdout.getvalue()
- if HASOBJ != MODOBJ:
- log.error("Bad log for pg {pg} from osd.{id}".
- format(pg=pg, id=osdid))
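- # Py2 and/or idiom, equivalent to: MSG = "" if HASOBJ else "NOT "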
- MSG = (HASOBJ and [""] or ["NOT "])[0]
- log.error("Log should {msg}have a modify entry".
- format(msg=MSG))
- ERRORS += 1
-
- log.info("Test pg export")
- EXP_ERRORS = 0
- for remote in osds.remotes.iterkeys():
- for role in osds.remotes[remote]:
- if string.find(role, "osd.") != 0:
- continue
- osdid = int(role.split('.')[1])
- if osdid not in pgs:
- continue
-
- for pg in pgs[osdid]:
- fpath = os.path.join(DATADIR, "osd{id}.{pg}".
- format(id=osdid, pg=pg))
-
- cmd = ((prefix + "--op export --pgid {pg} --file {file}").
- format(id=osdid, pg=pg, file=fpath))
- proc = remote.run(args=cmd, check_status=False,
- stdout=StringIO())
- proc.wait()
- if proc.exitstatus != 0:
- log.error("Exporting failed for pg {pg} "
- "on osd.{id} with {ret}".
- format(pg=pg, id=osdid, ret=proc.exitstatus))
- EXP_ERRORS += 1
-
- ERRORS += EXP_ERRORS
-
- log.info("Test pg removal")
- RM_ERRORS = 0
- for remote in osds.remotes.iterkeys():
- for role in osds.remotes[remote]:
- if string.find(role, "osd.") != 0:
- continue
- osdid = int(role.split('.')[1])
- if osdid not in pgs:
- continue
-
- for pg in pgs[osdid]:
- cmd = ((prefix + "--op remove --pgid {pg}").
- format(pg=pg, id=osdid))
- proc = remote.run(args=cmd, check_status=False,
- stdout=StringIO())
- proc.wait()
- if proc.exitstatus != 0:
- log.error("Removing failed for pg {pg} "
- "on osd.{id} with {ret}".
- format(pg=pg, id=osdid, ret=proc.exitstatus))
- RM_ERRORS += 1
-
- ERRORS += RM_ERRORS
-
- IMP_ERRORS = 0
- if EXP_ERRORS == 0 and RM_ERRORS == 0:
- log.info("Test pg import")
-
- for remote in osds.remotes.iterkeys():
- for role in osds.remotes[remote]:
- if string.find(role, "osd.") != 0:
- continue
- osdid = int(role.split('.')[1])
- if osdid not in pgs:
- continue
-
- for pg in pgs[osdid]:
- fpath = os.path.join(DATADIR, "osd{id}.{pg}".
- format(id=osdid, pg=pg))
-
- cmd = ((prefix + "--op import --file {file}").
- format(id=osdid, file=fpath))
- proc = remote.run(args=cmd, check_status=False,
- stdout=StringIO())
- proc.wait()
- if proc.exitstatus != 0:
- log.error("Import failed from {file} with {ret}".
- format(file=fpath, ret=proc.exitstatus))
- IMP_ERRORS += 1
- else:
- log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
-
- ERRORS += IMP_ERRORS
-
- if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
- log.info("Restarting OSDs....")
- # They still appear to be up because nodown is set
- for osd in manager.get_osd_status()['up']:
- manager.revive_osd(osd)
- # Wait for health?
- time.sleep(5)
- # Let scrub after test runs verify consistency of all copies
- log.info("Verify replicated import data")
- objects = range(1, NUM_OBJECTS + 1)
- for i in objects:
- NAME = REP_NAME + "{num}".format(num=i)
- TESTNAME = os.path.join(DATADIR, "gettest")
- REFNAME = os.path.join(DATADIR, NAME)
-
- proc = rados(ctx, cli_remote,
- ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False)
-
- ret = proc.wait()
- if ret != 0:
- log.error("After import, rados get failed with {ret}".
- format(ret=proc.exitstatus))
- ERRORS += 1
- continue
-
- cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME,
- ref=REFNAME)
- proc = cli_remote.run(args=cmd, check_status=False)
- proc.wait()
- if proc.exitstatus != 0:
- log.error("Data comparison failed for {obj}".format(obj=NAME))
- ERRORS += 1
-
- return ERRORS
+++ /dev/null
-"""
-Chef-solo task
-"""
-import logging
-
-from teuthology.orchestra import run
-from teuthology import misc
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Run chef-solo on all nodes.
- """
- log.info('Running chef-solo...')
-
- run.wait(
- ctx.cluster.run(
- args=[
- 'wget',
-# '-q',
- '-O-',
-# 'https://raw.github.com/ceph/ceph-qa-chef/master/solo/solo-from-scratch',
- 'http://git.ceph.com/?p=ceph-qa-chef.git;a=blob_plain;f=solo/solo-from-scratch;hb=HEAD',
- run.Raw('|'),
- 'sh',
- '-x',
- ],
- wait=False,
- )
- )
-
- log.info('Reconnecting after ceph-qa-chef run')
- misc.reconnect(ctx, 10) #Reconnect for ulimit and other ceph-qa-chef changes
-
+++ /dev/null
-"""
-Mount cifs clients. Unmount when finished.
-"""
-import contextlib
-import logging
-import os
-
-from teuthology import misc as teuthology
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Mount/unmount a cifs client.
-
- The config is optional and defaults to mounting on all clients. If
- a config is given, it is expected to be a list of clients to do
- this operation on.
-
- Example that starts smbd and mounts cifs on all nodes::
-
- tasks:
- - ceph:
- - samba:
- - cifs-mount:
- - interactive:
-
- Example that splits smbd and cifs::
-
- tasks:
- - ceph:
- - samba: [samba.0]
- - cifs-mount: [client.0]
- - ceph-fuse: [client.1]
- - interactive:
-
- Example that specifies the share name::
-
- tasks:
- - ceph:
- - ceph-fuse:
- - samba:
- samba.0:
- cephfuse: "{testdir}/mnt.0"
- - cifs-mount:
- client.0:
- share: cephfuse
-
- :param ctx: Context
- :param config: Configuration
- """
- log.info('Mounting cifs clients...')
-
- if config is None:
- config = dict(('client.{id}'.format(id=id_), None)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
- elif isinstance(config, list):
- config = dict((name, None) for name in config)
-
- clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
-
- from .samba import get_sambas
- samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')]
- sambas = list(get_sambas(ctx=ctx, roles=samba_roles))
- (ip, _) = sambas[0][1].ssh.get_transport().getpeername()
- log.info('samba ip: {ip}'.format(ip=ip))
-
- for id_, remote in clients:
- mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
- log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format(
- id=id_, remote=remote,mnt=mnt))
-
- remote.run(
- args=[
- 'mkdir',
- '--',
- mnt,
- ],
- )
-
- rolestr = 'client.{id_}'.format(id_=id_)
- unc = "ceph"
- log.info("config: {c}".format(c=config))
- if config[rolestr] is not None and 'share' in config[rolestr]:
- unc = config[rolestr]['share']
-
- remote.run(
- args=[
- 'sudo',
- 'mount',
- '-t',
- 'cifs',
- '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc),
- '-o',
- 'username=ubuntu,password=ubuntu',
- mnt,
- ],
- )
-
- remote.run(
- args=[
- 'sudo',
- 'chown',
- 'ubuntu:ubuntu',
- '{m}/'.format(m=mnt),
- ],
- )
-
- try:
- yield
- finally:
- log.info('Unmounting cifs clients...')
- for id_, remote in clients:
- remote.run(
- args=[
- 'sudo',
- 'umount',
- mnt,
- ],
- )
- for id_, remote in clients:
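- # Retry rmdir until the mount point stops reporting "Device or resource
- # busy": while it is busy the grep matches and the pipeline succeeds, so we
- # sleep and retry; once rmdir no longer reports that error the grep finds
- # nothing, remote.run raises, and we break out of the loop.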
- while True:
- try:
- remote.run(
- args=[
- 'rmdir', '--', mnt,
- run.Raw('2>&1'),
- run.Raw('|'),
- 'grep', 'Device or resource busy',
- ],
- )
- import time
- time.sleep(1)
- except Exception:
- break
+++ /dev/null
-"""
-Cram tests
-"""
-import logging
-import os
-
-from teuthology import misc as teuthology
-from teuthology.parallel import parallel
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Run all cram tests from the specified urls on the specified
- clients. Each client runs tests in parallel.
-
- Limitations:
- Tests must have a .t suffix. Tests with duplicate names will
- overwrite each other, so only the last one will run.
-
- For example::
-
- tasks:
- - ceph:
- - cram:
- clients:
- client.0:
- - http://ceph.com/qa/test.t
- - http://ceph.com/qa/test2.t
- client.1: [http://ceph.com/qa/test.t]
-
- You can also run a list of cram tests on all clients::
-
- tasks:
- - ceph:
- - cram:
- clients:
- all: [http://ceph.com/qa/test.t]
-
- :param ctx: Context
- :param config: Configuration
- """
- assert isinstance(config, dict)
- assert 'clients' in config and isinstance(config['clients'], dict), \
- 'configuration must contain a dictionary of clients'
-
- clients = teuthology.replace_all_with_clients(ctx.cluster,
- config['clients'])
- testdir = teuthology.get_testdir(ctx)
-
- try:
- for client, tests in clients.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
- remote.run(
- args=[
- 'mkdir', '--', client_dir,
- run.Raw('&&'),
- 'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
- run.Raw('&&'),
- '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
- 'install', 'cram',
- ],
- )
- for test in tests:
- log.info('fetching test %s for %s', test, client)
- assert test.endswith('.t'), 'tests must end in .t'
- remote.run(
- args=[
- 'wget', '-nc', '-nv', '-P', client_dir, '--', test,
- ],
- )
-
- with parallel() as p:
- for role in clients.iterkeys():
- p.spawn(_run_tests, ctx, role)
- finally:
- for client, tests in clients.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
- test_files = set([test.rsplit('/', 1)[1] for test in tests])
-
- # remove test files unless they failed
- for test_file in test_files:
- abs_file = os.path.join(client_dir, test_file)
- remote.run(
- args=[
- 'test', '-f', abs_file + '.err',
- run.Raw('||'),
- 'rm', '-f', '--', abs_file,
- ],
- )
-
- # ignore failure since more than one client may
- # be run on a host, and the client dir should be
- # non-empty if the test failed
- remote.run(
- args=[
- 'rm', '-rf', '--',
- '{tdir}/virtualenv'.format(tdir=testdir),
- run.Raw(';'),
- 'rmdir', '--ignore-fail-on-non-empty', client_dir,
- ],
- )
-
-def _run_tests(ctx, role):
- """
- For each role, check to make sure it's a client, then run the cram on that client
-
- :param ctx: Context
- :param role: Roles
- """
- assert isinstance(role, basestring)
- PREFIX = 'client.'
- assert role.startswith(PREFIX)
- id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- ceph_ref = ctx.summary.get('ceph-sha1', 'master')
-
- testdir = teuthology.get_testdir(ctx)
- log.info('Running tests for %s...', role)
- remote.run(
- args=[
- run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
- run.Raw('CEPH_ID="{id}"'.format(id=id_)),
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/virtualenv/bin/cram'.format(tdir=testdir),
- '-v', '--',
- run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)),
- ],
- logger=log.getChild(role),
- )
+++ /dev/null
-#!/usr/bin/env python
-import contextlib
-import logging
-from cStringIO import StringIO
-import textwrap
- from ConfigParser import ConfigParser
-import time
-
-from teuthology.orchestra import run
-from teuthology import misc
-from teuthology.contextutil import nested
-
-log = logging.getLogger(__name__)
-
-DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git'
-DS_STABLE_BRANCHES = ("havana", "grizzly")
-
-is_devstack_node = lambda role: role.startswith('devstack')
-is_osd_node = lambda role: role.startswith('osd')
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- if config is None:
- config = {}
- if not isinstance(config, dict):
- raise TypeError("config must be a dict")
- with nested(lambda: install(ctx=ctx, config=config),
- lambda: smoke(ctx=ctx, config=config),
- ):
- yield
-
-
-@contextlib.contextmanager
-def install(ctx, config):
- """
- Install OpenStack DevStack and configure it to use a Ceph cluster for
- Glance and Cinder.
-
- Requires one node with a role 'devstack'
-
- Since devstack runs rampant on the system it's used on, typically you will
- want to reprovision that machine after using devstack on it.
-
- Also, the default 2GB of RAM that is given to vps nodes is insufficient. I
- recommend 4GB. Downburst can be instructed to give 4GB to a vps node by
- adding this to the yaml:
-
- downburst:
- ram: 4G
-
- This was created using documentation found here:
- https://github.com/openstack-dev/devstack/blob/master/README.md
- http://ceph.com/docs/master/rbd/rbd-openstack/
- """
- if config is None:
- config = {}
- if not isinstance(config, dict):
- raise TypeError("config must be a dict")
-
- devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
- an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
-
- devstack_branch = config.get("branch", "master")
- install_devstack(devstack_node, devstack_branch)
- try:
- configure_devstack_and_ceph(ctx, config, devstack_node, an_osd_node)
- yield
- finally:
- pass
-
-
-def install_devstack(devstack_node, branch="master"):
- log.info("Cloning DevStack repo...")
-
- args = ['git', 'clone', DEVSTACK_GIT_REPO]
- devstack_node.run(args=args)
-
- if branch != "master":
- if branch in DS_STABLE_BRANCHES and not branch.startswith("stable"):
- branch = "stable/" + branch
- log.info("Checking out {branch} branch...".format(branch=branch))
- cmd = "cd devstack && git checkout " + branch
- devstack_node.run(args=cmd)
-
- log.info("Installing DevStack...")
- args = ['cd', 'devstack', run.Raw('&&'), './stack.sh']
- devstack_node.run(args=args)
-
-
-def configure_devstack_and_ceph(ctx, config, devstack_node, ceph_node):
- pool_size = config.get('pool_size', '128')
- create_pools(ceph_node, pool_size)
- distribute_ceph_conf(devstack_node, ceph_node)
- # This is where we would install python-ceph and ceph-common but it appears
- # the ceph task does that for us.
- generate_ceph_keys(ceph_node)
- distribute_ceph_keys(devstack_node, ceph_node)
- secret_uuid = set_libvirt_secret(devstack_node, ceph_node)
- update_devstack_config_files(devstack_node, secret_uuid)
- set_apache_servername(devstack_node)
- # Rebooting is the most-often-used method of restarting devstack services
- misc.reboot(devstack_node)
- start_devstack(devstack_node)
- restart_apache(devstack_node)
-
-
-def create_pools(ceph_node, pool_size):
- log.info("Creating pools on Ceph cluster...")
-
- for pool_name in ['volumes', 'images', 'backups']:
- args = ['ceph', 'osd', 'pool', 'create', pool_name, pool_size]
- ceph_node.run(args=args)
-
-
-def distribute_ceph_conf(devstack_node, ceph_node):
- log.info("Copying ceph.conf to DevStack node...")
-
- ceph_conf_path = '/etc/ceph/ceph.conf'
- ceph_conf = misc.get_file(ceph_node, ceph_conf_path, sudo=True)
- misc.sudo_write_file(devstack_node, ceph_conf_path, ceph_conf)
-
-
-def generate_ceph_keys(ceph_node):
- log.info("Generating Ceph keys...")
-
- ceph_auth_cmds = [
- ['ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
- 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'], # noqa
- ['ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
- 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'], # noqa
- ['ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
- 'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'], # noqa
- ]
- for cmd in ceph_auth_cmds:
- ceph_node.run(args=cmd)
-
-
-def distribute_ceph_keys(devstack_node, ceph_node):
- log.info("Copying Ceph keys to DevStack node...")
-
- def copy_key(from_remote, key_name, to_remote, dest_path, owner):
- key_stringio = StringIO()
- from_remote.run(
- args=['ceph', 'auth', 'get-or-create', key_name],
- stdout=key_stringio)
- key_stringio.seek(0)
- misc.sudo_write_file(to_remote, dest_path,
- key_stringio, owner=owner)
- keys = [
- dict(name='client.glance',
- path='/etc/ceph/ceph.client.glance.keyring',
- # devstack appears to just want root:root
- #owner='glance:glance',
- ),
- dict(name='client.cinder',
- path='/etc/ceph/ceph.client.cinder.keyring',
- # devstack appears to just want root:root
- #owner='cinder:cinder',
- ),
- dict(name='client.cinder-backup',
- path='/etc/ceph/ceph.client.cinder-backup.keyring',
- # devstack appears to just want root:root
- #owner='cinder:cinder',
- ),
- ]
- for key_dict in keys:
- copy_key(ceph_node, key_dict['name'], devstack_node,
- key_dict['path'], key_dict.get('owner'))
-
-
-def set_libvirt_secret(devstack_node, ceph_node):
- log.info("Setting libvirt secret...")
-
- cinder_key_stringio = StringIO()
- ceph_node.run(args=['ceph', 'auth', 'get-key', 'client.cinder'],
- stdout=cinder_key_stringio)
- cinder_key = cinder_key_stringio.getvalue().strip()
-
- uuid_stringio = StringIO()
- devstack_node.run(args=['uuidgen'], stdout=uuid_stringio)
- uuid = uuid_stringio.getvalue().strip()
-
- secret_path = '/tmp/secret.xml'
- secret_template = textwrap.dedent("""
- <secret ephemeral='no' private='no'>
- <uuid>{uuid}</uuid>
- <usage type='ceph'>
- <name>client.cinder secret</name>
- </usage>
- </secret>""")
- misc.sudo_write_file(devstack_node, secret_path,
- secret_template.format(uuid=uuid))
- devstack_node.run(args=['sudo', 'virsh', 'secret-define', '--file',
- secret_path])
- devstack_node.run(args=['sudo', 'virsh', 'secret-set-value', '--secret',
- uuid, '--base64', cinder_key])
- return uuid
-
-
-def update_devstack_config_files(devstack_node, secret_uuid):
- log.info("Updating DevStack config files to use Ceph...")
-
- def backup_config(node, file_name, backup_ext='.orig.teuth'):
- node.run(args=['cp', '-f', file_name, file_name + backup_ext])
-
- def update_config(config_name, config_stream, update_dict,
- section='DEFAULT'):
- parser = ConfigParser()
- parser.readfp(config_stream)
- for (key, value) in update_dict.items():
- parser.set(section, key, value)
- out_stream = StringIO()
- parser.write(out_stream)
- out_stream.seek(0)
- return out_stream
-
- updates = [
- dict(name='/etc/glance/glance-api.conf', options=dict(
- default_store='rbd',
- rbd_store_user='glance',
- rbd_store_pool='images',
- show_image_direct_url='True',)),
- dict(name='/etc/cinder/cinder.conf', options=dict(
- volume_driver='cinder.volume.drivers.rbd.RBDDriver',
- rbd_pool='volumes',
- rbd_ceph_conf='/etc/ceph/ceph.conf',
- rbd_flatten_volume_from_snapshot='false',
- rbd_max_clone_depth='5',
- glance_api_version='2',
- rbd_user='cinder',
- rbd_secret_uuid=secret_uuid,
- backup_driver='cinder.backup.drivers.ceph',
- backup_ceph_conf='/etc/ceph/ceph.conf',
- backup_ceph_user='cinder-backup',
- backup_ceph_chunk_size='134217728',
- backup_ceph_pool='backups',
- backup_ceph_stripe_unit='0',
- backup_ceph_stripe_count='0',
- restore_discard_excess_bytes='true',
- )),
- dict(name='/etc/nova/nova.conf', options=dict(
- libvirt_images_type='rbd',
- libvirt_images_rbd_pool='volumes',
- libvirt_images_rbd_ceph_conf='/etc/ceph/ceph.conf',
- rbd_user='cinder',
- rbd_secret_uuid=secret_uuid,
- libvirt_inject_password='false',
- libvirt_inject_key='false',
- libvirt_inject_partition='-2',
- )),
- ]
-
- for update in updates:
- file_name = update['name']
- options = update['options']
- config_str = misc.get_file(devstack_node, file_name, sudo=True)
- config_stream = StringIO(config_str)
- backup_config(devstack_node, file_name)
- new_config_stream = update_config(file_name, config_stream, options)
- misc.sudo_write_file(devstack_node, file_name, new_config_stream)
-
-
-def set_apache_servername(node):
- # Apache complains: "Could not reliably determine the server's fully
- # qualified domain name, using 127.0.0.1 for ServerName"
- # So, let's make sure it knows its name.
- log.info("Setting Apache ServerName...")
-
- hostname = node.hostname
- config_file = '/etc/apache2/conf.d/servername'
- misc.sudo_write_file(node, config_file,
- "ServerName {name}".format(name=hostname))
-
-
-def start_devstack(devstack_node):
- log.info("Patching devstack start script...")
- # This causes screen to start headless - otherwise rejoin-stack.sh fails
- # because there is no terminal attached.
- cmd = "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh"
- devstack_node.run(args=cmd)
-
- log.info("Starting devstack...")
- cmd = "cd devstack && ./rejoin-stack.sh"
- devstack_node.run(args=cmd)
-
- # This was added because I was getting timeouts on Cinder requests - which
- # were trying to access Keystone on port 5000. A more robust way to handle
- # this would be to introduce a wait-loop on devstack_node that checks to
- # see if a service is listening on port 5000.
- log.info("Waiting 30s for devstack to start...")
- time.sleep(30)
-
-
-def restart_apache(node):
- node.run(args=['sudo', '/etc/init.d/apache2', 'restart'], wait=True)
-
-
-@contextlib.contextmanager
-def exercise(ctx, config):
- log.info("Running devstack exercises...")
-
- if config is None:
- config = {}
- if not isinstance(config, dict):
- raise TypeError("config must be a dict")
-
- devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
-
- # TODO: save the log *and* preserve failures
- #devstack_archive_dir = create_devstack_archive(ctx, devstack_node)
-
- try:
- #cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format( # noqa
- # dir=devstack_archive_dir)
- cmd = "cd devstack && ./exercise.sh"
- devstack_node.run(args=cmd, wait=True)
- yield
- finally:
- pass
-
-
-def create_devstack_archive(ctx, devstack_node):
- test_dir = misc.get_testdir(ctx)
- devstack_archive_dir = "{test_dir}/archive/devstack".format(
- test_dir=test_dir)
- devstack_node.run(args="mkdir -p " + devstack_archive_dir)
- return devstack_archive_dir
-
-
-@contextlib.contextmanager
-def smoke(ctx, config):
- log.info("Running a basic smoketest...")
-
- devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
- an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
-
- try:
- create_volume(devstack_node, an_osd_node, 'smoke0', 1)
- yield
- finally:
- pass
-
-
-def create_volume(devstack_node, ceph_node, vol_name, size):
- """
- :param size: The size of the volume, in GB
- """
- size = str(size)
- log.info("Creating a {size}GB volume named {name}...".format(
- name=vol_name,
- size=size))
- args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create',
- '--display-name', vol_name, size]
- out_stream = StringIO()
- devstack_node.run(args=args, stdout=out_stream, wait=True)
- vol_info = parse_os_table(out_stream.getvalue())
- log.debug("Volume info: %s", str(vol_info))
-
- out_stream = StringIO()
- try:
- ceph_node.run(args="rbd --id cinder ls -l volumes", stdout=out_stream,
- wait=True)
- except run.CommandFailedError:
- log.debug("Original rbd call failed; retrying without '--id cinder'")
- ceph_node.run(args="rbd ls -l volumes", stdout=out_stream,
- wait=True)
-
- assert vol_info['id'] in out_stream.getvalue(), \
- "Volume not found on Ceph cluster"
- assert vol_info['size'] == size, \
- "Volume size on Ceph cluster is different than specified"
- return vol_info['id']
-
-
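- # Brief sketch of the (assumed) cinder table format this helper parses: a row
- # such as "| id | <uuid> |" splits on whitespace so that items[1] is the key
- # and items[3] is the value, yielding {'id': '<uuid>'}. Values containing
- # spaces keep only their first whitespace-separated token.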
-def parse_os_table(table_str):
- out_dict = dict()
- for line in table_str.split('\n'):
- if line.startswith('|'):
- items = line.split()
- out_dict[items[1]] = items[3]
- return out_dict
+++ /dev/null
-"""
-Raise exceptions on osd coredumps or test err directories
-"""
-import contextlib
-import logging
-import time
-from teuthology.orchestra import run
-
-import ceph_manager
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Die if {testdir}/err exists or if an OSD dumps core
- """
- if config is None:
- config = {}
-
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
- log.info('num_osds is %s' % num_osds)
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- while len(manager.get_osd_status()['up']) < num_osds:
- time.sleep(10)
-
- testdir = teuthology.get_testdir(ctx)
-
- while True:
- for i in range(num_osds):
- (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys()
- p = osd_remote.run(
- args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ],
- wait=True,
- check_status=False,
- )
- exit_status = p.exitstatus
-
- if exit_status == 0:
- log.info("osd %d has an error" % i)
- raise Exception("osd %d error" % i)
-
- log_path = '/var/log/ceph/osd.%d.log' % (i)
-
- p = osd_remote.run(
- args = [
- 'tail', '-1', log_path,
- run.Raw('|'),
- 'grep', '-q', 'end dump'
- ],
- wait=True,
- check_status=False,
- )
- exit_status = p.exitstatus
-
- if exit_status == 0:
- log.info("osd %d dumped core" % i)
- raise Exception("osd %d dumped core" % i)
-
- time.sleep(5)
+++ /dev/null
-"""
-Special case divergence test
-"""
-import logging
-import time
-
-import ceph_manager
-from teuthology import misc as teuthology
-from util.rados import rados
-
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Test handling of divergent entries with prior_version
- prior to log_tail
-
- config: none
-
- Requires 3 osds.
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'divergent_priors task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
- ctx.manager = manager
-
- while len(manager.get_osd_status()['up']) < 3:
- time.sleep(10)
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.raw_cluster_cmd('osd', 'set', 'noout')
- manager.raw_cluster_cmd('osd', 'set', 'noin')
- manager.raw_cluster_cmd('osd', 'set', 'nodown')
- manager.wait_for_clean()
-
- # something that is always there
- dummyfile = '/etc/fstab'
- dummyfile2 = '/etc/resolv.conf'
-
- # create 1 pg pool
- log.info('creating foo')
- manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1')
-
- osds = [0, 1, 2]
- for i in osds:
- manager.set_config(i, osd_min_pg_log_entries=1)
-
- # determine primary
- divergent = manager.get_pg_primary('foo', 0)
- log.info("primary and soon to be divergent is %d", divergent)
- non_divergent = [0,1,2]
- non_divergent.remove(divergent)
-
- log.info('writing initial objects')
- # write 1000 objects
- for i in range(1000):
- rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
-
- manager.wait_for_clean()
-
- # blackhole non_divergent
- log.info("blackholing osds %s", str(non_divergent))
- for i in non_divergent:
- manager.set_config(i, filestore_blackhole='')
-
- # write 1 (divergent) object
- log.info('writing divergent object existing_0')
- rados(
- ctx, mon, ['-p', 'foo', 'put', 'existing_0', dummyfile2],
- wait=False)
- time.sleep(10)
- mon.run(
- args=['killall', '-9', 'rados'],
- wait=True,
- check_status=False)
-
- # kill all the osds
- log.info('killing all the osds')
- for i in osds:
- manager.kill_osd(i)
- for i in osds:
- manager.mark_down_osd(i)
- for i in osds:
- manager.mark_out_osd(i)
-
- # bring up non-divergent
- log.info("bringing up non_divergent %s", str(non_divergent))
- for i in non_divergent:
- manager.revive_osd(i)
- for i in non_divergent:
- manager.mark_in_osd(i)
-
- log.info('making log long to prevent backfill')
- for i in non_divergent:
- manager.set_config(i, osd_min_pg_log_entries=100000)
-
- # write 1 non-divergent object (ensure that old divergent one is divergent)
- log.info('writing non-divergent object existing_1')
- rados(ctx, mon, ['-p', 'foo', 'put', 'existing_1', dummyfile2])
-
- manager.wait_for_recovery()
-
- # ensure no recovery
- log.info('delay recovery')
- for i in non_divergent:
- manager.set_config(i, osd_recovery_delay_start=100000)
-
- # bring in our divergent friend
- log.info("revive divergent %d", divergent)
- manager.revive_osd(divergent)
-
- while len(manager.get_osd_status()['up']) < 3:
- time.sleep(10)
-
- log.info('delay recovery divergent')
- manager.set_config(divergent, osd_recovery_delay_start=100000)
- log.info('mark divergent in')
- manager.mark_in_osd(divergent)
-
- log.info('wait for peering')
- rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
-
- log.info("killing divergent %d", divergent)
- manager.kill_osd(divergent)
- log.info("reviving divergent %d", divergent)
- manager.revive_osd(divergent)
-
- log.info('allowing recovery')
- for i in non_divergent:
- manager.set_config(i, osd_recovery_delay_start=0)
-
- log.info('reading existing_0')
- exit_status = rados(ctx, mon,
- ['-p', 'foo', 'get', 'existing_0',
- '-o', '/tmp/existing'])
- assert exit_status == 0
- log.info("success")
+++ /dev/null
-"""
-Dump_stuck command
-"""
-import logging
-import re
-import time
-
-import ceph_manager
-from teuthology import misc as teuthology
-
-
-log = logging.getLogger(__name__)
-
-def check_stuck(manager, num_inactive, num_unclean, num_stale, timeout=10):
- """
- Do checks. Make sure get_stuck_pgs returns the right amount of information, then
- extract health information from the raw_cluster_cmd output and compare the results
- with the values passed in. This passes if all asserts pass.
-
- :param manager: Ceph manager
- :param num_inactive: number of inactive pgs that are stuck
- :param num_unclean: number of unclean pgs that are stuck
- :param num_stale: number of stale pgs that are stuck
- :param timeout: timeout value for get_stuck_pgs calls
- """
- inactive = manager.get_stuck_pgs('inactive', timeout)
- assert len(inactive) == num_inactive
- unclean = manager.get_stuck_pgs('unclean', timeout)
- assert len(unclean) == num_unclean
- stale = manager.get_stuck_pgs('stale', timeout)
- assert len(stale) == num_stale
-
- # check health output as well
- health = manager.raw_cluster_cmd('health')
- log.debug('ceph health is: %s', health)
- if num_inactive > 0:
- m = re.search('(\d+) pgs stuck inactive', health)
- assert int(m.group(1)) == num_inactive
- if num_unclean > 0:
- m = re.search('(\d+) pgs stuck unclean', health)
- assert int(m.group(1)) == num_unclean
- if num_stale > 0:
- m = re.search('(\d+) pgs stuck stale', health)
- assert int(m.group(1)) == num_stale
-
-def task(ctx, config):
- """
- Test the dump_stuck command.
-
- :param ctx: Context
- :param config: Configuration
- """
- assert config is None, \
- 'dump_stuck requires no configuration'
- assert teuthology.num_instances_of_type(ctx.cluster, 'osd') == 2, \
- 'dump_stuck requires exactly 2 osds'
-
- timeout = 60
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.wait_for_clean(timeout)
-
- manager.raw_cluster_cmd('tell', 'mon.0', 'injectargs', '--',
-# '--mon-osd-report-timeout 90',
- '--mon-pg-stuck-threshold 10')
-
- check_stuck(
- manager,
- num_inactive=0,
- num_unclean=0,
- num_stale=0,
- )
- num_pgs = manager.get_num_pgs()
-
- manager.mark_out_osd(0)
- time.sleep(timeout)
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.wait_for_recovery(timeout)
-
- check_stuck(
- manager,
- num_inactive=0,
- num_unclean=num_pgs,
- num_stale=0,
- )
-
- manager.mark_in_osd(0)
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.wait_for_clean(timeout)
-
- check_stuck(
- manager,
- num_inactive=0,
- num_unclean=0,
- num_stale=0,
- )
-
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'):
- manager.kill_osd(id_)
- manager.mark_down_osd(id_)
-
- starttime = time.time()
- done = False
- while not done:
- try:
- check_stuck(
- manager,
- num_inactive=0,
- num_unclean=0,
- num_stale=num_pgs,
- )
- done = True
- except AssertionError:
- # wait up to 15 minutes to become stale
- if time.time() - starttime > 900:
- raise
-
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'):
- manager.revive_osd(id_)
- manager.mark_in_osd(id_)
- while True:
- try:
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- break
- except Exception:
- log.exception('osds must not be started yet, waiting...')
- time.sleep(1)
- manager.wait_for_clean(timeout)
-
- check_stuck(
- manager,
- num_inactive=0,
- num_unclean=0,
- num_stale=0,
- )
+++ /dev/null
-"""
-Lost_unfound
-"""
-import logging
-import ceph_manager
-from teuthology import misc as teuthology
-from util.rados import rados
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Test handling of lost objects on an ec pool.
-
- A pretty rigid cluster is brought up and tested by this task
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'lost_unfound task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
- manager.wait_for_clean()
-
-
- pool = manager.create_pool_with_unique_name(
- ec_pool=True,
- ec_m=2,
- ec_k=2)
-
- # something that is always there
- dummyfile = '/etc/fstab'
-
- # kludge to make sure they get a map
- rados(ctx, mon, ['-p', pool, 'put', 'dummy', dummyfile])
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.wait_for_recovery()
-
- # create old objects
- for f in range(1, 10):
- rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', pool, 'rm', 'existed_%d' % f])
-
- # delay recovery, and make the pg log very long (to prevent backfill)
- manager.raw_cluster_cmd(
- 'tell', 'osd.1',
- 'injectargs',
- '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
- )
-
- manager.kill_osd(0)
- manager.mark_down_osd(0)
- manager.kill_osd(3)
- manager.mark_down_osd(3)
-
- for f in range(1, 10):
- rados(ctx, mon, ['-p', pool, 'put', 'new_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile])
-
- # take out osd.1 and a necessary shard of those objects.
- manager.kill_osd(1)
- manager.mark_down_osd(1)
- manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
- manager.revive_osd(0)
- manager.wait_till_osd_is_up(0)
- manager.revive_osd(3)
- manager.wait_till_osd_is_up(3)
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
- manager.wait_till_active()
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
-
- # verify that there are unfound objects
- unfound = manager.get_num_unfound_objects()
- log.info("there are %d unfound objects" % unfound)
- assert unfound
-
- # mark stuff lost
- pgs = manager.get_pg_stats()
- for pg in pgs:
- if pg['stat_sum']['num_objects_unfound'] > 0:
- # verify that i can list them direct from the osd
- log.info('listing missing/lost in %s state %s', pg['pgid'],
- pg['state']);
- m = manager.list_pg_missing(pg['pgid'])
- log.info('%s' % m)
- assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
-
- log.info("reverting unfound in %s", pg['pgid'])
- manager.raw_cluster_cmd('pg', pg['pgid'],
- 'mark_unfound_lost', 'delete')
- else:
- log.info("no unfound in %s", pg['pgid'])
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
- manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
- manager.raw_cluster_cmd('tell', 'osd.3', 'debug', 'kick_recovery_wq', '5')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
- manager.wait_for_recovery()
-
- # verify result
- for f in range(1, 10):
- err = rados(ctx, mon, ['-p', pool, 'get', 'new_%d' % f, '-'])
- assert err
- err = rados(ctx, mon, ['-p', pool, 'get', 'existed_%d' % f, '-'])
- assert err
- err = rados(ctx, mon, ['-p', pool, 'get', 'existing_%d' % f, '-'])
- assert err
-
- # see if osd.1 can cope
- manager.revive_osd(1)
- manager.wait_till_osd_is_up(1)
- manager.wait_for_clean()
+++ /dev/null
-"""
-Filestore/filejournal handler
-"""
-import logging
-from teuthology.orchestra import run
-import random
-
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Test filestore/filejournal handling of non-idempotent events.
-
- Currently this is a kludge; we require that the ceph task precedes us just
- so that we get the tarball installed to run the test binary.
-
- :param ctx: Context
- :param config: Configuration
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- # just use the first client...
- client = clients[0];
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
-
- testdir = teuthology.get_testdir(ctx)
-
- dir = '%s/data/test.%s' % (testdir, client)
-
- seed = str(int(random.uniform(1,100)))
-
- try:
- log.info('creating a working dir')
- remote.run(args=['mkdir', dir])
- remote.run(
- args=[
- 'cd', dir,
- run.Raw('&&'),
- 'wget','-q', '-Orun_seed_to.sh',
- 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to.sh;hb=HEAD',
- run.Raw('&&'),
- 'wget','-q', '-Orun_seed_to_range.sh',
- 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to_range.sh;hb=HEAD',
- run.Raw('&&'),
- 'chmod', '+x', 'run_seed_to.sh', 'run_seed_to_range.sh',
- ]);
-
- log.info('running a series of tests')
- proc = remote.run(
- args=[
- 'cd', dir,
- run.Raw('&&'),
- './run_seed_to_range.sh', seed, '50', '300',
- ],
- wait=False,
- check_status=False)
- result = proc.wait();
-
- if result != 0:
- remote.run(
- args=[
- 'cp', '-a', dir, '{tdir}/archive/idempotent_failure'.format(tdir=testdir),
- ])
- raise Exception("./run_seed_to_range.sh errored out")
-
- finally:
- remote.run(args=[
- 'rm', '-rf', '--', dir
- ])
-
+++ /dev/null
-"""
-Mount/unmount a ``kernel`` client.
-"""
-import contextlib
-import logging
-import os
-
-from teuthology import misc as teuthology
-from util.kclient import write_secret_file
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Mount/unmount a ``kernel`` client.
-
- The config is optional and defaults to mounting on all clients. If
- a config is given, it is expected to be a list of clients to do
- this operation on. This lets you e.g. set up one client with
- ``ceph-fuse`` and another with ``kclient``.
-
- Example that mounts all clients::
-
- tasks:
- - ceph:
- - kclient:
- - interactive:
-
- Example that uses both ``kclient`` and ``ceph-fuse``::
-
- tasks:
- - ceph:
- - ceph-fuse: [client.0]
- - kclient: [client.1]
- - interactive:
-
- :param ctx: Context
- :param config: Configuration
- """
- log.info('Mounting kernel clients...')
- assert config is None or isinstance(config, list), \
- "task kclient got invalid config"
-
- if config is None:
- config = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- clients = list(teuthology.get_clients(ctx=ctx, roles=config))
-
- testdir = teuthology.get_testdir(ctx)
-
- for id_, remote in clients:
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
- id=id_, remote=remote, mnt=mnt))
-
- # figure mon ips
- remotes_and_roles = ctx.cluster.remotes.items()
- roles = [roles for (remote_, roles) in remotes_and_roles]
- ips = [host for (host, port) in (remote_.ssh.get_transport().getpeername() for (remote_, roles) in remotes_and_roles)]
- mons = teuthology.get_mons(roles, ips).values()
-
- keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
- secret = '{tdir}/data/client.{id}.secret'.format(tdir=testdir, id=id_)
- write_secret_file(ctx, remote, 'client.{id}'.format(id=id_),
- keyring, secret)
-
- remote.run(
- args=[
- 'mkdir',
- '--',
- mnt,
- ],
- )
-
- remote.run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- '/sbin/mount.ceph',
- '{mons}:/'.format(mons=','.join(mons)),
- mnt,
- '-v',
- '-o',
- 'name={id},secretfile={secret}'.format(id=id_,
- secret=secret),
- ],
- )
-
- try:
- yield
- finally:
- log.info('Unmounting kernel clients...')
- for id_, remote in clients:
- log.debug('Unmounting client client.{id}...'.format(id=id_))
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- remote.run(
- args=[
- 'sudo',
- 'umount',
- mnt,
- ],
- )
- remote.run(
- args=[
- 'rmdir',
- '--',
- mnt,
- ],
- )
+++ /dev/null
-"""
-locktests
-"""
-import logging
-
-from teuthology.orchestra import run
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Run locktests, from the xfstests suite, on the given
- clients. Whether the clients are ceph-fuse or kernel does not
- matter, and the two clients can refer to the same mount.
-
- The config is a list of two clients to run the locktest on. The
- first client will be the host.
-
- For example:
- tasks:
- - ceph:
- - ceph-fuse: [client.0, client.1]
- - locktest:
- [client.0, client.1]
-
- This task does not yield; there would be little point.
-
- :param ctx: Context
- :param config: Configuration
- """
-
- assert isinstance(config, list)
- log.info('fetching and building locktests...')
- (host,) = ctx.cluster.only(config[0]).remotes
- (client,) = ctx.cluster.only(config[1]).remotes
- ( _, _, host_id) = config[0].partition('.')
- ( _, _, client_id) = config[1].partition('.')
- testdir = teuthology.get_testdir(ctx)
- hostmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=host_id)
- clientmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=client_id)
-
- try:
- for client_name in config:
- log.info('building on {client_}'.format(client_=client_name))
- ctx.cluster.only(client_name).run(
- args=[
- # explicitly does not support multiple locktest tasks
- # in a single run; the result archival would conflict
- 'mkdir', '{tdir}/archive/locktest'.format(tdir=testdir),
- run.Raw('&&'),
- 'mkdir', '{tdir}/locktest'.format(tdir=testdir),
- run.Raw('&&'),
- 'wget',
- '-nv',
- 'https://raw.github.com/gregsfortytwo/xfstests-ceph/master/src/locktest.c',
- '-O', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
- run.Raw('&&'),
- 'g++', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
- '-o', '{tdir}/locktest/locktest'.format(tdir=testdir)
- ],
- logger=log.getChild('locktest_client.{id}'.format(id=client_name)),
- )
-
- log.info('built locktest on each client')
-
- host.run(args=['sudo', 'touch',
- '{mnt}/locktestfile'.format(mnt=hostmnt),
- run.Raw('&&'),
- 'sudo', 'chown', 'ubuntu.ubuntu',
- '{mnt}/locktestfile'.format(mnt=hostmnt)
- ]
- )
-
- log.info('starting on host')
- hostproc = host.run(
- args=[
- '{tdir}/locktest/locktest'.format(tdir=testdir),
- '-p', '6788',
- '-d',
- '{mnt}/locktestfile'.format(mnt=hostmnt),
- ],
- wait=False,
- logger=log.getChild('locktest.host'),
- )
- log.info('starting on client')
- (_,_,hostaddr) = host.name.partition('@')
- clientproc = client.run(
- args=[
- '{tdir}/locktest/locktest'.format(tdir=testdir),
- '-p', '6788',
- '-d',
- '-h', hostaddr,
- '{mnt}/locktestfile'.format(mnt=clientmnt),
- ],
- logger=log.getChild('locktest.client'),
- wait=False
- )
-
- hostresult = hostproc.wait()
- clientresult = clientproc.wait()
- if (hostresult != 0) or (clientresult != 0):
- raise Exception("Did not pass locking test!")
- log.info('finished locktest executable with results {r} and {s}'. \
- format(r=hostresult, s=clientresult))
-
- finally:
- log.info('cleaning up host dir')
- host.run(
- args=[
- 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir),
- run.Raw('&&'),
- 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
- run.Raw('&&'),
- 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir),
- run.Raw('&&'),
- 'rmdir', '{tdir}/locktest'.format(tdir=testdir)
- ],
- logger=log.getChild('.{id}'.format(id=config[0])),
- )
- log.info('cleaning up client dir')
- client.run(
- args=[
- 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir),
- run.Raw('&&'),
- 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
- run.Raw('&&'),
- 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir),
- run.Raw('&&'),
- 'rmdir', '{tdir}/locktest'.format(tdir=testdir)
- ],
- logger=log.getChild('.{id}'.format(\
- id=config[1])),
- )
+++ /dev/null
-"""
-Lost_unfound
-"""
-import logging
-import time
-import ceph_manager
-from teuthology import misc as teuthology
-from util.rados import rados
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Test handling of lost objects.
-
- A pretty rigid cluster is brought up and tested by this task
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'lost_unfound task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- while len(manager.get_osd_status()['up']) < 3:
- time.sleep(10)
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_clean()
-
- # something that is always there
- dummyfile = '/etc/fstab'
-
- # take an osd out until the very end
- manager.kill_osd(2)
- manager.mark_down_osd(2)
- manager.mark_out_osd(2)
-
- # kludge to make sure they get a map
- rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.wait_for_recovery()
-
- # create old objects
- for f in range(1, 10):
- rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', 'data', 'rm', 'existed_%d' % f])
-
- # delay recovery, and make the pg log very long (to prevent backfill)
- manager.raw_cluster_cmd(
- 'tell', 'osd.1',
- 'injectargs',
- '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
- )
-
- manager.kill_osd(0)
- manager.mark_down_osd(0)
-
- for f in range(1, 10):
- rados(ctx, mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
-
- # bring osd.0 back up, let it peer, but don't replicate the new
- # objects...
- log.info('osd.0 command_args is %s' % 'foo')
- log.info(ctx.daemons.get_daemon('osd', 0).command_args)
- ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([
- '--osd-recovery-delay-start', '1000'
- ])
- manager.revive_osd(0)
- manager.mark_in_osd(0)
- manager.wait_till_osd_is_up(0)
-
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.wait_till_active()
-
- # take out osd.1 and the only copy of those objects.
- manager.kill_osd(1)
- manager.mark_down_osd(1)
- manager.mark_out_osd(1)
- manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
-
- # bring up osd.2 so that things would otherwise, in theory, recover fully
- manager.revive_osd(2)
- manager.mark_in_osd(2)
- manager.wait_till_osd_is_up(2)
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_till_active()
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
-
- # verify that there are unfound objects
- unfound = manager.get_num_unfound_objects()
- log.info("there are %d unfound objects" % unfound)
- assert unfound
-
- # mark stuff lost
- pgs = manager.get_pg_stats()
- for pg in pgs:
- if pg['stat_sum']['num_objects_unfound'] > 0:
- primary = 'osd.%d' % pg['acting'][0]
-
- # verify that i can list them direct from the osd
- log.info('listing missing/lost in %s state %s', pg['pgid'],
- pg['state']);
- m = manager.list_pg_missing(pg['pgid'])
- #log.info('%s' % m)
- assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
- num_unfound=0
- for o in m['objects']:
- if len(o['locations']) == 0:
- num_unfound += 1
- assert m['num_unfound'] == num_unfound
-
- log.info("reverting unfound in %s on %s", pg['pgid'], primary)
- manager.raw_cluster_cmd('pg', pg['pgid'],
- 'mark_unfound_lost', 'revert')
- else:
- log.info("no unfound in %s", pg['pgid'])
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
- manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_recovery()
-
- # verify result
- for f in range(1, 10):
- err = rados(ctx, mon, ['-p', 'data', 'get', 'new_%d' % f, '-'])
- assert err
- err = rados(ctx, mon, ['-p', 'data', 'get', 'existed_%d' % f, '-'])
- assert err
- err = rados(ctx, mon, ['-p', 'data', 'get', 'existing_%d' % f, '-'])
- assert not err
-
- # see if osd.1 can cope
- manager.revive_osd(1)
- manager.mark_in_osd(1)
- manager.wait_till_osd_is_up(1)
- manager.wait_for_clean()
+++ /dev/null
-"""
-Force pg creation on all osds
-"""
-from teuthology import misc as teuthology
-from teuthology.orchestra import run
-import logging
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Create the specified number of pools and write 16 objects to them (thereby forcing
- the PG creation on each OSD). This task creates pools from all the clients,
- in parallel. It is easy to add other daemon types which have the appropriate
- permissions, but I don't think anything else does.
- The config is just the number of pools to create. I recommend setting
- "mon create pg interval" to a very low value in your ceph config to speed
- this up.
-
- You probably want to do this to look at memory consumption, and
- maybe to test how performance changes with the number of PGs. For example:
-
- tasks:
- - ceph:
- config:
- mon:
- mon create pg interval: 1
- - manypools: 3000
- - radosbench:
- clients: [client.0]
- time: 360
- """
-
- log.info('creating {n} pools'.format(n=config))
-
- poolnum = int(config)
- creator_remotes = []
- client_roles = teuthology.all_roles_of_type(ctx.cluster, 'client')
- log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles))
- for role in client_roles:
- log.info('role={role_}'.format(role_=role))
- (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys()
- creator_remotes.append((creator_remote, 'client.{id}'.format(id=role)))
-
- remaining_pools = poolnum
- poolprocs=dict()
- while (remaining_pools > 0):
- log.info('{n} pools remaining to create'.format(n=remaining_pools))
- for remote, role_ in creator_remotes:
- poolnum = remaining_pools
- remaining_pools -= 1
- if remaining_pools < 0:
- continue
- log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
- proc = remote.run(
- args=[
- 'rados',
- '--name', role_,
- 'mkpool', 'pool{num}'.format(num=poolnum), '-1',
- run.Raw('&&'),
- 'rados',
- '--name', role_,
- '--pool', 'pool{num}'.format(num=poolnum),
- 'bench', '0', 'write', '-t', '16', '--block-size', '1'
- ],
- wait = False
- )
- log.info('waiting for pool and object creates')
- poolprocs[remote] = proc
-
- run.wait(poolprocs.itervalues())
-
- log.info('created all {n} pools and wrote 16 objects to each'.format(n=int(config)))
+++ /dev/null
-
-import logging
-import contextlib
-import time
-import ceph_manager
-from teuthology import misc
-from teuthology.orchestra.run import CommandFailedError, Raw
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Go through filesystem creation with a synthetic failure in an MDS
- in its 'up:creating' state, to exercise the retry behaviour.
- """
- # Grab handles to the teuthology objects of interest
- mdslist = list(misc.all_roles_of_type(ctx.cluster, 'mds'))
- if len(mdslist) != 1:
- # Require exactly one MDS: the code path for creation failure is
- # different when a standby is available
- raise RuntimeError("This task requires exactly one MDS")
-
- mds_id = mdslist[0]
- (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys()
- manager = ceph_manager.CephManager(
- mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'),
- )
-
- # Stop the MDS and reset the filesystem so that next start will go into CREATING
- mds = ctx.daemons.get_daemon('mds', mds_id)
- mds.stop()
- data_pool_id = manager.get_pool_num("data")
- md_pool_id = manager.get_pool_num("metadata")
- manager.raw_cluster_cmd_result('mds', 'newfs', str(md_pool_id), str(data_pool_id),
- '--yes-i-really-mean-it')
-
- # Start the MDS with mds_kill_create_at set, it will crash during creation
- mds.restart_with_args(["--mds_kill_create_at=1"])
- try:
- mds.wait_for_exit()
- except CommandFailedError as e:
- if e.exitstatus == 1:
- log.info("MDS creation killed as expected")
- else:
- log.error("Unexpected status code %s" % e.exitstatus)
- raise
-
- # Since I have intentionally caused a crash, I will clean up the resulting core
- # file to avoid task.internal.coredump seeing it as a failure.
- log.info("Removing core file from synthetic MDS failure")
- mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))])
-
- # It should have left the MDS map state still in CREATING
- status = manager.get_mds_status(mds_id)
- assert status['state'] == 'up:creating'
-
- # Start the MDS again without the kill flag set, it should proceed with creation successfully
- mds.restart()
-
- # Wait for state ACTIVE
- t = 0
- create_timeout = 120
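- # Poll the MDS state once per second; creation must reach up:active within
- # create_timeout seconds or the task fails.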
- while True:
- status = manager.get_mds_status(mds_id)
- if status['state'] == 'up:active':
- log.info("MDS creation completed successfully")
- break
- elif status['state'] == 'up:creating':
- log.info("MDS still in creating state")
- if t > create_timeout:
- log.error("Creating did not complete within %ss" % create_timeout)
- raise RuntimeError("Creating did not complete within %ss" % create_timeout)
- t += 1
- time.sleep(1)
- else:
- log.error("Unexpected MDS state: %s" % status['state'])
- assert(status['state'] in ['up:active', 'up:creating'])
-
- # The system should be back up in a happy healthy state, go ahead and run any further tasks
- # inside this context.
- yield
+++ /dev/null
-"""
-Thrash mds by simulating failures
-"""
-import logging
-import contextlib
-import ceph_manager
-import random
-import time
-from gevent.greenlet import Greenlet
-from gevent.event import Event
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-
-class MDSThrasher(Greenlet):
- """
- MDSThrasher::
-
- The MDSThrasher thrashes MDSs during execution of other tasks (workunits, etc).
-
- The config is optional. Many of the config parameters are a maximum value
- to use when selecting a random value from a range. To always use the maximum
- value, set randomize to false. The config is a dict containing some or all of:
-
- seed: [no default] seed the random number generator
-
- randomize: [default: true] enables randomization; when false, the maximum values are always used
-
- max_thrash: [default: 1] the maximum number of MDSs that will be thrashed at
- any given time.
-
- max_thrash_delay: [default: 30] maximum number of seconds to delay before
- thrashing again.
-
- max_revive_delay: [default: 10] maximum number of seconds to delay before
- bringing back a thrashed MDS
-
- thrash_in_replay: [default: 0.0] likelihood that the MDS will be thrashed
- during replay. Value should be between 0.0 and 1.0
-
- max_replay_thrash_delay: [default: 4] maximum number of seconds to delay while in
- the replay state before thrashing
-
- thrash_weights: allows specific MDSs to be thrashed more/less frequently. This option
- overrides anything specified by max_thrash. This option is a dict containing
- mds.x: weight pairs. For example, {mds.a: 0.7, mds.b: 0.3, mds.c: 0.0}. Each weight
- is a value from 0.0 to 1.0. Any MDSs not specified will automatically be
- given a weight of 0.0. For a given MDS, by default the thrasher delays for up
- to max_thrash_delay, thrashes, waits for the MDS to recover, and iterates. If a non-zero
- weight is specified for an MDS, for each iteration the thrasher chooses whether to thrash
- during that iteration based on a random value [0-1] not exceeding the weight of that MDS.
-
- Examples::
-
-
- The following example sets the likelihood that mds.a will be thrashed
- to 80%, mds.b to 20%, and other MDSs will not be thrashed. It also sets the
- likelihood that an MDS will be thrashed in replay to 40%.
- Thrash weights do not have to sum to 1.
-
- tasks:
- - ceph:
- - mds_thrash:
- thrash_weights:
- - mds.a: 0.8
- - mds.b: 0.2
- thrash_in_replay: 0.4
- - ceph-fuse:
- - workunit:
- clients:
- all: [suites/fsx.sh]
-
- The following example disables randomization, and uses the max delay values:
-
- tasks:
- - ceph:
- - mds_thrash:
- max_thrash_delay: 10
- max_revive_delay: 1
- max_replay_thrash_delay: 4
-
- """
-
- def __init__(self, ctx, manager, config, logger, failure_group, weight):
- super(MDSThrasher, self).__init__()
-
- self.ctx = ctx
- self.manager = manager
- assert self.manager.is_clean()
-
- self.stopping = Event()
- self.logger = logger
- self.config = config
-
- self.randomize = bool(self.config.get('randomize', True))
- self.max_thrash_delay = float(self.config.get('max_thrash_delay', 30.0))
- self.thrash_in_replay = float(self.config.get('thrash_in_replay', 0.0))
- assert self.thrash_in_replay >= 0.0 and self.thrash_in_replay <= 1.0, 'thrash_in_replay ({v}) must be between [0.0, 1.0]'.format(
- v=self.thrash_in_replay)
-
- self.max_replay_thrash_delay = float(self.config.get('max_replay_thrash_delay', 4.0))
-
- self.max_revive_delay = float(self.config.get('max_revive_delay', 10.0))
-
- self.failure_group = failure_group
- self.weight = weight
-
- def _run(self):
- try:
- self.do_thrash()
- except:
- # Log exceptions here so we get the full backtrace (it's lost
- # by the time someone does a .get() on this greenlet)
- self.logger.exception("Exception in do_thrash:")
- raise
-
- def log(self, x):
- """Write data to logger assigned to this MDThrasher"""
- self.logger.info(x)
-
- def stop(self):
- self.stopping.set()
-
- def do_thrash(self):
- """
- Perform the random thrashing action
- """
- self.log('starting mds_do_thrash for failure group: ' + ', '.join(
- ['mds.{_id}'.format(_id=_f) for _f in self.failure_group]))
- while not self.stopping.is_set():
- delay = self.max_thrash_delay
- if self.randomize:
- delay = random.randrange(0.0, self.max_thrash_delay)
-
- if delay > 0.0:
- self.log('waiting for {delay} secs before thrashing'.format(delay=delay))
- self.stopping.wait(delay)
- if self.stopping.is_set():
- continue
-
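- # Weighted skip: draw a uniform value in [0, 1); with probability (1 - weight)
- # this iteration is skipped, so an MDS with weight 0.3 is thrashed ~30% of the time.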
- skip = random.random()
- if self.weight < 1.0 and skip > self.weight:
- self.log('skipping thrash iteration with skip ({skip}) > weight ({weight})'.format(skip=skip,
- weight=self.weight))
- continue
-
- # find the active mds in the failure group
- statuses = [self.manager.get_mds_status(m) for m in self.failure_group]
- actives = filter(lambda s: s and s['state'] == 'up:active', statuses)
- assert len(actives) == 1, 'Can only have one active in a failure group'
-
- active_mds = actives[0]['name']
- active_rank = actives[0]['rank']
-
- self.log('kill mds.{id} (rank={r})'.format(id=active_mds, r=active_rank))
- self.manager.kill_mds_by_rank(active_rank)
-
- # wait for mon to report killed mds as crashed
- last_laggy_since = None
- itercount = 0
- while True:
- failed = self.manager.get_mds_status_all()['failed']
- status = self.manager.get_mds_status(active_mds)
- if not status:
- break
- if 'laggy_since' in status:
- last_laggy_since = status['laggy_since']
- break
- if any([(f == active_mds) for f in failed]):
- break
- self.log(
- 'waiting till mds map indicates mds.{_id} is laggy/crashed, in failed state, or mds.{_id} is removed from mdsmap'.format(
- _id=active_mds))
- itercount = itercount + 1
- if itercount > 10:
- self.log('mds map: {status}'.format(status=self.manager.get_mds_status_all()))
- time.sleep(2)
- if last_laggy_since:
- self.log(
- 'mds.{_id} reported laggy/crashed since: {since}'.format(_id=active_mds, since=last_laggy_since))
- else:
- self.log('mds.{_id} down, removed from mdsmap'.format(_id=active_mds, since=last_laggy_since))
-
- # wait for a standby mds to takeover and become active
- takeover_mds = None
- takeover_rank = None
- itercount = 0
- while True:
- statuses = [self.manager.get_mds_status(m) for m in self.failure_group]
- actives = filter(lambda s: s and s['state'] == 'up:active', statuses)
- if len(actives) > 0:
- assert len(actives) == 1, 'Can only have one active in failure group'
- takeover_mds = actives[0]['name']
- takeover_rank = actives[0]['rank']
- break
- itercount = itercount + 1
- if itercount > 10:
- self.log('mds map: {status}'.format(status=self.manager.get_mds_status_all()))
-
- self.log('New active mds is mds.{_id}'.format(_id=takeover_mds))
-
- # wait for a while before restarting old active to become new
- # standby
- delay = self.max_revive_delay
- if self.randomize:
- delay = random.randrange(0.0, self.max_revive_delay)
-
- self.log('waiting for {delay} secs before reviving mds.{id}'.format(
- delay=delay, id=active_mds))
- time.sleep(delay)
-
- self.log('reviving mds.{id}'.format(id=active_mds))
- self.manager.revive_mds(active_mds, standby_for_rank=takeover_rank)
-
- status = {}
- while True:
- status = self.manager.get_mds_status(active_mds)
- if status and (status['state'] == 'up:standby' or status['state'] == 'up:standby-replay'):
- break
- self.log(
- 'waiting till mds map indicates mds.{_id} is in standby or standby-replay'.format(_id=active_mds))
- time.sleep(2)
- self.log('mds.{_id} reported in {state} state'.format(_id=active_mds, state=status['state']))
-
- # don't do replay thrashing right now
- continue
- # this might race with replay -> active transition...
- if status['state'] == 'up:replay' and random.randrange(0.0, 1.0) < self.thrash_in_replay:
-
- delay = self.max_replay_thrash_delay
- if self.randomize:
- delay = random.randrange(0.0, self.max_replay_thrash_delay)
- time.sleep(delay)
- self.log('kill replaying mds.{id}'.format(id=self.to_kill))
- self.manager.kill_mds(self.to_kill)
-
- delay = self.max_revive_delay
- if self.randomize:
- delay = random.randrange(0.0, self.max_revive_delay)
-
- self.log('waiting for {delay} secs before reviving mds.{id}'.format(
- delay=delay, id=self.to_kill))
- time.sleep(delay)
-
- self.log('revive mds.{id}'.format(id=self.to_kill))
- self.manager.revive_mds(self.to_kill)
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Stress test the mds by thrashing while another task/workunit
- is running.
-
- Please refer to MDSThrasher class for further information on the
- available options.
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'mds_thrash task only accepts a dict for configuration'
- mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
- assert len(mdslist) > 1, \
- 'mds_thrash task requires at least 2 metadata servers'
-
- # choose random seed
- seed = None
- if 'seed' in config:
- seed = int(config['seed'])
- else:
- seed = int(time.time())
- log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
- random.seed(seed)
-
- max_thrashers = config.get('max_thrash', 1)
- thrashers = {}
-
- (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys()
- manager = ceph_manager.CephManager(
- first, ctx=ctx, logger=log.getChild('ceph_manager'),
- )
-
- # make sure everyone is in active, standby, or standby-replay
- log.info('Wait for all MDSs to reach steady state...')
- statuses = None
- statuses_by_rank = None
- while True:
- statuses = {m: manager.get_mds_status(m) for m in mdslist}
- statuses_by_rank = {}
- for _, s in statuses.iteritems():
- if isinstance(s, dict):
- statuses_by_rank[s['rank']] = s
-
- ready = filter(lambda (_, s): s is not None and (s['state'] == 'up:active'
- or s['state'] == 'up:standby'
- or s['state'] == 'up:standby-replay'),
- statuses.items())
- if len(ready) == len(statuses):
- break
- time.sleep(2)
- log.info('Ready to start thrashing')
-
- # setup failure groups
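- # A failure group is one active MDS plus every standby whose standby_for_name
- # points at it; each group gets its own MDSThrasher below.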
- failure_groups = {}
- actives = {s['name']: s for (_, s) in statuses.iteritems() if s['state'] == 'up:active'}
- log.info('Actives is: {d}'.format(d=actives))
- log.info('Statuses is: {d}'.format(d=statuses_by_rank))
- for active in actives:
- for (r, s) in statuses.iteritems():
- if s['standby_for_name'] == active:
- if active not in failure_groups:
- failure_groups[active] = []
- log.info('Assigning mds rank {r} to failure group {g}'.format(r=r, g=active))
- failure_groups[active].append(r)
-
- manager.wait_for_clean()
- for (active, standbys) in failure_groups.iteritems():
- weight = 1.0
- if 'thrash_weights' in config:
- weight = float(config['thrash_weights'].get('mds.{_id}'.format(_id=active), '0.0'))
-
- failure_group = [active]
- failure_group.extend(standbys)
-
- thrasher = MDSThrasher(
- ctx, manager, config,
- logger=log.getChild('mds_thrasher.failure_group.[{a}, {sbs}]'.format(
- a=active,
- sbs=', '.join(standbys)
- )
- ),
- failure_group=failure_group,
- weight=weight)
- thrasher.start()
- thrashers[active] = thrasher
-
- # if thrash_weights isn't specified and we've reached max_thrash,
- # we're done
- if 'thrash_weights' not in config and len(thrashers) == max_thrashers:
- break
-
- try:
- log.debug('Yielding')
- yield
- finally:
- log.info('joining mds_thrashers')
- for t in thrashers:
- log.info('join thrasher for failure group of mds.{a}'.format(a=t))
- thrashers[t].stop()
- thrashers[t].join()
- log.info('done joining')
+++ /dev/null
-instance-id: test
-local-hostname: test
+++ /dev/null
-"""
-Handle clock skews in monitors.
-"""
-import logging
-import contextlib
-import ceph_manager
-import time
-import gevent
-from StringIO import StringIO
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-class ClockSkewCheck:
- """
- Periodically check if there are any clock skews among the monitors in the
- quorum. By default, assume no skews are supposed to exist; that can be
- changed using the 'expect-skew' option. If 'never-fail' is set to true,
- then we will always succeed and only report skews if any are found.
-
- This class does not spawn a thread. It assumes that, if that is indeed
- wanted, it should be done by a third party (for instance, the task using
- this class). We intend it as such in order to reuse this class if need be.
-
- This task accepts the following options:
-
- interval amount of seconds to wait in-between checks. (default: 30.0)
- max-skew maximum skew, in seconds, that is considered tolerable before
- issuing a warning. (default: 0.05)
- expect-skew 'true' or 'false', to indicate whether to expect a skew during
- the run or not. If 'true', the test will fail if no skew is
- found, and succeed if a skew is indeed found; if 'false', it's
- the other way around. (default: false)
- never-fail Don't fail the run if a skew is detected and we weren't
- expecting it, or if no skew is detected and we were expecting
- it. (default: False)
-
- at-least-once Runs at least once, even if we are told to stop.
- (default: True)
- at-least-once-timeout If we were told to stop but we are attempting to
- run at least once, timeout after this many seconds.
- (default: 600)
-
- Example:
- Expect a skew higher than 0.05 seconds, but only report it without
- failing the teuthology run.
-
- - mon_clock_skew_check:
- interval: 30
- max-skew: 0.05
- expect-skew: true
- never-fail: true
- """
-
- def __init__(self, ctx, manager, config, logger):
- self.ctx = ctx
- self.manager = manager
-
- self.stopping = False
- self.logger = logger
- self.config = config
-
- if self.config is None:
- self.config = dict()
-
- self.check_interval = float(self.config.get('interval', 30.0))
-
- first_mon = teuthology.get_first_mon(ctx, config)
- remote = ctx.cluster.only(first_mon).remotes.keys()[0]
- proc = remote.run(
- args=[
- 'sudo',
- 'ceph-mon',
- '-i', first_mon[4:],
- '--show-config-value', 'mon_clock_drift_allowed'
- ], stdout=StringIO(), wait=True
- )
- self.max_skew = self.config.get('max-skew', float(proc.stdout.getvalue()))
-
- self.expect_skew = self.config.get('expect-skew', False)
- self.never_fail = self.config.get('never-fail', False)
- self.at_least_once = self.config.get('at-least-once', True)
- self.at_least_once_timeout = self.config.get('at-least-once-timeout', 600.0)
-
- def info(self, x):
- """
- locally define logger for info messages
- """
- self.logger.info(x)
-
- def warn(self, x):
- """
- locally define logger for warnings
- """
- self.logger.warn(x)
-
- def debug(self, x):
- """
- locally define logger for debug messages
- """
- self.logger.debug(x)
-
- def finish(self):
- """
- Break out of the do_check loop.
- """
- self.stopping = True
-
- def sleep_interval(self):
- """
- If a sleep interval is set, sleep for that amount of time.
- """
- if self.check_interval > 0.0:
- self.debug('sleeping for {s} seconds'.format(
- s=self.check_interval))
- time.sleep(self.check_interval)
-
- def print_skews(self, skews):
- """
- Display skew values.
- """
- total = len(skews)
- if total > 0:
- self.info('---------- found {n} skews ----------'.format(n=total))
- for mon_id, values in skews.iteritems():
- self.info('mon.{id}: {v}'.format(id=mon_id, v=values))
- self.info('-------------------------------------')
- else:
- self.info('---------- no skews were found ----------')
-
- def do_check(self):
- """
- Clock skew checker. Loops until finish() is called.
- """
- self.info('start checking for clock skews')
- skews = dict()
- ran_once = False
-
- started_on = None
-
- while not self.stopping or (self.at_least_once and not ran_once):
-
- if self.at_least_once and not ran_once and self.stopping:
- if started_on is None:
- self.info('kicking-off timeout (if any)')
- started_on = time.time()
- elif self.at_least_once_timeout > 0.0:
- assert time.time() - started_on < self.at_least_once_timeout, \
- 'failed to obtain a timecheck before timeout expired'
-
- quorum_size = len(teuthology.get_mon_names(self.ctx))
- self.manager.wait_for_mon_quorum_size(quorum_size)
-
- health = self.manager.get_mon_health(True)
- timechecks = health['timechecks']
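- # The mon leader publishes timecheck rounds; the code below only uses a round
- # once its round_status is 'finished' (which should carry an even round number)
- # or, if still 'on-going', once per-mon reports are already present.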
-
- clean_check = False
-
- if timechecks['round_status'] == 'finished':
- assert (timechecks['round'] % 2) == 0, \
- 'timecheck marked as finished but round ' \
- 'disagrees (r {r})'.format(
- r=timechecks['round'])
- clean_check = True
- else:
- assert timechecks['round_status'] == 'on-going', \
- 'timecheck status expected \'on-going\' ' \
- 'but found \'{s}\' instead'.format(
- s=timechecks['round_status'])
- if 'mons' in timechecks.keys() and len(timechecks['mons']) > 1:
- self.info('round still on-going, but there are available reports')
- else:
- self.info('no timechecks available just yet')
- self.sleep_interval()
- continue
-
- assert len(timechecks['mons']) > 1, \
- 'there are not enough reported timechecks; ' \
- 'expected > 1 found {n}'.format(n=len(timechecks['mons']))
-
- for check in timechecks['mons']:
- mon_skew = float(check['skew'])
- mon_health = check['health']
- mon_id = check['name']
- if abs(mon_skew) > self.max_skew:
- assert mon_health == 'HEALTH_WARN', \
- 'mon.{id} health is \'{health}\' but skew {s} > max {ms}'.format(
- id=mon_id,health=mon_health,s=abs(mon_skew),ms=self.max_skew)
-
- log_str = 'mon.{id} with skew {s} > max {ms}'.format(
- id=mon_id,s=abs(mon_skew),ms=self.max_skew)
-
- """ add to skew list """
- details = check['details']
- skews[mon_id] = {'skew': mon_skew, 'details': details}
-
- if self.expect_skew:
- self.info('expected skew: {str}'.format(str=log_str))
- else:
- self.warn('unexpected skew: {str}'.format(str=log_str))
-
- if clean_check or (self.expect_skew and len(skews) > 0):
- ran_once = True
- self.print_skews(skews)
- self.sleep_interval()
-
- total = len(skews)
- self.print_skews(skews)
-
- error_str = ''
- found_error = False
-
- if self.expect_skew:
- if total == 0:
- error_str = 'We were expecting a skew, but none was found!'
- found_error = True
- else:
- if total > 0:
- error_str = 'We were not expecting a skew, but we did find it!'
- found_error = True
-
- if found_error:
- self.info(error_str)
- if not self.never_fail:
- assert False, error_str
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Use the ClockSkewCheck class to check for clock skews on the monitors.
- This task will spawn a thread running ClockSkewCheck's do_check().
-
- All the configuration will be directly handled by ClockSkewCheck,
- so please refer to the class documentation for further information.
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'mon_clock_skew_check task only accepts a dict for configuration'
- log.info('Beginning mon_clock_skew_check...')
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- skew_check = ClockSkewCheck(ctx,
- manager, config,
- logger=log.getChild('mon_clock_skew_check'))
- skew_check_thread = gevent.spawn(skew_check.do_check)
- try:
- yield
- finally:
- log.info('joining mon_clock_skew_check')
- skew_check.finish()
- skew_check_thread.get()
-
-
+++ /dev/null
-"""
-Monitor recovery
-"""
-import logging
-import ceph_manager
-from teuthology import misc as teuthology
-
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Test monitor recovery.
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- mons = [f.split('.')[1] for f in teuthology.get_mon_names(ctx)]
- log.info("mon ids = %s" % mons)
-
- manager.wait_for_mon_quorum_size(len(mons))
-
- log.info('verifying all monitors are in the quorum')
- for m in mons:
- s = manager.get_mon_status(m)
- assert s['state'] == 'leader' or s['state'] == 'peon'
- assert len(s['quorum']) == len(mons)
-
- log.info('restarting each monitor in turn')
- for m in mons:
- # stop a monitor
- manager.kill_mon(m)
- manager.wait_for_mon_quorum_size(len(mons) - 1)
-
- # restart
- manager.revive_mon(m)
- manager.wait_for_mon_quorum_size(len(mons))
-
- # in forward and reverse order,
- rmons = mons
- rmons.reverse()
- for mons in mons, rmons:
- log.info('stopping all monitors')
- for m in mons:
- manager.kill_mon(m)
-
- log.info('forming a minimal quorum for %s, then adding monitors' % mons)
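- # minimal quorum is a strict majority, e.g. 2 of 3 or 3 of 5 monitors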
- qnum = (len(mons) / 2) + 1
- num = 0
- for m in mons:
- manager.revive_mon(m)
- num += 1
- if num >= qnum:
- manager.wait_for_mon_quorum_size(num)
-
- # on both leader and non-leader ranks...
- for rank in [0, 1]:
- # take one out
- log.info('removing mon %s' % mons[rank])
- manager.kill_mon(mons[rank])
- manager.wait_for_mon_quorum_size(len(mons) - 1)
-
- log.info('causing some monitor log activity')
- m = 30
- for n in range(1, m):
- manager.raw_cluster_cmd('log', '%d of %d' % (n, m))
-
- log.info('adding mon %s back in' % mons[rank])
- manager.revive_mon(mons[rank])
- manager.wait_for_mon_quorum_size(len(mons))
+++ /dev/null
-"""
-Monitor thrash
-"""
-import logging
-import contextlib
-import ceph_manager
-import random
-import time
-import gevent
-import json
-import math
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-def _get_mons(ctx):
- """
- Get monitor names from the context value.
- """
- mons = [f[len('mon.'):] for f in teuthology.get_mon_names(ctx)]
- return mons
-
-class MonitorThrasher:
- """
- How it works::
-
- - pick a monitor
- - kill it
- - wait for quorum to be formed
- - sleep for 'revive_delay' seconds
- - revive monitor
- - wait for quorum to be formed
- - sleep for 'thrash_delay' seconds
-
- Options::
-
- seed Seed to use on the RNG to reproduce a previous
- behaviour (default: None; i.e., not set)
- revive_delay Number of seconds to wait before reviving
- the monitor (default: 10)
- thrash_delay Number of seconds to wait in-between
- test iterations (default: 0)
- store_thrash Thrash monitor store before killing the monitor being thrashed (default: False)
- store_thrash_probability Probability of thrashing a monitor's store
- (default: 50)
- thrash_many Thrash multiple monitors instead of just one. If
- 'maintain_quorum' is set to False, then we will
- thrash up to as many monitors as there are
- available. (default: False)
- maintain_quorum Always maintain quorum, taking care on how many
- monitors we kill during the thrashing. If we
- happen to only have one or two monitors configured,
- if this option is set to True, then we won't run
- this task as we cannot guarantee maintenance of
- quorum. Setting it to false however would allow the
- task to run with as many as just one single monitor.
- (default: True)
- freeze_mon_probability: how often to freeze the mon instead of killing it,
- in % (default: 10)
- freeze_mon_duration: how many seconds to freeze the mon (default: 15)
- scrub Scrub after each iteration (default: True)
-
- Note: if 'store_thrash' is set to True, then 'maintain_quorum' must also
- be set to True.
-
- For example::
-
- tasks:
- - ceph:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- store_thrash: true
- store_thrash_probability: 40
- seed: 31337
- maintain_quorum: true
- thrash_many: true
- - ceph-fuse:
- - workunit:
- clients:
- all:
- - mon/workloadgen.sh
- """
- def __init__(self, ctx, manager, config, logger):
- self.ctx = ctx
- self.manager = manager
- self.manager.wait_for_clean()
-
- self.stopping = False
- self.logger = logger
- self.config = config
-
- if self.config is None:
- self.config = dict()
-
- """ Test reproducibility """
- self.random_seed = self.config.get('seed', None)
-
- if self.random_seed is None:
- self.random_seed = int(time.time())
-
- self.rng = random.Random()
- self.rng.seed(int(self.random_seed))
-
- """ Monitor thrashing """
- self.revive_delay = float(self.config.get('revive_delay', 10.0))
- self.thrash_delay = float(self.config.get('thrash_delay', 0.0))
-
- self.thrash_many = self.config.get('thrash_many', False)
- self.maintain_quorum = self.config.get('maintain_quorum', True)
-
- self.scrub = self.config.get('scrub', True)
-
- self.freeze_mon_probability = float(self.config.get('freeze_mon_probability', 10))
- self.freeze_mon_duration = float(self.config.get('freeze_mon_duration', 15.0))
-
- assert self.max_killable() > 0, \
- 'Unable to kill at least one monitor with the current config.'
-
- """ Store thrashing """
- self.store_thrash = self.config.get('store_thrash', False)
- self.store_thrash_probability = int(
- self.config.get('store_thrash_probability', 50))
- if self.store_thrash:
- assert self.store_thrash_probability > 0, \
- 'store_thrash is set, probability must be > 0'
- assert self.maintain_quorum, \
- 'store_thrash = true must imply maintain_quorum = true'
-
- self.thread = gevent.spawn(self.do_thrash)
-
- def log(self, x):
- """
- locally log info messages
- """
- self.logger.info(x)
-
- def do_join(self):
- """
- Break out of this processes thrashing loop.
- """
- self.stopping = True
- self.thread.get()
-
- def should_thrash_store(self):
- """
- If allowed, indicate that we should thrash a certain percentage of
- the time as determined by the store_thrash_probability value.
- """
- if not self.store_thrash:
- return False
- return self.rng.randrange(0, 101) < self.store_thrash_probability
-
- def thrash_store(self, mon):
- """
- Thrash the monitor specified.
- :param mon: monitor to thrash
- """
- addr = self.ctx.ceph.conf['mon.%s' % mon]['mon addr']
- self.log('thrashing mon.{id}@{addr} store'.format(id=mon, addr=addr))
- out = self.manager.raw_cluster_cmd('-m', addr, 'sync', 'force')
- j = json.loads(out)
- assert j['ret'] == 0, \
- 'error forcing store sync on mon.{id}:\n{ret}'.format(
- id=mon,ret=out)
-
- def should_freeze_mon(self):
- """
- Indicate that we should freeze the mon a certain percentage of the time,
- as determined by the freeze_mon_probability value.
- """
- return self.rng.randrange(0, 101) < self.freeze_mon_probability
-
- def freeze_mon(self, mon):
- """
- Send STOP signal to freeze the monitor.
- """
- log.info('Sending STOP to mon %s', mon)
- self.manager.signal_mon(mon, 19) # STOP
-
- def unfreeze_mon(self, mon):
- """
- Send CONT signal to unfreeze the monitor.
- """
- log.info('Sending CONT to mon %s', mon)
- self.manager.signal_mon(mon, 18) # CONT
-
- def kill_mon(self, mon):
- """
- Kill the monitor specified
- """
- self.log('killing mon.{id}'.format(id=mon))
- self.manager.kill_mon(mon)
-
- def revive_mon(self, mon):
- """
- Revive the monitor specified
- """
- self.log('reviving mon.{id}'.format(id=mon))
- self.manager.revive_mon(mon)
-
- def max_killable(self):
- """
- Return the maximum number of monitors we can kill.
- """
- m = len(_get_mons(self.ctx))
- if self.maintain_quorum:
- return max(math.ceil(m/2.0)-1, 0)
- else:
- return m
-
- def do_thrash(self):
- """
- Continuously loop and thrash the monitors.
- """
- self.log('start thrashing')
- self.log('seed: {s}, revive delay: {r}, thrash delay: {t} '\
- 'thrash many: {tm}, maintain quorum: {mq} '\
- 'store thrash: {st}, probability: {stp} '\
- 'freeze mon: prob {fp} duration {fd}'.format(
- s=self.random_seed,r=self.revive_delay,t=self.thrash_delay,
- tm=self.thrash_many, mq=self.maintain_quorum,
- st=self.store_thrash,stp=self.store_thrash_probability,
- fp=self.freeze_mon_probability,fd=self.freeze_mon_duration,
- ))
-
- while not self.stopping:
- mons = _get_mons(self.ctx)
- self.manager.wait_for_mon_quorum_size(len(mons))
- self.log('making sure all monitors are in the quorum')
- for m in mons:
- s = self.manager.get_mon_status(m)
- assert s['state'] == 'leader' or s['state'] == 'peon'
- assert len(s['quorum']) == len(mons)
-
- kill_up_to = self.rng.randrange(1, self.max_killable()+1)
- mons_to_kill = self.rng.sample(mons, kill_up_to)
- self.log('monitors to thrash: {m}'.format(m=mons_to_kill))
-
- mons_to_freeze = []
- for mon in mons:
- if mon in mons_to_kill:
- continue
- if self.should_freeze_mon():
- mons_to_freeze.append(mon)
- self.log('monitors to freeze: {m}'.format(m=mons_to_freeze))
-
- for mon in mons_to_kill:
- self.log('thrashing mon.{m}'.format(m=mon))
-
- """ we only thrash stores if we are maintaining quorum """
- if self.should_thrash_store() and self.maintain_quorum:
- self.thrash_store(mon)
-
- self.kill_mon(mon)
-
- if mons_to_freeze:
- for mon in mons_to_freeze:
- self.freeze_mon(mon)
- self.log('waiting for {delay} secs to unfreeze mons'.format(
- delay=self.freeze_mon_duration))
- time.sleep(self.freeze_mon_duration)
- for mon in mons_to_freeze:
- self.unfreeze_mon(mon)
-
- if self.maintain_quorum:
- self.manager.wait_for_mon_quorum_size(len(mons)-len(mons_to_kill))
- for m in mons:
- if m in mons_to_kill:
- continue
- s = self.manager.get_mon_status(m)
- assert s['state'] == 'leader' or s['state'] == 'peon'
- assert len(s['quorum']) == len(mons)-len(mons_to_kill)
-
- self.log('waiting for {delay} secs before reviving monitors'.format(
- delay=self.revive_delay))
- time.sleep(self.revive_delay)
-
- for mon in mons_to_kill:
- self.revive_mon(mon)
- # do more freezes
- if mons_to_freeze:
- for mon in mons_to_freeze:
- self.freeze_mon(mon)
- self.log('waiting for {delay} secs to unfreeze mons'.format(
- delay=self.freeze_mon_duration))
- time.sleep(self.freeze_mon_duration)
- for mon in mons_to_freeze:
- self.unfreeze_mon(mon)
-
- self.manager.wait_for_mon_quorum_size(len(mons))
- for m in mons:
- s = self.manager.get_mon_status(m)
- assert s['state'] == 'leader' or s['state'] == 'peon'
- assert len(s['quorum']) == len(mons)
-
- if self.scrub:
- self.log('triggering scrub')
- try:
- self.manager.raw_cluster_cmd('scrub')
- except Exception:
- log.exception("Saw exception while triggering scrub")
-
- if self.thrash_delay > 0.0:
- self.log('waiting for {delay} secs before continuing thrashing'.format(
- delay=self.thrash_delay))
- time.sleep(self.thrash_delay)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Stress test the monitor by thrashing them while another task/workunit
- is running.
-
- Please refer to MonitorThrasher class for further information on the
- available options.
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'mon_thrash task only accepts a dict for configuration'
- assert len(_get_mons(ctx)) > 2, \
- 'mon_thrash task requires at least 3 monitors'
- log.info('Beginning mon_thrash...')
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
- thrash_proc = MonitorThrasher(ctx,
- manager, config,
- logger=log.getChild('mon_thrasher'))
- try:
- log.debug('Yielding')
- yield
- finally:
- log.info('joining mon_thrasher')
- thrash_proc.do_join()
- mons = _get_mons(ctx)
- manager.wait_for_mon_quorum_size(len(mons))
+++ /dev/null
-"""
-Multibench testing
-"""
-import contextlib
-import logging
-import radosbench
-import time
-import copy
-import gevent
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run multibench
-
- The config should be as follows:
-
- multibench:
- time: <seconds to run total>
- segments: <number of concurrent benches>
- radosbench: <config for radosbench>
-
- example:
-
- tasks:
- - ceph:
- - multibench:
- time: 360
- segments: 3
- radosbench:
- clients: [client.0]
- - interactive:
- """
- log.info('Beginning multibench...')
- assert isinstance(config, dict), \
- "please list clients to run on"
-
- def run_one(num):
- """Run test spawn from gevent"""
- start = time.time()
- benchcontext = copy.copy(config.get('radosbench'))
- iterations = 0
- while time.time() - start < int(config.get('time', 600)):
- log.info("Starting iteration %s of segment %s"%(iterations, num))
- benchcontext['pool'] = str(num) + "-" + str(iterations)
- with radosbench.task(ctx, benchcontext):
- time.sleep(1)  # brief pause; the radosbench.task context waits for the bench itself
- iterations += 1
- log.info("Starting %s threads"%(str(config.get('segments', 3)),))
- segments = [
- gevent.spawn(run_one, i)
- for i in range(0, int(config.get('segments', 3)))]
-
- try:
- yield
- finally:
- [i.get() for i in segments]
+++ /dev/null
-"""
-Test Object locations going down
-"""
-import logging
-import ceph_manager
-from teuthology import misc as teuthology
-from util.rados import rados
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Test handling of object location going down
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- while len(manager.get_osd_status()['up']) < 3:
- manager.sleep(10)
- manager.wait_for_clean()
-
- # something that is always there
- dummyfile = '/etc/fstab'
-
- # take 0, 1 out
- manager.mark_out_osd(0)
- manager.mark_out_osd(1)
- manager.wait_for_clean()
-
- # delay recovery, and make the pg log very long (to prevent backfill)
- manager.raw_cluster_cmd(
- 'tell', 'osd.0',
- 'injectargs',
- '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
- )
- # delay recovery, and make the pg log very long (to prevent backfill)
- manager.raw_cluster_cmd(
- 'tell', 'osd.1',
- 'injectargs',
- '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
- )
- # delay recovery, and make the pg log very long (to prevent backfill)
- manager.raw_cluster_cmd(
- 'tell', 'osd.2',
- 'injectargs',
- '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
- )
- # delay recovery, and make the pg log very long (to prevent backfill)
- manager.raw_cluster_cmd(
- 'tell', 'osd.3',
- 'injectargs',
- '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
- )
-
- # kludge to make sure they get a map
- rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
-
- # create old objects
- for f in range(1, 10):
- rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
-
- manager.mark_out_osd(3)
- manager.wait_till_active()
-
- manager.mark_in_osd(0)
- manager.wait_till_active()
-
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
-
- manager.mark_out_osd(2)
- manager.wait_till_active()
-
- # bring up 1
- manager.mark_in_osd(1)
- manager.wait_till_active()
-
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- log.info("Getting unfound objects")
- unfound = manager.get_num_unfound_objects()
- assert not unfound
-
- manager.kill_osd(2)
- manager.mark_down_osd(2)
- manager.kill_osd(3)
- manager.mark_down_osd(3)
-
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- log.info("Getting unfound objects")
- unfound = manager.get_num_unfound_objects()
- assert unfound
+++ /dev/null
-"""
-Run omapbench executable within teuthology
-"""
-import contextlib
-import logging
-
-from teuthology.orchestra import run
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run omapbench
-
- The config should be as follows::
-
- omapbench:
- clients: [client list]
- threads: <threads at once>
- objects: <number of objects to write>
- entries: <number of entries per object map>
- keysize: <number of characters per object map key>
- valsize: <number of characters per object map val>
- increment: <interval to show in histogram (in ms)>
- omaptype: <how the omaps should be generated>
-
- example::
-
- tasks:
- - ceph:
- - omapbench:
- clients: [client.0]
- threads: 30
- objects: 1000
- entries: 10
- keysize: 10
- valsize: 100
- increment: 100
- omaptype: uniform
- - interactive:
- """
- log.info('Beginning omapbench...')
- assert isinstance(config, dict), \
- "please list clients to run on"
- omapbench = {}
- testdir = teuthology.get_testdir(ctx)
- print(str(config.get('increment',-1)))
- for role in config.get('clients', ['client.0']):
- assert isinstance(role, basestring)
- PREFIX = 'client.'
- assert role.startswith(PREFIX)
- id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- proc = remote.run(
- args=[
- "/bin/sh", "-c",
- " ".join(['adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage',
- 'omapbench',
- '--name', role[len(PREFIX):],
- '-t', str(config.get('threads', 30)),
- '-o', str(config.get('objects', 1000)),
- '--entries', str(config.get('entries',10)),
- '--keysize', str(config.get('keysize',10)),
- '--valsize', str(config.get('valsize',1000)),
- '--inc', str(config.get('increment',10)),
- '--omaptype', str(config.get('omaptype','uniform'))
- ]).format(tdir=testdir),
- ],
- logger=log.getChild('omapbench.{id}'.format(id=id_)),
- stdin=run.PIPE,
- wait=False
- )
- omapbench[id_] = proc
-
- try:
- yield
- finally:
- log.info('joining omapbench')
- run.wait(omapbench.itervalues())
+++ /dev/null
-"""
-Osd backfill test
-"""
-import logging
-import ceph_manager
-import time
-from teuthology import misc as teuthology
-
-
-log = logging.getLogger(__name__)
-
-
-def rados_start(ctx, remote, cmd):
- """
- Run a remote rados command (currently used to only write data)
- """
- log.info("rados %s" % ' '.join(cmd))
- testdir = teuthology.get_testdir(ctx)
- pre = [
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rados',
- ]
- pre.extend(cmd)
- proc = remote.run(
- args=pre,
- wait=False,
- )
- return proc
-
-def task(ctx, config):
- """
- Test backfill
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
- log.info('num_osds is %s' % num_osds)
- assert num_osds == 3
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- while len(manager.get_osd_status()['up']) < 3:
- manager.sleep(10)
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_clean()
-
- # write some data
- p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
- '--no-cleanup'])
- err = p.wait()
- log.info('err is %d' % err)
-
- # mark osd.0 out to trigger a rebalance/backfill
- manager.mark_out_osd(0)
-
- # also mark it down so it won't be included in pg_temps
- manager.kill_osd(0)
- manager.mark_down_osd(0)
-
- # wait for everything to peer and be happy...
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_recovery()
-
- # write some new data
- p = rados_start(ctx, mon, ['-p', 'data', 'bench', '30', 'write', '-b', '4096',
- '--no-cleanup'])
-
- time.sleep(15)
-
- # blackhole + restart osd.1
- # this triggers a divergent backfill target
- manager.blackhole_kill_osd(1)
- time.sleep(2)
- manager.revive_osd(1)
-
- # wait for our writes to complete + succeed
- err = p.wait()
- log.info('err is %d' % err)
-
- # cluster must recover
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_recovery()
-
- # re-add osd.0
- manager.revive_osd(0)
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_clean()
-
-
+++ /dev/null
-"""
-Handle osdfailsafe configuration settings (nearfull ratio and full ratio)
-"""
-from cStringIO import StringIO
-import logging
-import time
-
-import ceph_manager
-from teuthology.orchestra import run
-from util.rados import rados
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Test handling of osd_failsafe_nearfull_ratio and osd_failsafe_full_ratio
- configuration settings
-
- In order for the test to pass, the log-whitelist below must be used
-
- tasks:
- - chef:
- - install:
- - ceph:
- log-whitelist: ['OSD near full', 'OSD full dropping all updates']
- - osd_failsafe_enospc:
-
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'osd_failsafe_enospc task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
- ctx.manager = manager
-
- # Give 2 seconds for injectargs + osd_op_complaint_time (30) + 2 * osd_heartbeat_interval (6) + 6 padding
- sleep_time = 50
-
- # something that is always there
- dummyfile = '/etc/fstab'
- dummyfile2 = '/etc/resolv.conf'
-
- # create 1 pg pool with 1 rep which can only be on osd.0
- osds = manager.get_osd_dump()
- for osd in osds:
- if osd['osd'] != 0:
- manager.mark_out_osd(osd['osd'])
-
- log.info('creating pool foo')
- manager.create_pool("foo")
- manager.raw_cluster_cmd('osd', 'pool', 'set', 'foo', 'size', '1')
-
- # State NONE -> NEAR
- log.info('1. Verify warning messages when exceeding nearfull_ratio')
-
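- # Pattern used throughout this test: capture 'ceph -w' via daemon-helper for
- # sleep_time seconds (closing stdin makes daemon-helper SIGKILL it), then count
- # the [WRN]/[ERR] lines in the captured output.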
- proc = mon.run(
- args=[
- 'daemon-helper',
- 'kill',
- 'ceph', '-w'
- ],
- stdin=run.PIPE,
- stdout=StringIO(),
- wait=False,
- )
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .00001')
-
- time.sleep(sleep_time)
- proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
- proc.wait()
-
- lines = proc.stdout.getvalue().split('\n')
-
- count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
- assert count == 2, 'Incorrect number of warning messages expected 2 got %d' % count
- count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
- assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
-
- # State NEAR -> FULL
- log.info('2. Verify error messages when exceeding full_ratio')
-
- proc = mon.run(
- args=[
- 'daemon-helper',
- 'kill',
- 'ceph', '-w'
- ],
- stdin=run.PIPE,
- stdout=StringIO(),
- wait=False,
- )
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001')
-
- time.sleep(sleep_time)
- proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
- proc.wait()
-
- lines = proc.stdout.getvalue().split('\n')
-
- count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
- assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count
-
- log.info('3. Verify write failure when exceeding full_ratio')
-
- # Write data should fail
- ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile1', dummyfile])
- assert ret != 0, 'Expected write failure but it succeeded with exit status 0'
-
- # Put back default
- manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97')
- time.sleep(10)
-
- # State FULL -> NEAR
- log.info('4. Verify write success when NOT exceeding full_ratio')
-
- # Write should succeed
- ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile2', dummyfile2])
- assert ret == 0, 'Expected write to succeed, but got exit status %d' % ret
-
- log.info('5. Verify warning messages again when exceeding nearfull_ratio')
-
- proc = mon.run(
- args=[
- 'daemon-helper',
- 'kill',
- 'ceph', '-w'
- ],
- stdin=run.PIPE,
- stdout=StringIO(),
- wait=False,
- )
-
- time.sleep(sleep_time)
- proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
- proc.wait()
-
- lines = proc.stdout.getvalue().split('\n')
-
- count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
- assert count == 1 or count == 2, 'Incorrect number of warning messages expected 1 or 2 got %d' % count
- count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
- assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .90')
- time.sleep(10)
-
- # State NONE -> FULL
- log.info('6. Verify error messages again when exceeding full_ratio')
-
- proc = mon.run(
- args=[
- 'daemon-helper',
- 'kill',
- 'ceph', '-w'
- ],
- stdin=run.PIPE,
- stdout=StringIO(),
- wait=False,
- )
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001')
-
- time.sleep(sleep_time)
- proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
- proc.wait()
-
- lines = proc.stdout.getvalue().split('\n')
-
- count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
- assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count
- count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
- assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count
-
- # State FULL -> NONE
- log.info('7. Verify no messages when settings are back to default')
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97')
- time.sleep(10)
-
- proc = mon.run(
- args=[
- 'daemon-helper',
- 'kill',
- 'ceph', '-w'
- ],
- stdin=run.PIPE,
- stdout=StringIO(),
- wait=False,
- )
-
- time.sleep(sleep_time)
- proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
- proc.wait()
-
- lines = proc.stdout.getvalue().split('\n')
-
- count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
- assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count
- count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
- assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
-
- log.info('Test Passed')
-
- # Bring all OSDs back in
- manager.remove_pool("foo")
- for osd in osds:
- if osd['osd'] != 0:
- manager.mark_in_osd(osd['osd'])
+++ /dev/null
-"""
-osd recovery
-"""
-import logging
-import ceph_manager
-import time
-from teuthology import misc as teuthology
-
-
-log = logging.getLogger(__name__)
-
-
-def rados_start(testdir, remote, cmd):
- """
- Run a remote rados command (currently used to only write data)
- """
- log.info("rados %s" % ' '.join(cmd))
- pre = [
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rados',
- ]
- pre.extend(cmd)
- proc = remote.run(
- args=pre,
- wait=False,
- )
- return proc
-
-def task(ctx, config):
- """
- Test (non-backfill) recovery
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'task only accepts a dict for configuration'
- testdir = teuthology.get_testdir(ctx)
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
- log.info('num_osds is %s' % num_osds)
- assert num_osds == 3
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- while len(manager.get_osd_status()['up']) < 3:
- manager.sleep(10)
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_clean()
-
- # test some osdmap flags
- manager.raw_cluster_cmd('osd', 'set', 'noin')
- manager.raw_cluster_cmd('osd', 'set', 'noout')
- manager.raw_cluster_cmd('osd', 'set', 'noup')
- manager.raw_cluster_cmd('osd', 'set', 'nodown')
- manager.raw_cluster_cmd('osd', 'unset', 'noin')
- manager.raw_cluster_cmd('osd', 'unset', 'noout')
- manager.raw_cluster_cmd('osd', 'unset', 'noup')
- manager.raw_cluster_cmd('osd', 'unset', 'nodown')
-
- # write some new data
- p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '60', 'write', '-b', '4096',
- '--no-cleanup'])
-
- time.sleep(15)
-
- # trigger a divergent target:
- # blackhole + restart osd.1 (shorter log)
- manager.blackhole_kill_osd(1)
- # kill osd.2 (longer log... we'll make it divergent below)
- manager.kill_osd(2)
- time.sleep(2)
- manager.revive_osd(1)
-
- # wait for our writes to complete + succeed
- err = p.wait()
- log.info('err is %d' % err)
-
- # cluster must repeer
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.wait_for_active_or_down()
-
- # write some more (make sure osd.2 really is divergent)
- p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096'])
- p.wait()
-
- # revive divergent osd
- manager.revive_osd(2)
-
- while len(manager.get_osd_status()['up']) < 3:
- log.info('waiting a bit...')
- time.sleep(2)
- log.info('3 are up!')
-
- # cluster must recover
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_clean()
-
-
-def test_incomplete_pgs(ctx, config):
- """
- Test handling of incomplete pgs. Requires 4 osds.
- """
- testdir = teuthology.get_testdir(ctx)
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
- log.info('num_osds is %s' % num_osds)
- assert num_osds == 4
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- while len(manager.get_osd_status()['up']) < 4:
- time.sleep(10)
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
- manager.wait_for_clean()
-
- log.info('Testing incomplete pgs...')
-
- for i in range(4):
- manager.set_config(
- i,
- osd_recovery_delay_start=1000)
-
- # move data off of osd.0, osd.1
- manager.raw_cluster_cmd('osd', 'out', '0', '1')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
- manager.wait_for_clean()
-
- # lots of objects in rbd (no pg log, will backfill)
- p = rados_start(testdir, mon,
- ['-p', 'rbd', 'bench', '60', 'write', '-b', '1',
- '--no-cleanup'])
- p.wait()
-
- # few objects in metadata pool (with pg log, normal recovery)
- for f in range(1, 20):
- p = rados_start(testdir, mon, ['-p', 'metadata', 'put',
- 'foo.%d' % f, '/etc/passwd'])
- p.wait()
-
- # move it back
- manager.raw_cluster_cmd('osd', 'in', '0', '1')
- manager.raw_cluster_cmd('osd', 'out', '2', '3')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.3', 'flush_pg_stats')
- manager.wait_for_active()
-
- assert not manager.is_clean()
- assert not manager.is_recovered()
-
- # kill 2 + 3
- log.info('stopping 2,3')
- manager.kill_osd(2)
- manager.kill_osd(3)
- log.info('...')
- manager.raw_cluster_cmd('osd', 'down', '2', '3')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.wait_for_active_or_down()
-
- assert manager.get_num_down() > 0
-
- # revive 2 + 3
- manager.revive_osd(2)
- manager.revive_osd(3)
- while len(manager.get_osd_status()['up']) < 4:
- log.info('waiting a bit...')
- time.sleep(2)
- log.info('all are up!')
-
- for i in range(4):
- manager.kick_recovery_wq(i)
-
- # cluster must recover
- manager.wait_for_clean()
+++ /dev/null
-"""
-Peer test (Single test, not much configurable here)
-"""
-import logging
-import json
-
-import ceph_manager
-from teuthology import misc as teuthology
-from util.rados import rados
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Test peering.
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'peer task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- while len(manager.get_osd_status()['up']) < 3:
- manager.sleep(10)
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_clean()
-
- for i in range(3):
- manager.set_config(
- i,
- osd_recovery_delay_start=120)
-
- # take one osd down
- manager.kill_osd(2)
- manager.mark_down_osd(2)
-
- # kludge to make sure they get a map
- rados(ctx, mon, ['-p', 'data', 'get', 'dummy', '-'])
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.wait_for_recovery()
-
- # kill another and revive 2, so that some pgs can't peer.
- manager.kill_osd(1)
- manager.mark_down_osd(1)
- manager.revive_osd(2)
- manager.wait_till_osd_is_up(2)
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
-
- manager.wait_for_active_or_down()
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
-
- # look for down pgs
- num_down_pgs = 0
- pgs = manager.get_pg_stats()
- for pg in pgs:
- out = manager.raw_cluster_cmd('pg', pg['pgid'], 'query')
- log.debug("out string %s",out)
- j = json.loads(out)
- log.info("pg is %s, query json is %s", pg, j)
-
- if pg['state'].count('down'):
- num_down_pgs += 1
- # verify that it is blocked on osd.1
- rs = j['recovery_state']
- assert len(rs) > 0
- assert rs[0]['name'] == 'Started/Primary/Peering/GetInfo'
- assert rs[1]['name'] == 'Started/Primary/Peering'
- assert rs[1]['blocked']
- assert rs[1]['down_osds_we_would_probe'] == [1]
- assert len(rs[1]['peering_blocked_by']) == 1
- assert rs[1]['peering_blocked_by'][0]['osd'] == 1
-
- assert num_down_pgs > 0
-
- # bring it all back
- manager.revive_osd(1)
- manager.wait_till_osd_is_up(1)
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_clean()
+++ /dev/null
-"""
-Remotely run peering tests.
-"""
-import logging
-import time
-from teuthology import misc as teuthology
-import ceph_manager
-
-log = logging.getLogger(__name__)
-
-from args import argify
-
-POOLNAME = "POOLNAME"
-ARGS = [
- ('num_pgs', 'number of pgs to create', 256, int),
- ('max_time', 'seconds to complete peering', 0, int),
- ('runs', 'trials to run', 10, int),
- ('num_objects', 'objects to create', 256 * 1024, int),
- ('object_size', 'size in bytes for objects', 64, int),
- ('creation_time_limit', 'time limit for pool population', 60*60, int),
- ('create_threads', 'concurrent writes for create', 256, int)
- ]
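-# each tuple above is (name, description, default, type); a suite yaml
-# fragment might therefore look roughly like this (values are illustrative
-# only, and assume the argify wrapper maps config keys to these names):
-#
-#   tasks:
-#   - peering_speed_test:
-#       num_pgs: 1024
-#       runs: 5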
-
-def setup(ctx, config):
- """
- Setup peering test on remotes.
- """
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
- ctx.manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
- ctx.manager.clear_pools()
- ctx.manager.create_pool(POOLNAME, config.num_pgs)
- log.info("populating pool")
- ctx.manager.rados_write_objects(
- POOLNAME,
- config.num_objects,
- config.object_size,
- config.creation_time_limit,
- config.create_threads)
- log.info("done populating pool")
-
-def do_run(ctx, config):
- """
- Perform the test.
- """
- start = time.time()
- # mark in osd
- ctx.manager.mark_in_osd(0)
- log.info("writing out objects")
- ctx.manager.rados_write_objects(
- POOLNAME,
- config.num_pgs, # write 1 object per pg or so
- 1,
- config.creation_time_limit,
- config.num_pgs, # lots of concurrency
- cleanup = True)
- peering_end = time.time()
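-    # the writes above only complete once their pgs have re-peered and gone
-    # active with osd.0 back in, so (peering_end - start) approximates the
-    # time to active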
-
- log.info("peering done, waiting on recovery")
- ctx.manager.wait_for_clean()
-
- log.info("recovery done")
- recovery_end = time.time()
- if config.max_time:
- assert(peering_end - start < config.max_time)
- ctx.manager.mark_out_osd(0)
- ctx.manager.wait_for_clean()
- return {
- 'time_to_active': peering_end - start,
- 'time_to_clean': recovery_end - start
- }
-
-@argify("peering_speed_test", ARGS)
-def task(ctx, config):
- """
-    Peering speed test: repeatedly mark an osd back in and measure how long
-    the pgs take to go active and then clean, recording the times in
-    ctx.summary['recovery_times'].
- """
- setup(ctx, config)
- ctx.manager.mark_out_osd(0)
- ctx.manager.wait_for_clean()
- ret = []
- for i in range(config.runs):
- log.info("Run {i}".format(i = i))
- ret.append(do_run(ctx, config))
-
- ctx.manager.mark_in_osd(0)
- ctx.summary['recovery_times'] = {
- 'runs': ret
- }
+++ /dev/null
-"""
-Qemu task
-"""
-from cStringIO import StringIO
-
-import contextlib
-import logging
-import os
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from tasks import rbd
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-DEFAULT_NUM_RBD = 1
-DEFAULT_IMAGE_URL = 'http://ceph.com/qa/ubuntu-12.04.qcow2'
-DEFAULT_MEM = 4096 # in megabytes
-
-@contextlib.contextmanager
-def create_dirs(ctx, config):
- """
- Handle directory creation and cleanup
- """
- testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
- assert 'test' in client_config, 'You must specify a test to run'
- (remote,) = ctx.cluster.only(client).remotes.keys()
- remote.run(
- args=[
- 'install', '-d', '-m0755', '--',
- '{tdir}/qemu'.format(tdir=testdir),
- '{tdir}/archive/qemu'.format(tdir=testdir),
- ]
- )
- try:
- yield
- finally:
- for client, client_config in config.iteritems():
- assert 'test' in client_config, 'You must specify a test to run'
- (remote,) = ctx.cluster.only(client).remotes.keys()
- remote.run(
- args=[
- 'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true',
- ]
- )
-
-@contextlib.contextmanager
-def generate_iso(ctx, config):
- """Execute system commands to generate iso"""
- log.info('generating iso...')
- testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
- assert 'test' in client_config, 'You must specify a test to run'
- (remote,) = ctx.cluster.only(client).remotes.keys()
- src_dir = os.path.dirname(__file__)
- userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
- metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)
-
- with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
- test_setup = ''.join(f.readlines())
- # configuring the commands to setup the nfs mount
- mnt_dir = "/export/{client}".format(client=client)
- test_setup = test_setup.format(
- mnt_dir=mnt_dir
- )
-
- with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
- test_teardown = ''.join(f.readlines())
-
- user_data = test_setup
- if client_config.get('type', 'filesystem') == 'filesystem':
- for i in xrange(0, client_config.get('num_rbd', DEFAULT_NUM_RBD)):
- dev_letter = chr(ord('b') + i)
- user_data += """
-- |
- #!/bin/bash
- mkdir /mnt/test_{dev_letter}
- mkfs -t xfs /dev/vd{dev_letter}
- mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter}
-""".format(dev_letter=dev_letter)
-
- # this may change later to pass the directories as args to the
- # script or something. xfstests needs that.
- user_data += """
-- |
- #!/bin/bash
- test -d /mnt/test_b && cd /mnt/test_b
- /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
-""" + test_teardown
-
- teuthology.write_file(remote, userdata_path, StringIO(user_data))
-
- with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
- teuthology.write_file(remote, metadata_path, f)
-
- test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)
- remote.run(
- args=[
- 'wget', '-nv', '-O', test_file,
- client_config['test'],
- run.Raw('&&'),
- 'chmod', '755', test_file,
- ],
- )
- remote.run(
- args=[
- 'genisoimage', '-quiet', '-input-charset', 'utf-8',
- '-volid', 'cidata', '-joliet', '-rock',
- '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
- '-graft-points',
- 'user-data={userdata}'.format(userdata=userdata_path),
- 'meta-data={metadata}'.format(metadata=metadata_path),
- 'test.sh={file}'.format(file=test_file),
- ],
- )
- try:
- yield
- finally:
- for client in config.iterkeys():
- (remote,) = ctx.cluster.only(client).remotes.keys()
- remote.run(
- args=[
- 'rm', '-f',
- '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
- os.path.join(testdir, 'qemu', 'userdata.' + client),
- os.path.join(testdir, 'qemu', 'metadata.' + client),
- '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
- ],
- )
-
-@contextlib.contextmanager
-def download_image(ctx, config):
-    """Download the base image; remove the image file when done"""
- log.info('downloading base image')
- testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
- base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
- remote.run(
- args=[
- 'wget', '-nv', '-O', base_file, DEFAULT_IMAGE_URL,
- ]
- )
- try:
- yield
- finally:
- log.debug('cleaning up base image files')
- for client in config.iterkeys():
- base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
- tdir=testdir,
- client=client,
- )
- (remote,) = ctx.cluster.only(client).remotes.keys()
- remote.run(
- args=[
- 'rm', '-f', base_file,
- ],
- )
-
-
-def _setup_nfs_mount(remote, client, mount_dir):
- """
- Sets up an nfs mount on the remote that the guest can use to
- store logs. This nfs mount is also used to touch a file
- at the end of the test to indiciate if the test was successful
-    at the end of the test to indicate if the test was successful
- """
- export_dir = "/export/{client}".format(client=client)
- log.info("Creating the nfs export directory...")
- remote.run(args=[
- 'sudo', 'mkdir', '-p', export_dir,
- ])
- log.info("Mounting the test directory...")
- remote.run(args=[
- 'sudo', 'mount', '--bind', mount_dir, export_dir,
- ])
- log.info("Adding mount to /etc/exports...")
- export = "{dir} *(rw,no_root_squash,no_subtree_check,insecure)".format(
- dir=export_dir
- )
- remote.run(args=[
- 'echo', export, run.Raw("|"),
- 'sudo', 'tee', '-a', "/etc/exports",
- ])
- log.info("Restarting NFS...")
- if remote.os.package_type == "deb":
- remote.run(args=['sudo', 'service', 'nfs-kernel-server', 'restart'])
- else:
- remote.run(args=['sudo', 'systemctl', 'restart', 'nfs'])
-
-
-def _teardown_nfs_mount(remote, client):
- """
- Tears down the nfs mount on the remote used for logging and reporting the
-    status of the tests being run in the guest.
- """
- log.info("Tearing down the nfs mount for {remote}".format(remote=remote))
- export_dir = "/export/{client}".format(client=client)
- log.info("Stopping NFS...")
- if remote.os.package_type == "deb":
- remote.run(args=[
- 'sudo', 'service', 'nfs-kernel-server', 'stop'
- ])
- else:
- remote.run(args=[
- 'sudo', 'systemctl', 'stop', 'nfs'
- ])
- log.info("Unmounting exported directory...")
- remote.run(args=[
- 'sudo', 'umount', export_dir
- ])
- log.info("Deleting exported directory...")
- remote.run(args=[
- 'sudo', 'rm', '-r', '/export'
- ])
- log.info("Deleting export from /etc/exports...")
- remote.run(args=[
- 'sudo', 'sed', '-i', '$ d', '/etc/exports'
- ])
- log.info("Starting NFS...")
- if remote.os.package_type == "deb":
- remote.run(args=[
- 'sudo', 'service', 'nfs-kernel-server', 'start'
- ])
- else:
- remote.run(args=[
- 'sudo', 'systemctl', 'start', 'nfs'
- ])
-
-
-@contextlib.contextmanager
-def run_qemu(ctx, config):
- """Setup kvm environment and start qemu"""
- procs = []
- testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
- log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
- remote.run(
- args=[
- 'mkdir', log_dir, run.Raw('&&'),
- 'sudo', 'modprobe', 'kvm',
- ]
- )
-
- # make an nfs mount to use for logging and to
-        # allow the test to tell teuthology the test's outcome
- _setup_nfs_mount(remote, client, log_dir)
-
- base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
- tdir=testdir,
- client=client
- )
- qemu_cmd = 'qemu-system-x86_64'
- if remote.os.package_type == "rpm":
- qemu_cmd = "/usr/libexec/qemu-kvm"
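-        # wrap qemu in the usual teuthology helpers: adjust-ulimits and
-        # ceph-coverage as for other daemons, and daemon-helper so the
-        # process is torn down cleanly when the task exits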
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'daemon-helper',
- 'term',
- qemu_cmd, '-enable-kvm', '-nographic',
- '-m', str(client_config.get('memory', DEFAULT_MEM)),
- # base OS device
- '-drive',
- 'file={base},format=qcow2,if=virtio'.format(base=base_file),
- # cd holding metadata for cloud-init
- '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
- ]
-
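-        # pick a qemu cache mode that mirrors the rbd cache settings:
-        # writeback when the rbd cache may hold dirty data, writethrough
-        # when it may not, and none when the rbd cache is disabled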
- cachemode = 'none'
- ceph_config = ctx.ceph.conf.get('global', {})
- ceph_config.update(ctx.ceph.conf.get('client', {}))
- ceph_config.update(ctx.ceph.conf.get(client, {}))
- if ceph_config.get('rbd cache'):
- if ceph_config.get('rbd cache max dirty', 1) > 0:
- cachemode = 'writeback'
- else:
- cachemode = 'writethrough'
-
- for i in xrange(client_config.get('num_rbd', DEFAULT_NUM_RBD)):
- args.extend([
- '-drive',
- 'file=rbd:rbd/{img}:id={id},format=raw,if=virtio,cache={cachemode}'.format(
- img='{client}.{num}'.format(client=client, num=i),
- id=client[len('client.'):],
- cachemode=cachemode,
- ),
- ])
-
- log.info('starting qemu...')
- procs.append(
- remote.run(
- args=args,
- logger=log.getChild(client),
- stdin=run.PIPE,
- wait=False,
- )
- )
-
- try:
- yield
- finally:
- log.info('waiting for qemu tests to finish...')
- run.wait(procs)
-
- log.debug('checking that qemu tests succeeded...')
- for client in config.iterkeys():
- (remote,) = ctx.cluster.only(client).remotes.keys()
- # teardown nfs mount
- _teardown_nfs_mount(remote, client)
- # check for test status
- remote.run(
- args=[
- 'test', '-f',
- '{tdir}/archive/qemu/{client}/success'.format(
- tdir=testdir,
- client=client
- ),
- ],
- )
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run a test inside of QEMU on top of rbd. Only one test
- is supported per client.
-
- For example, you can specify which clients to run on::
-
- tasks:
- - ceph:
- - qemu:
- client.0:
- test: http://ceph.com/qa/test.sh
- client.1:
- test: http://ceph.com/qa/test2.sh
-
-    Or use the same settings on all clients::
-
- tasks:
- - ceph:
- - qemu:
- all:
- test: http://ceph.com/qa/test.sh
-
- For tests that don't need a filesystem, set type to block::
-
- tasks:
- - ceph:
- - qemu:
- client.0:
- test: http://ceph.com/qa/test.sh
- type: block
-
- The test should be configured to run on /dev/vdb and later
- devices.
-
- If you want to run a test that uses more than one rbd image,
- specify how many images to use::
-
- tasks:
- - ceph:
- - qemu:
- client.0:
- test: http://ceph.com/qa/test.sh
- type: block
- num_rbd: 2
-
-    You can set the amount of memory the VM has (default is 4096 MB)::
-
- tasks:
- - ceph:
- - qemu:
- client.0:
- test: http://ceph.com/qa/test.sh
- memory: 512 # megabytes
- """
- assert isinstance(config, dict), \
- "task qemu only supports a dictionary for configuration"
-
- config = teuthology.replace_all_with_clients(ctx.cluster, config)
-
- managers = []
- for client, client_config in config.iteritems():
- num_rbd = client_config.get('num_rbd', 1)
- assert num_rbd > 0, 'at least one rbd device must be used'
- for i in xrange(num_rbd):
- create_config = {
- client: {
- 'image_name':
- '{client}.{num}'.format(client=client, num=i),
- }
- }
- managers.append(
- lambda create_config=create_config:
- rbd.create_image(ctx=ctx, config=create_config)
- )
-
- managers.extend([
- lambda: create_dirs(ctx=ctx, config=config),
- lambda: generate_iso(ctx=ctx, config=config),
- lambda: download_image(ctx=ctx, config=config),
- lambda: run_qemu(ctx=ctx, config=config),
- ])
-
- with contextutil.nested(*managers):
- yield
+++ /dev/null
-"""
-RadosModel-based integration tests
-"""
-import contextlib
-import logging
-import gevent
-from teuthology import misc as teuthology
-
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run RadosModel-based integration tests.
-
- The config should be as follows::
-
- rados:
- clients: [client list]
- ops: <number of ops>
- objects: <number of objects to use>
- max_in_flight: <max number of operations in flight>
- object_size: <size of objects in bytes>
- min_stride_size: <minimum write stride size in bytes>
- max_stride_size: <maximum write stride size in bytes>
- op_weights: <dictionary mapping operation type to integer weight>
- runs: <number of times to run> - the pool is remade between runs
- ec_pool: use an ec pool
-
- For example::
-
- tasks:
- - ceph:
- - rados:
- clients: [client.0]
- ops: 1000
- max_seconds: 0 # 0 for no limit
- objects: 25
- max_in_flight: 16
- object_size: 4000000
- min_stride_size: 1024
- max_stride_size: 4096
- op_weights:
- read: 20
- write: 10
- delete: 2
- snap_create: 3
- rollback: 2
- snap_remove: 0
- ec_pool: true
- runs: 10
- - interactive:
-
-    Optionally, you can provide the pool name to run against::
-
- tasks:
- - ceph:
- - exec:
- client.0:
- - ceph osd pool create foo
- - rados:
- clients: [client.0]
- pools: [foo]
- ...
-
-    Alternatively, you can provide a pool prefix::
-
- tasks:
- - ceph:
- - exec:
- client.0:
- - ceph osd pool create foo.client.0
- - rados:
- clients: [client.0]
- pool_prefix: foo
- ...
-
- """
- log.info('Beginning rados...')
- assert isinstance(config, dict), \
- "please list clients to run on"
-
- object_size = int(config.get('object_size', 4000000))
- op_weights = config.get('op_weights', {})
- testdir = teuthology.get_testdir(ctx)
- args = [
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'ceph_test_rados']
- if config.get('ec_pool', False):
- args.extend(['--ec-pool'])
- args.extend([
- '--op', 'read', str(op_weights.get('read', 100)),
- '--op', 'write', str(op_weights.get('write', 100)),
- '--op', 'delete', str(op_weights.get('delete', 10)),
- '--max-ops', str(config.get('ops', 10000)),
- '--objects', str(config.get('objects', 500)),
- '--max-in-flight', str(config.get('max_in_flight', 16)),
- '--size', str(object_size),
- '--min-stride-size', str(config.get('min_stride_size', object_size / 10)),
- '--max-stride-size', str(config.get('max_stride_size', object_size / 5)),
- '--max-seconds', str(config.get('max_seconds', 0))
- ])
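-    # optional operation weights: only forward the ones this job's yaml
-    # actually sets; anything unspecified is left to ceph_test_rados itself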
- for field in [
- 'copy_from', 'is_dirty', 'undirty', 'cache_flush',
- 'cache_try_flush', 'cache_evict',
- 'snap_create', 'snap_remove', 'rollback', 'setattr', 'rmattr',
- 'watch', 'append',
- ]:
- if field in op_weights:
- args.extend([
- '--op', field, str(op_weights[field]),
- ])
-
- def thread():
- """Thread spawned by gevent"""
- clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- log.info('clients are %s' % clients)
- for i in range(int(config.get('runs', '1'))):
- log.info("starting run %s out of %s", str(i), config.get('runs', '1'))
- tests = {}
- existing_pools = config.get('pools', [])
- created_pools = []
- for role in config.get('clients', clients):
- assert isinstance(role, basestring)
- PREFIX = 'client.'
- assert role.startswith(PREFIX)
- id_ = role[len(PREFIX):]
-
-                pool = config.get('pool', None)
-                if not pool:
-                    if existing_pools:
-                        pool = existing_pools.pop()
-                    else:
-                        pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
-                        created_pools.append(pool)
-
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- proc = remote.run(
- args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
- ["--pool", pool],
- logger=log.getChild("rados.{id}".format(id=id_)),
- stdin=run.PIPE,
- wait=False
- )
- tests[id_] = proc
- run.wait(tests.itervalues())
-
- for pool in created_pools:
- ctx.manager.remove_pool(pool)
-
- running = gevent.spawn(thread)
-
- try:
- yield
- finally:
- log.info('joining rados')
- running.get()
+++ /dev/null
-"""
-Rados benchmarking
-"""
-import contextlib
-import logging
-
-from teuthology.orchestra import run
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run radosbench
-
- The config should be as follows:
-
- radosbench:
- clients: [client list]
- time: <seconds to run>
- pool: <pool to use>
- size: write size to use
- unique_pool: use a unique pool, defaults to False
- ec_pool: create ec pool, defaults to False
-        create_pool: create pool, defaults to True
-
- example:
-
- tasks:
- - ceph:
- - radosbench:
- clients: [client.0]
- time: 360
- - interactive:
- """
- log.info('Beginning radosbench...')
- assert isinstance(config, dict), \
- "please list clients to run on"
- radosbench = {}
-
- testdir = teuthology.get_testdir(ctx)
-
- for role in config.get('clients', ['client.0']):
- assert isinstance(role, basestring)
- PREFIX = 'client.'
- assert role.startswith(PREFIX)
- id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
-
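-        # default to the pre-existing 'data' pool; with create_pool enabled,
-        # create and use the configured pool name (or a uniquely named pool
-        # when none is given) instead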
- pool = 'data'
- if config.get('create_pool', True):
- if config.get('pool'):
- pool = config.get('pool')
- if pool != 'data':
- ctx.manager.create_pool(pool, ec_pool=config.get('ec_pool', False))
- else:
- pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
-
- proc = remote.run(
- args=[
- "/bin/sh", "-c",
- " ".join(['adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage',
- 'rados',
- '--name', role,
- '-b', str(config.get('size', 4<<20)),
-                      '-p', pool,
- 'bench', str(config.get('time', 360)), 'write',
- ]).format(tdir=testdir),
- ],
- logger=log.getChild('radosbench.{id}'.format(id=id_)),
- stdin=run.PIPE,
- wait=False
- )
- radosbench[id_] = proc
-
- try:
- yield
- finally:
- timeout = config.get('time', 360) * 5
- log.info('joining radosbench (timing out after %ss)', timeout)
- run.wait(radosbench.itervalues(), timeout=timeout)
-
-        if pool != 'data':
- ctx.manager.remove_pool(pool)
+++ /dev/null
-"""
-Rgw admin testing against a running instance
-"""
-# The test cases in this file have been annotated for inventory.
-# To extract the inventory (in csv format) use the command:
-#
-# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
-#
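-# which yields one csv row per test case, e.g.:
-#
-#   'create-ok','user','create','w/all valid info','succeeds'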
-
-import copy
-import json
-import logging
-import time
-
-from cStringIO import StringIO
-
-import boto.exception
-import boto.s3.connection
-import boto.s3.acl
-
-import util.rgw as rgw_utils
-
-from teuthology import misc as teuthology
-from util.rgw import rgwadmin, get_user_summary, get_user_successful_ops
-
-log = logging.getLogger(__name__)
-
-def get_acl(key):
- """
- Helper function to get the xml acl from a key, ensuring that the xml
- version tag is removed from the acl response
- """
- raw_acl = key.get_xml_acl()
-
- def remove_version(string):
- return string.split(
- '<?xml version="1.0" encoding="UTF-8"?>'
- )[-1]
-
- def remove_newlines(string):
- return string.strip('\n')
-
- return remove_version(
- remove_newlines(raw_acl)
- )
-
-
-def task(ctx, config):
- """
- Test radosgw-admin functionality against a running rgw instance.
- """
- global log
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- multi_region_run = rgw_utils.multi_region_enabled(ctx)
-
-    client = clients[0]  # default choice; multi-region code may overwrite this
- if multi_region_run:
- client = rgw_utils.get_master_client(ctx, clients)
-
- # once the client is chosen, pull the host name and assigned port out of
- # the role_endpoints that were assigned by the rgw task
- (remote_host, remote_port) = ctx.rgw.role_endpoints[client]
-
- ##
- user1='foo'
- user2='fud'
- subuser1='foo:foo1'
- subuser2='foo:foo2'
- display_name1='Foo'
- display_name2='Fud'
- email='foo@foo.com'
- email2='bar@bar.com'
- access_key='9te6NH5mcdcq0Tc5i8i1'
- secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
- access_key2='p5YnriCv1nAtykxBrupQ'
- secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
- swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
- swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
-
- bucket_name='myfoo'
- bucket_name2='mybar'
-
- # connect to rgw
- connection = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=remote_port,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
- connection2 = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key2,
- aws_secret_access_key=secret_key2,
- is_secure=False,
- port=remote_port,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
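-    # one connection per user so that ownership and cross-user permission
-    # behavior can be exercised below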
-
- # legend (test cases can be easily grep-ed out)
- # TESTCASE 'testname','object','method','operation','assertion'
- # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert err
-
- # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user1,
- '--display-name', display_name1,
- '--email', email,
- '--access-key', access_key,
- '--secret', secret_key,
- '--max-buckets', '4'
- ],
- check_status=True)
-
- # TESTCASE 'duplicate email','user','create','existing user email','fails'
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user2,
- '--display-name', display_name2,
- '--email', email,
- ])
- assert err
-
- # TESTCASE 'info-existing','user','info','existing user','returns correct info'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
- assert out['user_id'] == user1
- assert out['email'] == email
- assert out['display_name'] == display_name1
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
- assert not out['suspended']
-
- # this whole block should only be run if regions have been configured
- if multi_region_run:
- rgw_utils.radosgw_agent_sync_all(ctx)
- # post-sync, validate that user1 exists on the sync destination host
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, dest_client, ['metadata', 'list', 'user'])
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
- assert out['user_id'] == user1
- assert out['email'] == email
- assert out['display_name'] == display_name1
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
- assert not out['suspended']
-
- # compare the metadata between different regions, make sure it matches
- log.debug('compare the metadata between different regions, make sure it matches')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client,
- ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
- assert out1 == out2
-
- # suspend a user on the master, then check the status on the destination
- log.debug('suspend a user on the master, then check the status on the destination')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, source_client, ['user', 'suspend', '--uid', user1])
- rgw_utils.radosgw_agent_sync_all(ctx)
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
- assert out['suspended']
-
- # delete a user on the master, then check that it's gone on the destination
- log.debug('delete a user on the master, then check that it\'s gone on the destination')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, source_client, ['user', 'rm', '--uid', user1], check_status=True)
- rgw_utils.radosgw_agent_sync_all(ctx)
- (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user1])
- assert out is None
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
- assert out is None
-
- # then recreate it so later tests pass
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user1,
- '--display-name', display_name1,
- '--email', email,
- '--access-key', access_key,
- '--secret', secret_key,
- '--max-buckets', '4'
- ],
- check_status=True)
-
- # now do the multi-region bucket tests
- log.debug('now do the multi-region bucket tests')
-
- # Create a second user for the following tests
- log.debug('Create a second user for the following tests')
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user2,
- '--display-name', display_name2,
- '--email', email2,
- '--access-key', access_key2,
- '--secret', secret_key2,
- '--max-buckets', '4'
- ],
- check_status=True)
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user2], check_status=True)
- assert out is not None
-
- # create a bucket and do a sync
- log.debug('create a bucket and do a sync')
- bucket = connection.create_bucket(bucket_name2)
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # compare the metadata for the bucket between different regions, make sure it matches
- log.debug('compare the metadata for the bucket between different regions, make sure it matches')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- assert out1 == out2
-
- # get the bucket.instance info and compare that
- src_bucket_id = out1['data']['bucket']['bucket_id']
- dest_bucket_id = out2['data']['bucket']['bucket_id']
- (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
- 'bucket.instance:{bucket_name}:{bucket_instance}'.format(
- bucket_name=bucket_name2,bucket_instance=src_bucket_id)],
- check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
- 'bucket.instance:{bucket_name}:{bucket_instance}'.format(
- bucket_name=bucket_name2,bucket_instance=dest_bucket_id)],
- check_status=True)
- del out1['data']['bucket_info']['bucket']['pool']
- del out1['data']['bucket_info']['bucket']['index_pool']
- del out2['data']['bucket_info']['bucket']['pool']
- del out2['data']['bucket_info']['bucket']['index_pool']
- assert out1 == out2
-
- same_region = 0
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
-
- source_region = rgw_utils.region_for_client(ctx, source_client)
- dest_region = rgw_utils.region_for_client(ctx, dest_client)
-
- # 301 is only returned for requests to something in a different region
- if source_region == dest_region:
- log.debug('301 is only returned for requests to something in a different region')
- same_region += 1
- continue
-
- # Attempt to create a new connection with user1 to the destination RGW
- log.debug('Attempt to create a new connection with user1 to the destination RGW')
- # and use that to attempt a delete (that should fail)
- exception_encountered = False
- try:
- (dest_remote_host, dest_remote_port) = ctx.rgw.role_endpoints[dest_client]
- connection_dest = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=dest_remote_port,
- host=dest_remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
-
- # this should fail
- connection_dest.delete_bucket(bucket_name2)
- except boto.exception.S3ResponseError as e:
- assert e.status == 301
- exception_encountered = True
-
- # confirm that the expected exception was seen
- assert exception_encountered
-
- # now delete the bucket on the source RGW and do another sync
- log.debug('now delete the bucket on the source RGW and do another sync')
- bucket.delete()
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- if same_region == len(ctx.radosgw_agent.config):
- bucket.delete()
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # make sure that the bucket no longer exists in either region
- log.debug('make sure that the bucket no longer exists in either region')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
- 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
- (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
- 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
- # Both of the previous calls should have errors due to requesting
- # metadata for non-existent buckets
- assert err1
- assert err2
-
- # create a bucket and then sync it
- log.debug('create a bucket and then sync it')
- bucket = connection.create_bucket(bucket_name2)
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # compare the metadata for the bucket between different regions, make sure it matches
- log.debug('compare the metadata for the bucket between different regions, make sure it matches')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- assert out1 == out2
-
- # Now delete the bucket and recreate it with a different user
- log.debug('Now delete the bucket and recreate it with a different user')
- # within the same window of time and then sync.
- bucket.delete()
- bucket = connection2.create_bucket(bucket_name2)
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # compare the metadata for the bucket between different regions, make sure it matches
- log.debug('compare the metadata for the bucket between different regions, make sure it matches')
- # user2 should own the bucket in both regions
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- assert out1 == out2
- assert out1['data']['owner'] == user2
- assert out1['data']['owner'] != user1
-
- # now we're going to use this bucket to test meta-data update propagation
- log.debug('now we\'re going to use this bucket to test meta-data update propagation')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
-
- # get the metadata so we can tweak it
- log.debug('get the metadata so we can tweak it')
- (err, orig_data) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
-
- # manually edit mtime for this bucket to be 300 seconds in the past
- log.debug('manually edit mtime for this bucket to be 300 seconds in the past')
- new_data = copy.deepcopy(orig_data)
- new_data['mtime'] = orig_data['mtime'] - 300
- assert new_data != orig_data
- (err, out) = rgwadmin(ctx, source_client,
- ['metadata', 'put', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- stdin=StringIO(json.dumps(new_data)),
- check_status=True)
-
- # get the metadata and make sure that the 'put' worked
- log.debug('get the metadata and make sure that the \'put\' worked')
- (err, out) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- assert out == new_data
-
- # sync to propagate the new metadata
- log.debug('sync to propagate the new metadata')
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # get the metadata from the dest and compare it to what we just set
- log.debug('get the metadata from the dest and compare it to what we just set')
- # and what the source region has.
- (err1, out1) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- # yeah for the transitive property
- assert out1 == out2
- assert out1 == new_data
-
- # now we delete the bucket
- log.debug('now we delete the bucket')
- bucket.delete()
-
- log.debug('sync to propagate the deleted bucket')
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # Delete user2 as later tests do not expect it to exist.
- # Verify that it is gone on both regions
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, source_client,
- ['user', 'rm', '--uid', user2], check_status=True)
- rgw_utils.radosgw_agent_sync_all(ctx)
- # The two 'user info' calls should fail and not return any data
- # since we just deleted this user.
- (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user2])
- assert out is None
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user2])
- assert out is None
-
- # Test data sync
-
- # First create a bucket for data sync test purpose
- bucket = connection.create_bucket(bucket_name + 'data')
-
- # Create a tiny file and check if in sync
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- if c_config.get('metadata-only'):
- continue
-
- source_client = c_config['src']
- dest_client = c_config['dest']
- k = boto.s3.key.Key(bucket)
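-        # bundle the user-data, meta-data and test script into a cloud-init
-        # "cidata" iso; the guest reads it from the virtual cdrom on boot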
- k.key = 'tiny_file'
- k.set_contents_from_string("123456789")
- time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
- rgw_utils.radosgw_agent_sync_all(ctx, data=True)
- (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
- dest_connection = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=dest_port,
- host=dest_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
- dest_k = dest_connection.get_bucket(bucket_name + 'data').get_key('tiny_file')
- assert k.get_contents_as_string() == dest_k.get_contents_as_string()
-
- # check that deleting it removes it from the dest zone
- k.delete()
- time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
- rgw_utils.radosgw_agent_sync_all(ctx, data=True)
-
- dest_bucket = dest_connection.get_bucket(bucket_name + 'data')
- dest_k = dest_bucket.get_key('tiny_file')
-        assert dest_k is None, 'object not deleted from destination zone'
-
- # finally we delete the bucket
- bucket.delete()
-
- bucket = connection.create_bucket(bucket_name + 'data2')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- if c_config.get('metadata-only'):
- continue
-
- source_client = c_config['src']
- dest_client = c_config['dest']
- (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
- dest_connection = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=dest_port,
- host=dest_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
- for i in range(20):
- k = boto.s3.key.Key(bucket)
- k.key = 'tiny_file_' + str(i)
- k.set_contents_from_string(str(i) * 100)
-
- time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
- rgw_utils.radosgw_agent_sync_all(ctx, data=True)
-
- for i in range(20):
- dest_k = dest_connection.get_bucket(bucket_name + 'data2').get_key('tiny_file_' + str(i))
- assert (str(i) * 100) == dest_k.get_contents_as_string()
- k = boto.s3.key.Key(bucket)
- k.key = 'tiny_file_' + str(i)
- k.delete()
-
- # check that deleting removes the objects from the dest zone
- time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
- rgw_utils.radosgw_agent_sync_all(ctx, data=True)
-
- for i in range(20):
- dest_bucket = dest_connection.get_bucket(bucket_name + 'data2')
- dest_k = dest_bucket.get_key('tiny_file_' + str(i))
-            assert dest_k is None, 'object %d not deleted from destination zone' % i
- bucket.delete()
-
- # end of 'if multi_region_run:'
-
- # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
- check_status=True)
-
- # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
- assert out['suspended']
-
- # TESTCASE 're-enable','user','enable','suspended user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], check_status=True)
-
- # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
- assert not out['suspended']
-
- # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'key', 'create', '--uid', user1,
- '--access-key', access_key2, '--secret', secret_key2,
- ], check_status=True)
-
- # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1],
- check_status=True)
- assert len(out['keys']) == 2
- assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
- assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
-
- # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
- (err, out) = rgwadmin(ctx, client, [
- 'key', 'rm', '--uid', user1,
- '--access-key', access_key2,
- ], check_status=True)
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
-
- # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
- subuser_access = 'full'
- subuser_perm = 'full-control'
-
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'create', '--subuser', subuser1,
- '--access', subuser_access
- ], check_status=True)
-
- # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'modify', '--subuser', subuser1,
- '--secret', swift_secret1,
- '--key-type', 'swift',
- ], check_status=True)
-
- # TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
-
- assert out['subusers'][0]['permissions'] == subuser_perm
-
- # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
- assert len(out['swift_keys']) == 1
- assert out['swift_keys'][0]['user'] == subuser1
- assert out['swift_keys'][0]['secret_key'] == swift_secret1
-
- # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'create', '--subuser', subuser2,
- '--secret', swift_secret2,
- '--key-type', 'swift',
- ], check_status=True)
-
- # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
- assert len(out['swift_keys']) == 2
- assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
- assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
-
- # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
- (err, out) = rgwadmin(ctx, client, [
- 'key', 'rm', '--subuser', subuser1,
- '--key-type', 'swift',
- ], check_status=True)
- assert len(out['swift_keys']) == 1
-
- # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'rm', '--subuser', subuser1,
- ], check_status=True)
- assert len(out['subusers']) == 1
-
-    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and its keys are removed'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'rm', '--subuser', subuser2,
- '--key-type', 'swift', '--purge-keys',
- ], check_status=True)
- assert len(out['swift_keys']) == 0
- assert len(out['subusers']) == 0
-
- # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1],
- check_status=True)
- assert len(out) == 0
-
- if multi_region_run:
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
- assert len(out) == 0
-
- # create a first bucket
- bucket = connection.create_bucket(bucket_name)
-
- # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
- assert len(out) == 1
- assert out[0] == bucket_name
-
- # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True)
- assert len(out) >= 1
-    assert bucket_name in out
-
-    # TESTCASE 'max-bucket-limit','bucket','create','4 buckets','5th bucket fails due to max buckets == 4'
- bucket2 = connection.create_bucket(bucket_name + '2')
- bucket3 = connection.create_bucket(bucket_name + '3')
- bucket4 = connection.create_bucket(bucket_name + '4')
- # the 5th should fail.
- failed = False
- try:
- connection.create_bucket(bucket_name + '5')
- except Exception:
- failed = True
- assert failed
-
- # delete the buckets
- bucket2.delete()
- bucket3.delete()
- bucket4.delete()
-
- # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
- (err, out) = rgwadmin(ctx, client, [
- 'bucket', 'stats', '--bucket', bucket_name], check_status=True)
- assert out['owner'] == user1
- bucket_id = out['id']
-
- # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], check_status=True)
- assert len(out) == 1
- assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
-
- # use some space
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('one')
-
- # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
- (err, out) = rgwadmin(ctx, client, [
- 'bucket', 'stats', '--bucket', bucket_name], check_status=True)
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 1
- assert out['usage']['rgw.main']['size_kb'] > 0
-
- # reclaim it
- key.delete()
-
- # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
- (err, out) = rgwadmin(ctx, client,
- ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name],
- check_status=True)
-
- # create a second user to link the bucket to
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user2,
- '--display-name', display_name2,
- '--access-key', access_key2,
- '--secret', secret_key2,
- '--max-buckets', '1',
- ],
- check_status=True)
-
- # try creating an object with the first user before the bucket is relinked
- denied = False
- key = boto.s3.key.Key(bucket)
-
- try:
- key.set_contents_from_string('two')
- except boto.exception.S3ResponseError:
- denied = True
-
- assert not denied
-
- # delete the object
- key.delete()
-
- # link the bucket to another user
- (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name],
- check_status=True)
-
- # try to remove user, should fail (has a linked bucket)
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
- assert err
-
- # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name],
- check_status=True)
-
- # relink the bucket to the first user and delete the second user
- (err, out) = rgwadmin(ctx, client,
- ['bucket', 'link', '--uid', user1, '--bucket', bucket_name],
- check_status=True)
-
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2],
- check_status=True)
-
- # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
-
- # upload an object
- object_name = 'four'
- key = boto.s3.key.Key(bucket, object_name)
- key.set_contents_from_string(object_name)
-
- # now delete it
- (err, out) = rgwadmin(ctx, client,
- ['object', 'rm', '--bucket', bucket_name, '--object', object_name],
- check_status=True)
-
-    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
- (err, out) = rgwadmin(ctx, client, [
- 'bucket', 'stats', '--bucket', bucket_name],
- check_status=True)
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 0
-
- # list log objects
-    # TESTCASE 'log-list','log','list','after activity','succeeds, lists at least one object'
- (err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True)
- assert len(out) > 0
-
- for obj in out:
- # TESTCASE 'log-show','log','show','after activity','returns expected info'
- if obj[:4] == 'meta' or obj[:4] == 'data':
- continue
-
- (err, rgwlog) = rgwadmin(ctx, client, ['log', 'show', '--object', obj],
- check_status=True)
- assert len(rgwlog) > 0
-
- # exempt bucket_name2 from checking as it was only used for multi-region tests
- assert rgwlog['bucket'].find(bucket_name) == 0 or rgwlog['bucket'].find(bucket_name2) == 0
- assert rgwlog['bucket'] != bucket_name or rgwlog['bucket_id'] == bucket_id
- assert rgwlog['bucket_owner'] == user1 or rgwlog['bucket'] == bucket_name + '5' or rgwlog['bucket'] == bucket_name2
- for entry in rgwlog['log_entries']:
-            log.debug('checking log entry: %s', entry)
- assert entry['bucket'] == rgwlog['bucket']
- possible_buckets = [bucket_name + '5', bucket_name2]
- user = entry['user']
- assert user == user1 or user.endswith('system-user') or \
- rgwlog['bucket'] in possible_buckets
-
- # TESTCASE 'log-rm','log','rm','delete log objects','succeeds'
- (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj],
- check_status=True)
-
- # TODO: show log by bucket+date
-
- # need to wait for all usage data to get flushed, should take up to 30 seconds
- timestamp = time.time()
- while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj']) # last operation we did is delete obj, wait for it to flush
- if get_user_successful_ops(out, user1) > 0:
- break
- time.sleep(1)
-
- assert time.time() - timestamp <= (20 * 60)
-
- # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True)
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
-
- user_summary = get_user_summary(out, user1)
-
- total = user_summary['total']
- assert total['successful_ops'] > 0
-
- # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
- check_status=True)
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- for entry in user_summary['categories']:
- assert entry['successful_ops'] > 0
- assert user_summary['user'] == user1
-
- # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
- test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
- for cat in test_categories:
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat],
- check_status=True)
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- assert user_summary['user'] == user1
- assert len(user_summary['categories']) == 1
- entry = user_summary['categories'][0]
- assert entry['category'] == cat
- assert entry['successful_ops'] > 0
-
-    # the usage flush interval is 30 seconds; wait that long and then some
- # to make sure everything has been flushed
- time.sleep(35)
-
- # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
- (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1],
- check_status=True)
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
- check_status=True)
- assert len(out['entries']) == 0
- assert len(out['summary']) == 0
-
- # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
- check_status=True)
-
- # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
- try:
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('five')
- except boto.exception.S3ResponseError as e:
- assert e.status == 403
-
- # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1],
- check_status=True)
-
- # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('six')
-
- # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection'
-
- # create an object large enough to be split into multiple parts
- test_string = 'foo'*10000000
-
- big_key = boto.s3.key.Key(bucket)
- big_key.set_contents_from_string(test_string)
-
- # now delete the head
- big_key.delete()
-
- # wait a bit to give the garbage collector time to cycle
- time.sleep(15)
-
- (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
-
- assert len(out) > 0
-
- # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage'
- (err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True)
-
-    # confirm that the gc list is now empty
- (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
-
- assert len(out) == 0
-
- # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
- assert err
-
- # delete should fail because ``key`` still exists
- try:
- bucket.delete()
- except boto.exception.S3ResponseError as e:
- assert e.status == 409
-
- key.delete()
- bucket.delete()
-
- # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
- bucket = connection.create_bucket(bucket_name)
-
- # create an object
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('seven')
-
- # should be private already but guarantee it
- key.set_acl('private')
-
- (err, out) = rgwadmin(ctx, client,
- ['policy', '--bucket', bucket.name, '--object', key.key],
- check_status=True)
-
- acl = get_acl(key)
-
- assert acl == out.strip('\n')
-
- # add another grantee by making the object public read
- key.set_acl('public-read')
-
- (err, out) = rgwadmin(ctx, client,
- ['policy', '--bucket', bucket.name, '--object', key.key],
- check_status=True)
-
- acl = get_acl(key)
-
- assert acl == out.strip('\n')
-
- # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key_name = ['eight', 'nine', 'ten', 'eleven']
- for i in range(4):
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string(key_name[i])
-
- (err, out) = rgwadmin(ctx, client,
- ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'],
- check_status=True)
-
- # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
- caps='user=read'
- (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps])
-
- assert out['caps'][0]['perm'] == 'read'
-
- # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps])
-
- assert not out['caps']
-
- # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
-
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
- assert err
-
- # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('twelve')
-
- (err, out) = rgwadmin(ctx, client,
- ['user', 'rm', '--uid', user1, '--purge-data' ],
- check_status=True)
-
- # TESTCASE 'rm-user3','user','rm','deleted user','fails'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert err
-
- # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule'
- #
-
- (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
- orig_placement_pools = len(out['placement_pools'])
-
-    # removed this test: it is not correct to assume that the zone has a default
-    # placement rule, as that depends on how the zone was set up beforehand
- #
- # assert len(out) > 0
- # assert len(out['placement_pools']) == 1
-
- # default_rule = out['placement_pools'][0]
- # assert default_rule['key'] == 'default-placement'
-
- rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}}
-
- out['placement_pools'].append(rule)
-
- (err, out) = rgwadmin(ctx, client, ['zone', 'set'],
- stdin=StringIO(json.dumps(out)),
- check_status=True)
-
- (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
- assert len(out) > 0
- assert len(out['placement_pools']) == orig_placement_pools + 1
+++ /dev/null
-"""
-Run a series of rgw admin commands through the rest interface.
-
-The test cases in this file have been annotated for inventory.
-To extract the inventory (in csv format) use the command:
-
- grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
-
-"""
-from cStringIO import StringIO
-import logging
-import json
-
-import boto.exception
-import boto.s3.connection
-import boto.s3.acl
-
-import requests
-import time
-
-from boto.connection import AWSAuthConnection
-from teuthology import misc as teuthology
-from util.rgw import get_user_summary, get_user_successful_ops
-
-log = logging.getLogger(__name__)
-
-def rgwadmin(ctx, client, cmd):
- """
- Perform rgw admin command
-
- :param client: client
- :param cmd: command to execute.
- :return: command exit status, json result.
- """
- log.info('radosgw-admin: %s' % cmd)
- testdir = teuthology.get_testdir(ctx)
- pre = [
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin',
- '--log-to-stderr',
- '--format', 'json',
- ]
- pre.extend(cmd)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- proc = remote.run(
- args=pre,
- check_status=False,
- stdout=StringIO(),
- stderr=StringIO(),
- )
- r = proc.exitstatus
- out = proc.stdout.getvalue()
- j = None
- if not r and out != '':
- try:
- j = json.loads(out)
- log.info(' json result: %s' % j)
- except ValueError:
- j = out
- log.info(' raw result: %s' % j)
- return (r, j)
-
-
-def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False):
- """
- perform a rest command
- """
- log.info('radosgw-admin-rest: %s %s' % (cmd, params))
- put_cmds = ['create', 'link', 'add']
- post_cmds = ['unlink', 'modify']
- delete_cmds = ['trim', 'rm', 'process']
- get_cmds = ['check', 'info', 'show', 'list']
-
- bucket_sub_resources = ['object', 'policy', 'index']
- user_sub_resources = ['subuser', 'key', 'caps']
- zone_sub_resources = ['pool', 'log', 'garbage']
-
- def get_cmd_method_and_handler(cmd):
- """
- Get the rest command and handler from information in cmd and
- from the imported requests object.
- """
- if cmd[1] in put_cmds:
- return 'PUT', requests.put
- elif cmd[1] in delete_cmds:
- return 'DELETE', requests.delete
- elif cmd[1] in post_cmds:
- return 'POST', requests.post
- elif cmd[1] in get_cmds:
- return 'GET', requests.get
-
- def get_resource(cmd):
- """
- Get the name of the resource from information in cmd.
- """
- if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources:
- if cmd[0] == 'bucket':
- return 'bucket', ''
- else:
- return 'bucket', cmd[0]
- elif cmd[0] == 'user' or cmd[0] in user_sub_resources:
- if cmd[0] == 'user':
- return 'user', ''
- else:
- return 'user', cmd[0]
- elif cmd[0] == 'usage':
- return 'usage', ''
- elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources:
- if cmd[0] == 'zone':
- return 'zone', ''
- else:
- return 'zone', cmd[0]
-
- def build_admin_request(conn, method, resource = '', headers=None, data='',
- query_args=None, params=None):
- """
-        Build an administrative request adapted from the build_request()
- method of boto.connection
- """
-
- path = conn.calling_format.build_path_base('admin', resource)
- auth_path = conn.calling_format.build_auth_path('admin', resource)
- host = conn.calling_format.build_host(conn.server_name(), 'admin')
- if query_args:
- path += '?' + query_args
- boto.log.debug('path=%s' % path)
- auth_path += '?' + query_args
- boto.log.debug('auth_path=%s' % auth_path)
- return AWSAuthConnection.build_base_http_request(conn, method, path,
- auth_path, params, headers, data, host)
-
- method, handler = get_cmd_method_and_handler(cmd)
- resource, query_args = get_resource(cmd)
- request = build_admin_request(connection, method, resource,
- query_args=query_args, headers=headers)
-
- url = '{protocol}://{host}{path}'.format(protocol=request.protocol,
- host=request.host, path=request.path)
-
- request.authorize(connection=connection)
- result = handler(url, params=params, headers=request.headers)
-
- if raw:
-        log.info(' text result: %s' % result.text)
-        return result.status_code, result.text
- else:
- log.info(' json result: %s' % result.json())
- return result.status_code, result.json()
-
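# A standalone sketch of the dispatch done by rgwadmin_rest() above: the verb
# (second element) selects the HTTP method and the first element selects the
# admin resource. Only top-level resources are shown here; the helper above
# additionally folds sub-resources such as 'key', 'subuser' or 'caps' back
# under their parent resource.
PUT_CMDS = ['create', 'link', 'add']
POST_CMDS = ['unlink', 'modify']
DELETE_CMDS = ['trim', 'rm', 'process']
GET_CMDS = ['check', 'info', 'show', 'list']

def route(cmd):
    """Return (method, resource) for a two-element admin command."""
    resource, verb = cmd[0], cmd[1]
    if verb in PUT_CMDS:
        return 'PUT', resource
    elif verb in POST_CMDS:
        return 'POST', resource
    elif verb in DELETE_CMDS:
        return 'DELETE', resource
    elif verb in GET_CMDS:
        return 'GET', resource
    raise ValueError('unknown verb: %s' % verb)

# route(['user', 'create'])  -> ('PUT', 'user')
# route(['bucket', 'rm'])    -> ('DELETE', 'bucket')
# route(['usage', 'show'])   -> ('GET', 'usage')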
-
-def task(ctx, config):
- """
- Test radosgw-admin functionality through the RESTful interface
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- # just use the first client...
- client = clients[0]
-
- ##
- admin_user = 'ada'
- admin_display_name = 'Ms. Admin User'
- admin_access_key = 'MH1WC2XQ1S8UISFDZC8W'
- admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG'
- admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write'
-
- user1 = 'foo'
- user2 = 'fud'
- subuser1 = 'foo:foo1'
- subuser2 = 'foo:foo2'
- display_name1 = 'Foo'
- display_name2 = 'Fud'
- email = 'foo@foo.com'
- access_key = '9te6NH5mcdcq0Tc5i8i1'
- secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
- access_key2 = 'p5YnriCv1nAtykxBrupQ'
- secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
- swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
- swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
-
- bucket_name = 'myfoo'
-
- # legend (test cases can be easily grep-ed out)
- # TESTCASE 'testname','object','method','operation','assertion'
- # TESTCASE 'create-admin-user','user','create','administrative user','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', admin_user,
- '--display-name', admin_display_name,
- '--access-key', admin_access_key,
- '--secret', admin_secret_key,
- '--max-buckets', '0',
- '--caps', admin_caps
- ])
- logging.error(out)
- logging.error(err)
- assert not err
-
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- remote_host = remote.name.split('@')[1]
- admin_conn = boto.s3.connection.S3Connection(
- aws_access_key_id=admin_access_key,
- aws_secret_access_key=admin_secret_key,
- is_secure=False,
- port=7280,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
-
- # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1})
- assert ret == 404
-
- # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['user', 'create'],
- {'uid' : user1,
- 'display-name' : display_name1,
- 'email' : email,
- 'access-key' : access_key,
- 'secret-key' : secret_key,
- 'max-buckets' : '4'
- })
-
- assert ret == 200
-
- # TESTCASE 'info-existing','user','info','existing user','returns correct info'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
-
- assert out['user_id'] == user1
- assert out['email'] == email
- assert out['display_name'] == display_name1
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
- assert not out['suspended']
-
- # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
- assert ret == 200
-
- # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert out['suspended']
-
- # TESTCASE 're-enable','user','enable','suspended user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
-    assert ret == 200
-
- # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert not out['suspended']
-
- # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['key', 'create'],
- {'uid' : user1,
- 'access-key' : access_key2,
- 'secret-key' : secret_key2
- })
-
-
- assert ret == 200
-
- # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out['keys']) == 2
- assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
- assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
-
- # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['key', 'rm'],
- {'uid' : user1,
- 'access-key' : access_key2
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
-
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
-
- # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'create'],
- {'subuser' : subuser1,
- 'secret-key' : swift_secret1,
- 'key-type' : 'swift'
- })
-
- assert ret == 200
-
- # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out['swift_keys']) == 1
- assert out['swift_keys'][0]['user'] == subuser1
- assert out['swift_keys'][0]['secret_key'] == swift_secret1
-
- # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'create'],
- {'subuser' : subuser2,
- 'secret-key' : swift_secret2,
- 'key-type' : 'swift'
- })
-
- assert ret == 200
-
- # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out['swift_keys']) == 2
- assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
- assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
-
- # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['key', 'rm'],
- {'subuser' : subuser1,
- 'key-type' :'swift'
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert len(out['swift_keys']) == 1
-
- # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'rm'],
- {'subuser' : subuser1
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert len(out['subusers']) == 1
-
-    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key are removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'rm'],
- {'subuser' : subuser2,
- 'key-type' : 'swift',
-             'purge-keys' : True
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert len(out['swift_keys']) == 0
- assert len(out['subusers']) == 0
-
- # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out) == 0
-
- # connect to rgw
- connection = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=7280,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
-
- # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
- assert ret == 200
- assert len(out) == 0
-
- # create a first bucket
- bucket = connection.create_bucket(bucket_name)
-
- # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out) == 1
- assert out[0] == bucket_name
-
- # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
-
- assert ret == 200
- assert out['owner'] == user1
- bucket_id = out['id']
-
- # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
- assert ret == 200
- assert len(out) == 1
- assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
-
- # use some space
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('one')
-
- # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
- assert ret == 200
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 1
- assert out['usage']['rgw.main']['size_kb'] > 0
-
- # reclaim it
- key.delete()
-
-    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name})
-
- assert ret == 200
-
- # create a second user to link the bucket to
- (ret, out) = rgwadmin_rest(admin_conn,
- ['user', 'create'],
- {'uid' : user2,
- 'display-name' : display_name2,
- 'access-key' : access_key2,
- 'secret-key' : secret_key2,
- 'max-buckets' : '1',
- })
-
- assert ret == 200
-
- # try creating an object with the first user before the bucket is relinked
- denied = False
- key = boto.s3.key.Key(bucket)
-
- try:
- key.set_contents_from_string('two')
- except boto.exception.S3ResponseError:
- denied = True
-
- assert not denied
-
- # delete the object
- key.delete()
-
- # link the bucket to another user
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user2, 'bucket' : bucket_name})
-
- assert ret == 200
-
- # try creating an object with the first user which should cause an error
- key = boto.s3.key.Key(bucket)
-
- try:
- key.set_contents_from_string('three')
- except boto.exception.S3ResponseError:
- denied = True
-
- assert denied
-
- # relink the bucket to the first user and delete the second user
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user1, 'bucket' : bucket_name})
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2})
- assert ret == 200
-
- # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
-
- # upload an object
- object_name = 'four'
- key = boto.s3.key.Key(bucket, object_name)
- key.set_contents_from_string(object_name)
-
- # now delete it
- (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name})
- assert ret == 200
-
-    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
- assert ret == 200
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 0
-
- # create a bucket for deletion stats
- useless_bucket = connection.create_bucket('useless_bucket')
- useless_key = useless_bucket.new_key('useless_key')
- useless_key.set_contents_from_string('useless string')
-
- # delete it
- useless_key.delete()
- useless_bucket.delete()
-
- # wait for the statistics to flush
- time.sleep(60)
-
-    # wait for all usage data to get flushed; this normally takes up to 30 seconds, but allow up to 20 minutes
- timestamp = time.time()
- while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'}) # last operation we did is delete obj, wait for it to flush
-
- if get_user_successful_ops(out, user1) > 0:
- break
- time.sleep(1)
-
- assert time.time() - timestamp <= (20 * 60)
-
- # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'])
- assert ret == 200
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = get_user_summary(out, user1)
- total = user_summary['total']
- assert total['successful_ops'] > 0
-
- # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
- assert ret == 200
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- for entry in user_summary['categories']:
- assert entry['successful_ops'] > 0
- assert user_summary['user'] == user1
-
- # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
- test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
- for cat in test_categories:
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat})
- assert ret == 200
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- assert user_summary['user'] == user1
- assert len(user_summary['categories']) == 1
- entry = user_summary['categories'][0]
- assert entry['category'] == cat
- assert entry['successful_ops'] > 0
-
- # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1})
- assert ret == 200
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
- assert ret == 200
- assert len(out['entries']) == 0
- assert len(out['summary']) == 0
-
- # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
- assert ret == 200
-
- # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
- try:
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('five')
- except boto.exception.S3ResponseError as e:
- assert e.status == 403
-
- # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
- assert ret == 200
-
- # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('six')
-
- # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection'
-
- # create an object large enough to be split into multiple parts
- test_string = 'foo'*10000000
-
- big_key = boto.s3.key.Key(bucket)
- big_key.set_contents_from_string(test_string)
-
- # now delete the head
- big_key.delete()
-
- # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
- assert ret == 409
-
- # delete should fail because ``key`` still exists
- try:
- bucket.delete()
- except boto.exception.S3ResponseError as e:
- assert e.status == 409
-
- key.delete()
- bucket.delete()
-
- # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
- bucket = connection.create_bucket(bucket_name)
-
- # create an object
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('seven')
-
- # should be private already but guarantee it
- key.set_acl('private')
-
- (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
- assert ret == 200
-
- acl = key.get_xml_acl()
- assert acl == out.strip('\n')
-
- # add another grantee by making the object public read
- key.set_acl('public-read')
-
- (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
- assert ret == 200
-
- acl = key.get_xml_acl()
- assert acl == out.strip('\n')
-
- # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key_name = ['eight', 'nine', 'ten', 'eleven']
- for i in range(4):
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string(key_name[i])
-
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True})
- assert ret == 200
-
- # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
- caps = 'usage=read'
- (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' : user1, 'user-caps' : caps})
- assert ret == 200
- assert out[0]['perm'] == 'read'
-
- # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' : user1, 'user-caps' : caps})
- assert ret == 200
- assert not out
-
- # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
- assert ret == 409
-
-    # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('twelve')
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True})
- assert ret == 200
-
- # TESTCASE 'rm-user3','user','info','deleted user','fails'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 404
-
+++ /dev/null
-"""
-Run rados gateway agent in test mode
-"""
-import contextlib
-import logging
-import argparse
-
-from teuthology.orchestra import run
-from teuthology import misc as teuthology
-import util.rgw as rgw_utils
-
-log = logging.getLogger(__name__)
-
-def run_radosgw_agent(ctx, config):
- """
- Run a single radosgw-agent. See task() for config format.
- """
- return_list = list()
- for (client, cconf) in config.items():
- # don't process entries that are not clients
- if not client.startswith('client.'):
- log.debug('key {data} does not start with \'client.\', moving on'.format(
- data=client))
- continue
-
- src_client = cconf['src']
- dest_client = cconf['dest']
-
- src_zone = rgw_utils.zone_for_client(ctx, src_client)
- dest_zone = rgw_utils.zone_for_client(ctx, dest_client)
-
- log.info("source is %s", src_zone)
- log.info("dest is %s", dest_zone)
-
- testdir = teuthology.get_testdir(ctx)
- (remote,) = ctx.cluster.only(client).remotes.keys()
- # figure out which branch to pull from
- branch = cconf.get('force-branch', None)
- if not branch:
- branch = cconf.get('branch', 'master')
- sha1 = cconf.get('sha1')
- remote.run(
- args=[
- 'cd', testdir, run.Raw('&&'),
- 'git', 'clone',
- '-b', branch,
-# 'https://github.com/ceph/radosgw-agent.git',
- 'git://git.ceph.com/radosgw-agent.git',
- 'radosgw-agent.{client}'.format(client=client),
- ]
- )
- if sha1 is not None:
- remote.run(
- args=[
- 'cd', testdir, run.Raw('&&'),
-                    'cd', 'radosgw-agent.{client}'.format(client=client),
-                    run.Raw('&&'),
- 'git', 'reset', '--hard', sha1,
- ]
- )
- remote.run(
- args=[
- 'cd', testdir, run.Raw('&&'),
- 'cd', 'radosgw-agent.{client}'.format(client=client),
- run.Raw('&&'),
- './bootstrap',
- ]
- )
-
- src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client,
- src_zone)
- dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client,
- dest_zone)
- src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client,
- src_zone)
- dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client,
- dest_zone)
- sync_scope = cconf.get('sync-scope', None)
- port = cconf.get('port', 8000)
- daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port)
- in_args=[
- 'daemon-helper',
- 'kill',
- '{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir,
- client=client),
- '-v',
- '--src-access-key', src_access,
- '--src-secret-key', src_secret,
- '--source', "http://{addr}:{port}".format(addr=src_host, port=src_port),
- '--dest-access-key', dest_access,
- '--dest-secret-key', dest_secret,
- '--max-entries', str(cconf.get('max-entries', 1000)),
- '--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format(
- tdir=testdir,
- client=client),
- '--object-sync-timeout', '30',
- ]
-
- if cconf.get('metadata-only', False):
- in_args.append('--metadata-only')
-
- # the test server and full/incremental flags are mutually exclusive
- if sync_scope is None:
- in_args.append('--test-server-host')
- in_args.append('0.0.0.0')
- in_args.append('--test-server-port')
- in_args.append(str(port))
- log.debug('Starting a sync test server on {client}'.format(client=client))
- # Stash the radosgw-agent server / port # for use by subsequent tasks
- ctx.radosgw_agent.endpoint = (client, str(port))
- else:
- in_args.append('--sync-scope')
- in_args.append(sync_scope)
- log.debug('Starting a {scope} sync on {client}'.format(scope=sync_scope,client=client))
-
- # positional arg for destination must come last
- in_args.append("http://{addr}:{port}".format(addr=dest_host,
- port=dest_port))
-
- return_list.append((client, remote.run(
- args=in_args,
- wait=False,
- stdin=run.PIPE,
- logger=log.getChild(daemon_name),
- )))
- return return_list
-
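# An illustrative sketch of the flag selection above: without a sync-scope the
# agent is started as a test server bound to 0.0.0.0, otherwise it gets
# --sync-scope; --metadata-only is independent of that choice. Values here are
# made up, only the branching mirrors run_radosgw_agent().
def agent_mode_args(sync_scope=None, port=8000, metadata_only=False):
    """Return the mode-specific arguments appended to the agent command."""
    args = []
    if metadata_only:
        args.append('--metadata-only')
    if sync_scope is None:
        args += ['--test-server-host', '0.0.0.0',
                 '--test-server-port', str(port)]
    else:
        args += ['--sync-scope', sync_scope]
    return args

# agent_mode_args()                   -> test-server flags on port 8000
# agent_mode_args(sync_scope='full')  -> ['--sync-scope', 'full']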
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run radosgw-agents in test mode.
-
- Configuration is clients to run the agents on, with settings for
- source client, destination client, and port to listen on. Binds
- to 0.0.0.0. Port defaults to 8000. This must be run on clients
- that have the correct zone root pools and rgw zone set in
- ceph.conf, or the task cannot read the region information from the
- cluster.
-
- By default, this task will start an HTTP server that will trigger full
- or incremental syncs based on requests made to it.
- Alternatively, a single full sync can be triggered by
- specifying 'sync-scope: full' or a loop of incremental syncs can be triggered
- by specifying 'sync-scope: incremental' (the loop will sleep
- '--incremental-sync-delay' seconds between each sync, default is 30 seconds).
-
- By default, both data and metadata are synced. To only sync
- metadata, for example because you want to sync between regions,
- set metadata-only: true.
-
- An example::
-
- tasks:
- - ceph:
- conf:
- client.0:
- rgw zone = foo
- rgw zone root pool = .root.pool
- client.1:
- rgw zone = bar
- rgw zone root pool = .root.pool2
- - rgw: # region configuration omitted for brevity
- - radosgw-agent:
- client.0:
- branch: wip-next-feature-branch
- src: client.0
- dest: client.1
- sync-scope: full
- metadata-only: true
- # port: 8000 (default)
- client.1:
- src: client.1
- dest: client.0
- port: 8001
- """
- assert isinstance(config, dict), 'rgw_sync_agent requires a dictionary config'
- log.debug("config is %s", config)
-
- overrides = ctx.config.get('overrides', {})
- # merge each client section, but only if it exists in config since there isn't
- # a sensible default action for this task
- for client in config.iterkeys():
- if config[client]:
- log.debug('config[{client}]: {data}'.format(client=client, data=config[client]))
- teuthology.deep_merge(config[client], overrides.get('radosgw-agent', {}))
-
- ctx.radosgw_agent = argparse.Namespace()
- ctx.radosgw_agent.config = config
-
- procs = run_radosgw_agent(ctx, config)
-
- ctx.radosgw_agent.procs = procs
-
- try:
- yield
- finally:
- testdir = teuthology.get_testdir(ctx)
- try:
- for client, proc in procs:
- log.info("shutting down sync agent on %s", client)
- proc.stdin.close()
- proc.wait()
- finally:
- for client, proc in procs:
- ctx.cluster.only(client).run(
- args=[
- 'rm', '-rf',
- '{tdir}/radosgw-agent.{client}'.format(tdir=testdir,
- client=client)
- ]
- )
+++ /dev/null
-"""
-Rbd testing task
-"""
-import contextlib
-import logging
-import os
-
-from cStringIO import StringIO
-from teuthology.orchestra import run
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from teuthology.parallel import parallel
-from teuthology.task.common_fs_utils import generic_mkfs
-from teuthology.task.common_fs_utils import generic_mount
-from teuthology.task.common_fs_utils import default_image_name
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def create_image(ctx, config):
- """
- Create an rbd image.
-
- For example::
-
- tasks:
- - ceph:
- - rbd.create_image:
- client.0:
- image_name: testimage
- image_size: 100
- image_format: 1
- client.1:
-
- Image size is expressed as a number of megabytes; default value
- is 10240.
-
- Image format value must be either 1 or 2; default value is 1.
-
- """
- assert isinstance(config, dict) or isinstance(config, list), \
- "task create_image only supports a list or dictionary for configuration"
-
- if isinstance(config, dict):
- images = config.items()
- else:
- images = [(role, None) for role in config]
-
- testdir = teuthology.get_testdir(ctx)
- for role, properties in images:
- if properties is None:
- properties = {}
- name = properties.get('image_name', default_image_name(role))
- size = properties.get('image_size', 10240)
- fmt = properties.get('image_format', 1)
- (remote,) = ctx.cluster.only(role).remotes.keys()
- log.info('Creating image {name} with size {size}'.format(name=name,
- size=size))
- args = [
- 'adjust-ulimits',
-            'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rbd',
- '-p', 'rbd',
- 'create',
- '--size', str(size),
- name,
- ]
- # omit format option if using the default (format 1)
-        # since old versions of rbd don't support it
- if int(fmt) != 1:
- args += ['--format', str(fmt)]
- remote.run(args=args)
- try:
- yield
- finally:
- log.info('Deleting rbd images...')
- for role, properties in images:
- if properties is None:
- properties = {}
- name = properties.get('image_name', default_image_name(role))
- (remote,) = ctx.cluster.only(role).remotes.keys()
- remote.run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rbd',
- '-p', 'rbd',
- 'rm',
- name,
- ],
- )
-
-@contextlib.contextmanager
-def modprobe(ctx, config):
- """
-    Load the rbd kernel module.
-
- For example::
-
- tasks:
- - ceph:
- - rbd.create_image: [client.0]
- - rbd.modprobe: [client.0]
- """
- log.info('Loading rbd kernel module...')
- for role in config:
- (remote,) = ctx.cluster.only(role).remotes.keys()
- remote.run(
- args=[
- 'sudo',
- 'modprobe',
- 'rbd',
- ],
- )
- try:
- yield
- finally:
- log.info('Unloading rbd kernel module...')
- for role in config:
- (remote,) = ctx.cluster.only(role).remotes.keys()
- remote.run(
- args=[
- 'sudo',
- 'modprobe',
- '-r',
- 'rbd',
- # force errors to be ignored; necessary if more
- # than one device was created, which may mean
- # the module isn't quite ready to go the first
- # time through.
- run.Raw('||'),
- 'true',
- ],
- )
-
-@contextlib.contextmanager
-def dev_create(ctx, config):
- """
- Map block devices to rbd images.
-
- For example::
-
- tasks:
- - ceph:
- - rbd.create_image: [client.0]
- - rbd.modprobe: [client.0]
- - rbd.dev_create:
- client.0: testimage.client.0
- """
- assert isinstance(config, dict) or isinstance(config, list), \
- "task dev_create only supports a list or dictionary for configuration"
-
- if isinstance(config, dict):
- role_images = config.items()
- else:
- role_images = [(role, None) for role in config]
-
- log.info('Creating rbd block devices...')
-
- testdir = teuthology.get_testdir(ctx)
-
- for role, image in role_images:
- if image is None:
- image = default_image_name(role)
- (remote,) = ctx.cluster.only(role).remotes.keys()
-
- remote.run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rbd',
- '--user', role.rsplit('.')[-1],
- '-p', 'rbd',
- 'map',
- image,
- run.Raw('&&'),
- # wait for the symlink to be created by udev
- 'while', 'test', '!', '-e', '/dev/rbd/rbd/{image}'.format(image=image), run.Raw(';'), 'do',
- 'sleep', '1', run.Raw(';'),
- 'done',
- ],
- )
- try:
- yield
- finally:
- log.info('Unmapping rbd devices...')
- for role, image in role_images:
- if image is None:
- image = default_image_name(role)
- (remote,) = ctx.cluster.only(role).remotes.keys()
- remote.run(
- args=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rbd',
- '-p', 'rbd',
- 'unmap',
- '/dev/rbd/rbd/{imgname}'.format(imgname=image),
- run.Raw('&&'),
- # wait for the symlink to be deleted by udev
- 'while', 'test', '-e', '/dev/rbd/rbd/{image}'.format(image=image),
- run.Raw(';'),
- 'do',
- 'sleep', '1', run.Raw(';'),
- 'done',
- ],
- )
-
-
-def rbd_devname_rtn(ctx, image):
- return '/dev/rbd/rbd/{image}'.format(image=image)
-
-def canonical_path(ctx, role, path):
- """
- Determine the canonical path for a given path on the host
- representing the given role. A canonical path contains no
- . or .. components, and includes no symbolic links.
- """
- version_fp = StringIO()
- ctx.cluster.only(role).run(
- args=[ 'readlink', '-f', path ],
- stdout=version_fp,
- )
- canonical_path = version_fp.getvalue().rstrip('\n')
- version_fp.close()
- return canonical_path
-
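# For illustration only: the same canonicalisation done locally rather than on
# the remote host. os.path.realpath resolves symlinks and removes '.' and '..'
# components, which is what the 'readlink -f' call above does remotely. The
# example path in the comment is hypothetical.
import os

def canonical_path_local(path):
    """Return *path* with symlinks resolved and no '.' or '..' components."""
    return os.path.realpath(path)

# canonical_path_local('/dev/disk/by-id/some-symlink') might return '/dev/sdb'.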
-@contextlib.contextmanager
-def run_xfstests(ctx, config):
- """
- Run xfstests over specified devices.
-
- Warning: both the test and scratch devices specified will be
- overwritten. Normally xfstests modifies (but does not destroy)
- the test device, but for now the run script used here re-makes
- both filesystems.
-
- Note: Only one instance of xfstests can run on a single host at
- a time, although this is not enforced.
-
- This task in its current form needs some improvement. For
- example, it assumes all roles provided in the config are
- clients, and that the config provided is a list of key/value
- pairs. For now please use the xfstests() interface, below.
-
- For example::
-
- tasks:
- - ceph:
- - rbd.run_xfstests:
- client.0:
- count: 2
- test_dev: 'test_dev'
- scratch_dev: 'scratch_dev'
- fs_type: 'xfs'
- tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48'
- """
- with parallel() as p:
- for role, properties in config.items():
- p.spawn(run_xfstests_one_client, ctx, role, properties)
- yield
-
-def run_xfstests_one_client(ctx, role, properties):
- """
- Spawned routine to handle xfs tests for a single client
- """
- testdir = teuthology.get_testdir(ctx)
- try:
- count = properties.get('count')
- test_dev = properties.get('test_dev')
- assert test_dev is not None, \
- "task run_xfstests requires test_dev to be defined"
- test_dev = canonical_path(ctx, role, test_dev)
-
- scratch_dev = properties.get('scratch_dev')
- assert scratch_dev is not None, \
- "task run_xfstests requires scratch_dev to be defined"
- scratch_dev = canonical_path(ctx, role, scratch_dev)
-
- fs_type = properties.get('fs_type')
- tests = properties.get('tests')
-
- (remote,) = ctx.cluster.only(role).remotes.keys()
-
- # Fetch the test script
- test_root = teuthology.get_testdir(ctx)
- test_script = 'run_xfstests.sh'
- test_path = os.path.join(test_root, test_script)
-
- git_branch = 'master'
- test_url = 'https://raw.github.com/ceph/ceph/{branch}/qa/{script}'.format(branch=git_branch, script=test_script)
- # test_url = 'http://ceph.newdream.net/git/?p=ceph.git;a=blob_plain;hb=refs/heads/{branch};f=qa/{script}'.format(branch=git_branch, script=test_script)
-
- log.info('Fetching {script} for {role} from {url}'.format(script=test_script,
- role=role,
- url=test_url))
- args = [ 'wget', '-O', test_path, '--', test_url ]
- remote.run(args=args)
-
- log.info('Running xfstests on {role}:'.format(role=role))
- log.info(' iteration count: {count}:'.format(count=count))
- log.info(' test device: {dev}'.format(dev=test_dev))
- log.info(' scratch device: {dev}'.format(dev=scratch_dev))
- log.info(' using fs_type: {fs_type}'.format(fs_type=fs_type))
- log.info(' tests to run: {tests}'.format(tests=tests))
-
- # Note that the device paths are interpreted using
- # readlink -f <path> in order to get their canonical
- # pathname (so it matches what the kernel remembers).
- args = [
- '/usr/bin/sudo',
- 'TESTDIR={tdir}'.format(tdir=testdir),
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- '/bin/bash',
- test_path,
- '-c', str(count),
- '-f', fs_type,
- '-t', test_dev,
- '-s', scratch_dev,
- ]
- if tests:
- args.append(tests)
- remote.run(args=args, logger=log.getChild(role))
- finally:
- log.info('Removing {script} on {role}'.format(script=test_script,
- role=role))
- remote.run(args=['rm', '-f', test_path])
-
-@contextlib.contextmanager
-def xfstests(ctx, config):
- """
- Run xfstests over rbd devices. This interface sets up all
- required configuration automatically if not otherwise specified.
- Note that only one instance of xfstests can run on a single host
- at a time. By default, the set of tests specified is run once.
- If a (non-zero) count value is supplied, the complete set of
- tests will be run that number of times.
-
- For example::
-
- tasks:
- - ceph:
- # Image sizes are in MB
- - rbd.xfstests:
- client.0:
- count: 3
- test_image: 'test_image'
- test_size: 250
- test_format: 2
- scratch_image: 'scratch_image'
- scratch_size: 250
- scratch_format: 1
- fs_type: 'xfs'
- tests: '1-9 11-15 17 19-21 26-28 31-34 41 45-48'
- """
- if config is None:
- config = { 'all': None }
- assert isinstance(config, dict) or isinstance(config, list), \
- "task xfstests only supports a list or dictionary for configuration"
- if isinstance(config, dict):
- config = teuthology.replace_all_with_clients(ctx.cluster, config)
- runs = config.items()
- else:
- runs = [(role, None) for role in config]
-
- running_xfstests = {}
- for role, properties in runs:
- assert role.startswith('client.'), \
- "task xfstests can only run on client nodes"
- for host, roles_for_host in ctx.cluster.remotes.items():
- if role in roles_for_host:
- assert host not in running_xfstests, \
- "task xfstests allows only one instance at a time per host"
- running_xfstests[host] = True
-
- images_config = {}
- scratch_config = {}
- modprobe_config = {}
- image_map_config = {}
- scratch_map_config = {}
- xfstests_config = {}
- for role, properties in runs:
- if properties is None:
- properties = {}
-
- test_image = properties.get('test_image', 'test_image.{role}'.format(role=role))
- test_size = properties.get('test_size', 2000) # 2G
- test_fmt = properties.get('test_format', 1)
- scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role))
- scratch_size = properties.get('scratch_size', 10000) # 10G
- scratch_fmt = properties.get('scratch_format', 1)
-
- images_config[role] = dict(
- image_name=test_image,
- image_size=test_size,
- image_format=test_fmt,
- )
-
- scratch_config[role] = dict(
- image_name=scratch_image,
- image_size=scratch_size,
- image_format=scratch_fmt,
- )
-
- xfstests_config[role] = dict(
- count=properties.get('count', 1),
- test_dev='/dev/rbd/rbd/{image}'.format(image=test_image),
- scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image),
- fs_type=properties.get('fs_type', 'xfs'),
- tests=properties.get('tests'),
- )
-
- log.info('Setting up xfstests using RBD images:')
- log.info(' test ({size} MB): {image}'.format(size=test_size,
- image=test_image))
- log.info(' scratch ({size} MB): {image}'.format(size=scratch_size,
- image=scratch_image))
- modprobe_config[role] = None
- image_map_config[role] = test_image
- scratch_map_config[role] = scratch_image
-
- with contextutil.nested(
- lambda: create_image(ctx=ctx, config=images_config),
- lambda: create_image(ctx=ctx, config=scratch_config),
- lambda: modprobe(ctx=ctx, config=modprobe_config),
- lambda: dev_create(ctx=ctx, config=image_map_config),
- lambda: dev_create(ctx=ctx, config=scratch_map_config),
- lambda: run_xfstests(ctx=ctx, config=xfstests_config),
- ):
- yield
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Create and mount an rbd image.
-
- For example, you can specify which clients to run on::
-
- tasks:
- - ceph:
- - rbd: [client.0, client.1]
-
- There are a few image options::
-
- tasks:
- - ceph:
- - rbd:
- client.0: # uses defaults
- client.1:
- image_name: foo
- image_size: 2048
- image_format: 2
- fs_type: xfs
-
- To use default options on all clients::
-
- tasks:
- - ceph:
- - rbd:
- all:
-
- To create 20GiB images and format them with xfs on all clients::
-
- tasks:
- - ceph:
- - rbd:
- all:
- image_size: 20480
- fs_type: xfs
- """
- if config is None:
- config = { 'all': None }
- norm_config = config
- if isinstance(config, dict):
- norm_config = teuthology.replace_all_with_clients(ctx.cluster, config)
- if isinstance(norm_config, dict):
- role_images = {}
- for role, properties in norm_config.iteritems():
- if properties is None:
- properties = {}
- role_images[role] = properties.get('image_name')
- else:
- role_images = norm_config
-
- log.debug('rbd config is: %s', norm_config)
-
- with contextutil.nested(
- lambda: create_image(ctx=ctx, config=norm_config),
- lambda: modprobe(ctx=ctx, config=norm_config),
- lambda: dev_create(ctx=ctx, config=role_images),
- lambda: generic_mkfs(ctx=ctx, config=norm_config,
- devname_rtn=rbd_devname_rtn),
- lambda: generic_mount(ctx=ctx, config=role_images,
- devname_rtn=rbd_devname_rtn),
- ):
- yield
+++ /dev/null
-"""
-Run fsx on an rbd image
-"""
-import contextlib
-import logging
-
-from teuthology.parallel import parallel
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run fsx on an rbd image.
-
- Currently this requires running as client.admin
- to create a pool.
-
- Specify which clients to run on as a list::
-
- tasks:
- ceph:
- rbd_fsx:
- clients: [client.0, client.1]
-
- You can optionally change some properties of fsx:
-
- tasks:
- ceph:
- rbd_fsx:
- clients: <list of clients>
- seed: <random seed number, or 0 to use the time>
- ops: <number of operations to do>
- size: <maximum image size in bytes>
- """
- log.info('starting rbd_fsx...')
- with parallel() as p:
- for role in config['clients']:
- p.spawn(_run_one_client, ctx, config, role)
- yield
-
-def _run_one_client(ctx, config, role):
- """Spawned task that runs the client"""
- testdir = teuthology.get_testdir(ctx)
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- remote.run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'ceph_test_librbd_fsx',
- '-d',
- '-W', '-R', # mmap doesn't work with rbd
- '-p', str(config.get('progress_interval', 100)), # show progress
- '-P', '{tdir}/archive'.format(tdir=testdir),
- '-t', str(config.get('truncbdy',1)),
- '-l', str(config.get('size', 250000000)),
- '-S', str(config.get('seed', 0)),
- '-N', str(config.get('ops', 1000)),
- 'pool_{pool}'.format(pool=role),
- 'image_{image}'.format(image=role),
- ],
- )
+++ /dev/null
-"""
-Recovery system benchmarking
-"""
-from cStringIO import StringIO
-
-import contextlib
-import gevent
-import json
-import logging
-import random
-import time
-
-import ceph_manager
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Benchmark the recovery system.
-
- Generates objects with smalliobench, runs it normally to get a
- baseline performance measurement, then marks an OSD out and reruns
- to measure performance during recovery.
-
- The config should be as follows:
-
- recovery_bench:
- duration: <seconds for each measurement run>
- num_objects: <number of objects>
- io_size: <io size in bytes>
-
- example:
-
- tasks:
- - ceph:
- - recovery_bench:
- duration: 60
- num_objects: 500
- io_size: 4096
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'recovery_bench task only accepts a dict for configuration'
-
- log.info('Beginning recovery bench...')
-
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
- while len(manager.get_osd_status()['up']) < num_osds:
-        time.sleep(10)
-
- bench_proc = RecoveryBencher(
- manager,
- config,
- )
- try:
- yield
- finally:
- log.info('joining recovery bencher')
- bench_proc.do_join()
-
-class RecoveryBencher:
- """
- RecoveryBencher
- """
- def __init__(self, manager, config):
- self.ceph_manager = manager
- self.ceph_manager.wait_for_clean()
-
- osd_status = self.ceph_manager.get_osd_status()
- self.osds = osd_status['up']
-
- self.config = config
- if self.config is None:
- self.config = dict()
-
- else:
- def tmp(x):
- """
- Local wrapper to print value.
- """
- print x
- self.log = tmp
-
- log.info("spawning thread")
-
- self.thread = gevent.spawn(self.do_bench)
-
- def do_join(self):
- """
- Join the recovery bencher. This is called after the main
- task exits.
- """
- self.thread.get()
-
- def do_bench(self):
- """
- Do the benchmarking.
- """
- duration = self.config.get("duration", 60)
- num_objects = self.config.get("num_objects", 500)
- io_size = self.config.get("io_size", 4096)
-
- osd = str(random.choice(self.osds))
- (osd_remote,) = self.ceph_manager.ctx.cluster.only('osd.%s' % osd).remotes.iterkeys()
-
- testdir = teuthology.get_testdir(self.ceph_manager.ctx)
-
- # create the objects
- osd_remote.run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'smalliobench'.format(tdir=testdir),
- '--use-prefix', 'recovery_bench',
- '--init-only', '1',
- '--num-objects', str(num_objects),
- '--io-size', str(io_size),
- ],
- wait=True,
- )
-
- # baseline bench
- log.info('non-recovery (baseline)')
- p = osd_remote.run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'smalliobench',
- '--use-prefix', 'recovery_bench',
- '--do-not-init', '1',
- '--duration', str(duration),
- '--io-size', str(io_size),
- ],
- stdout=StringIO(),
- stderr=StringIO(),
- wait=True,
- )
- self.process_samples(p.stderr.getvalue())
-
- self.ceph_manager.raw_cluster_cmd('osd', 'out', osd)
- time.sleep(5)
-
- # recovery bench
- log.info('recovery active')
- p = osd_remote.run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'smalliobench',
- '--use-prefix', 'recovery_bench',
- '--do-not-init', '1',
- '--duration', str(duration),
- '--io-size', str(io_size),
- ],
- stdout=StringIO(),
- stderr=StringIO(),
- wait=True,
- )
- self.process_samples(p.stderr.getvalue())
-
- self.ceph_manager.raw_cluster_cmd('osd', 'in', osd)
-
- def process_samples(self, input):
- """
- Extract samples from the input and process the results
-
- :param input: input lines in JSON format
- """
- lat = {}
- for line in input.split('\n'):
- try:
- sample = json.loads(line)
- samples = lat.setdefault(sample['type'], [])
- samples.append(float(sample['latency']))
- except Exception:
- pass
-
- for type in lat:
- samples = lat[type]
- samples.sort()
-
- num = len(samples)
-
- # median
- if num & 1 == 1: # odd number of samples
- median = samples[num / 2]
- else:
- median = (samples[num / 2] + samples[num / 2 - 1]) / 2
-
- # 99%
- ninety_nine = samples[int(num * 0.99)]
-
- log.info("%s: median %f, 99%% %f" % (type, median, ninety_nine))
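# A self-contained sketch of the latency summary computed by process_samples()
# above; the JSON lines in the commented call are made up, and the median and
# 99th-percentile arithmetic mirrors the method.
import json

def summarize(lines):
    """Print median and 99th-percentile latency per sample type."""
    lat = {}
    for line in lines:
        try:
            sample = json.loads(line)
            lat.setdefault(sample['type'], []).append(float(sample['latency']))
        except (ValueError, KeyError):
            continue
    for kind, samples in sorted(lat.items()):
        samples.sort()
        num = len(samples)
        if num % 2 == 1:
            median = samples[num // 2]
        else:
            median = (samples[num // 2] + samples[num // 2 - 1]) / 2.0
        ninety_nine = samples[int(num * 0.99)]
        print('%s: median %f, 99%% %f' % (kind, median, ninety_nine))

# summarize(['{"type": "write", "latency": 0.004}',
#            '{"type": "write", "latency": 0.009}',
#            '{"type": "write", "latency": 0.030}'])
# prints: write: median 0.009000, 99% 0.030000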
+++ /dev/null
-"""
-Lost_unfound
-"""
-import logging
-import time
-import ceph_manager
-from teuthology import misc as teuthology
-from util.rados import rados
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Test handling of lost objects.
-
-    A pretty rigid cluster is brought up and tested by this task.
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'lost_unfound task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- while len(manager.get_osd_status()['up']) < 3:
-        time.sleep(10)
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_clean()
-
- # something that is always there
- dummyfile = '/etc/fstab'
-
- # take an osd out until the very end
- manager.kill_osd(2)
- manager.mark_down_osd(2)
- manager.mark_out_osd(2)
-
- # kludge to make sure they get a map
- rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.wait_for_recovery()
-
- # create old objects
- for f in range(1, 10):
- rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', 'data', 'rm', 'existed_%d' % f])
-
- # delay recovery, and make the pg log very long (to prevent backfill)
- manager.raw_cluster_cmd(
- 'tell', 'osd.1',
- 'injectargs',
- '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000'
- )
-
- manager.kill_osd(0)
- manager.mark_down_osd(0)
-
- for f in range(1, 10):
- rados(ctx, mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
- rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
-
- # bring osd.0 back up, let it peer, but don't replicate the new
- # objects...
- log.info('osd.0 command_args is %s' % 'foo')
- log.info(ctx.daemons.get_daemon('osd', 0).command_args)
- ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([
- '--osd-recovery-delay-start', '1000'
- ])
- manager.revive_osd(0)
- manager.mark_in_osd(0)
- manager.wait_till_osd_is_up(0)
-
- manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.wait_till_active()
-
- # take out osd.1 and the only copy of those objects.
- manager.kill_osd(1)
- manager.mark_down_osd(1)
- manager.mark_out_osd(1)
- manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it')
-
-    # bring up osd.2 so that things would otherwise, in theory, recover fully
- manager.revive_osd(2)
- manager.mark_in_osd(2)
- manager.wait_till_osd_is_up(2)
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_till_active()
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
-
- # verify that there are unfound objects
- unfound = manager.get_num_unfound_objects()
- log.info("there are %d unfound objects" % unfound)
- assert unfound
-
- # mark stuff lost
- pgs = manager.get_pg_stats()
- for pg in pgs:
- if pg['stat_sum']['num_objects_unfound'] > 0:
- primary = 'osd.%d' % pg['acting'][0]
-
-            # verify that we can list them directly from the osd
- log.info('listing missing/lost in %s state %s', pg['pgid'],
-                     pg['state'])
- m = manager.list_pg_missing(pg['pgid'])
- #log.info('%s' % m)
- assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound']
- num_unfound=0
- for o in m['objects']:
- if len(o['locations']) == 0:
- num_unfound += 1
- assert m['num_unfound'] == num_unfound
-
- log.info("reverting unfound in %s on %s", pg['pgid'], primary)
- manager.raw_cluster_cmd('pg', pg['pgid'],
- 'mark_unfound_lost', 'delete')
- else:
- log.info("no unfound in %s", pg['pgid'])
-
- manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5')
- manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5')
- manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
- manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
- manager.wait_for_recovery()
-
- # verify result
- for f in range(1, 10):
- err = rados(ctx, mon, ['-p', 'data', 'get', 'new_%d' % f, '-'])
- assert err
- err = rados(ctx, mon, ['-p', 'data', 'get', 'existed_%d' % f, '-'])
- assert err
- err = rados(ctx, mon, ['-p', 'data', 'get', 'existing_%d' % f, '-'])
- assert err
-
- # see if osd.1 can cope
- manager.revive_osd(1)
- manager.mark_in_osd(1)
- manager.wait_till_osd_is_up(1)
- manager.wait_for_clean()
+++ /dev/null
-import logging
-import time
-
-import ceph_manager
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-def setup(ctx, config):
- ctx.manager.wait_for_clean()
- ctx.manager.create_pool("repair_test_pool", 1)
- return "repair_test_pool"
-
-def teardown(ctx, config, pool):
- ctx.manager.remove_pool(pool)
- ctx.manager.wait_for_clean()
-
-def run_test(ctx, config, test):
- s = setup(ctx, config)
- test(ctx, config, s)
- teardown(ctx, config, s)
-
-def choose_primary(ctx):
- def ret(pool, num):
- log.info("Choosing primary")
- return ctx.manager.get_pg_primary(pool, num)
- return ret
-
-def choose_replica(ctx):
- def ret(pool, num):
- log.info("Choosing replica")
- return ctx.manager.get_pg_replica(pool, num)
- return ret
-
-def trunc(ctx):
- def ret(osd, pool, obj):
- log.info("truncating object")
- return ctx.manager.osd_admin_socket(
- osd,
- ['truncobj', pool, obj, '1'])
- return ret
-
-def dataerr(ctx):
- def ret(osd, pool, obj):
- log.info("injecting data err on object")
- return ctx.manager.osd_admin_socket(
- osd,
- ['injectdataerr', pool, obj])
- return ret
-
-def mdataerr(ctx):
- def ret(osd, pool, obj):
- log.info("injecting mdata err on object")
- return ctx.manager.osd_admin_socket(
- osd,
- ['injectmdataerr', pool, obj])
- return ret
-
-def omaperr(ctx):
- def ret(osd, pool, obj):
- log.info("injecting omap err on object")
-        return ctx.manager.osd_admin_socket(osd, ['setomapval', pool, obj, 'badkey', 'badval'])
- return ret
-
-def gen_repair_test_1(corrupter, chooser, scrub_type):
- def ret(ctx, config, pool):
- log.info("starting repair test type 1")
- victim_osd = chooser(pool, 0)
-
- # create object
- log.info("doing put")
- ctx.manager.do_put(pool, 'repair_test_obj', '/etc/hosts')
-
- # corrupt object
- log.info("corrupting object")
- corrupter(victim_osd, pool, 'repair_test_obj')
-
- # verify inconsistent
- log.info("scrubbing")
- ctx.manager.do_pg_scrub(pool, 0, scrub_type)
-
- assert ctx.manager.pg_inconsistent(pool, 0)
-
- # repair
- log.info("repairing")
- ctx.manager.do_pg_scrub(pool, 0, "repair")
-
- log.info("re-scrubbing")
- ctx.manager.do_pg_scrub(pool, 0, scrub_type)
-
- # verify consistent
- assert not ctx.manager.pg_inconsistent(pool, 0)
- log.info("done")
- return ret
-
-def gen_repair_test_2(chooser):
- def ret(ctx, config, pool):
- log.info("starting repair test type 2")
- victim_osd = chooser(pool, 0)
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- # create object
- log.info("doing put and setomapval")
- ctx.manager.do_put(pool, 'file1', '/etc/hosts')
- ctx.manager.do_rados(mon, ['-p', pool, 'setomapval', 'file1', 'key', 'val'])
- ctx.manager.do_put(pool, 'file2', '/etc/hosts')
- ctx.manager.do_put(pool, 'file3', '/etc/hosts')
- ctx.manager.do_put(pool, 'file4', '/etc/hosts')
- ctx.manager.do_put(pool, 'file5', '/etc/hosts')
- ctx.manager.do_rados(mon, ['-p', pool, 'setomapval', 'file5', 'key', 'val'])
- ctx.manager.do_put(pool, 'file6', '/etc/hosts')
-
- # corrupt object
- log.info("corrupting object")
- omaperr(ctx)(victim_osd, pool, 'file1')
-
- # verify inconsistent
- log.info("scrubbing")
- ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub')
-
- assert ctx.manager.pg_inconsistent(pool, 0)
-
- # Regression test for bug #4778, should still
- # be inconsistent after scrub
- ctx.manager.do_pg_scrub(pool, 0, 'scrub')
-
- assert ctx.manager.pg_inconsistent(pool, 0)
-
- # Additional corruptions including 2 types for file1
- log.info("corrupting more objects")
- dataerr(ctx)(victim_osd, pool, 'file1')
- mdataerr(ctx)(victim_osd, pool, 'file2')
- trunc(ctx)(victim_osd, pool, 'file3')
- omaperr(ctx)(victim_osd, pool, 'file6')
-
- # see still inconsistent
- log.info("scrubbing")
- ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub')
-
- assert ctx.manager.pg_inconsistent(pool, 0)
-
- # repair
- log.info("repairing")
- ctx.manager.do_pg_scrub(pool, 0, "repair")
-
- # Let repair clear inconsistent flag
- time.sleep(10)
-
- # verify consistent
- assert not ctx.manager.pg_inconsistent(pool, 0)
-
- # In the future repair might determine state of
- # inconsistency itself, verify with a deep-scrub
- log.info("scrubbing")
- ctx.manager.do_pg_scrub(pool, 0, 'deep-scrub')
-
- # verify consistent
- assert not ctx.manager.pg_inconsistent(pool, 0)
-
- log.info("done")
- return ret
-
-def task(ctx, config):
- """
- Test [deep] repair in several situations:
- Repair [Truncate, Data EIO, MData EIO] on [Primary|Replica]
-
- The config should be as follows:
-
- Must include the log-whitelist below
- Must enable filestore_debug_inject_read_err config
-
- example:
-
- tasks:
- - chef:
- - install:
- - ceph:
- log-whitelist: ['candidate had a read error', 'deep-scrub 0 missing, 1 inconsistent objects', 'deep-scrub 0 missing, 4 inconsistent objects', 'deep-scrub 1 errors', 'deep-scrub 4 errors', '!= known omap_digest', 'repair 0 missing, 1 inconsistent objects', 'repair 0 missing, 4 inconsistent objects', 'repair 1 errors, 1 fixed', 'repair 4 errors, 4 fixed', 'scrub 0 missing, 1 inconsistent', 'scrub 1 errors', 'size 1 != known size']
- conf:
- osd:
- filestore debug inject read err: true
- - repair_test:
-
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'repair_test task only accepts a dict for config'
-
- if not hasattr(ctx, 'manager'):
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
- ctx.manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager')
- )
-
- tests = [
- gen_repair_test_1(mdataerr(ctx), choose_primary(ctx), "scrub"),
- gen_repair_test_1(mdataerr(ctx), choose_replica(ctx), "scrub"),
- gen_repair_test_1(dataerr(ctx), choose_primary(ctx), "deep-scrub"),
- gen_repair_test_1(dataerr(ctx), choose_replica(ctx), "deep-scrub"),
- gen_repair_test_1(trunc(ctx), choose_primary(ctx), "scrub"),
- gen_repair_test_1(trunc(ctx), choose_replica(ctx), "scrub"),
- gen_repair_test_2(choose_primary(ctx)),
- gen_repair_test_2(choose_replica(ctx))
- ]
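-    # The list above exercises each corruption type (mdataerr, dataerr, trunc)
-    # on both the primary and a replica, plus the two multi-object omap
-    # scenarios from gen_repair_test_2.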
-
- for test in tests:
- run_test(ctx, config, test)
+++ /dev/null
-"""
-Rest Api
-"""
-import logging
-import contextlib
-import time
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from teuthology.orchestra import run
-from tasks.ceph import DaemonGroup
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def run_rest_api_daemon(ctx, api_clients):
- """
-    Wrapper that starts the rest api daemons
- """
- if not hasattr(ctx, 'daemons'):
- ctx.daemons = DaemonGroup()
- remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
- for rems, roles in remotes.iteritems():
- for whole_id_ in roles:
- if whole_id_ in api_clients:
-                id_ = whole_id_[len('client.'):]
- run_cmd = [
- 'sudo',
- 'daemon-helper',
- 'kill',
- 'ceph-rest-api',
- '-n',
- 'client.rest{id}'.format(id=id_), ]
- cl_rest_id = 'client.rest{id}'.format(id=id_)
- ctx.daemons.add_daemon(rems, 'restapi',
- cl_rest_id,
- args=run_cmd,
- logger=log.getChild(cl_rest_id),
- stdin=run.PIPE,
- wait=False,
- )
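-                # Poll the rest-api status endpoint for up to ~55 seconds
-                # (11 attempts, 5 seconds apart) before giving up.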
- for i in range(1, 12):
- log.info('testing for ceph-rest-api try {0}'.format(i))
- run_cmd = [
- 'wget',
- '-O',
- '/dev/null',
- '-q',
- 'http://localhost:5000/api/v0.1/status'
- ]
- proc = rems.run(
- args=run_cmd,
- check_status=False
- )
- if proc.exitstatus == 0:
- break
- time.sleep(5)
- if proc.exitstatus != 0:
- raise RuntimeError('Cannot contact ceph-rest-api')
- try:
- yield
-
- finally:
- """
- TO DO: destroy daemons started -- modify iter_daemons_of_role
- """
- teuthology.stop_daemons_of_type(ctx, 'restapi')
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Start up rest-api.
-
-    To start on all clients::
-
- tasks:
- - ceph:
- - rest-api:
-
- To only run on certain clients::
-
- tasks:
- - ceph:
- - rest-api: [client.0, client.3]
-
- or
-
- tasks:
- - ceph:
- - rest-api:
- client.0:
- client.3:
-
- The general flow of things here is:
- 1. Find clients on which rest-api is supposed to run (api_clients)
- 2. Generate keyring values
- 3. Start up ceph-rest-api daemons
- On cleanup:
- 4. Stop the daemons
- 5. Delete keyring value files.
- """
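-    # For each selected client this task appends a section like the following
-    # to /etc/ceph/ceph.conf (shown here for client.0):
-    #
-    #     [client.rest0]
-    #     restapi keyring = /etc/ceph/ceph.client.rest0.keyring
-    #
-    # and imports the generated client.rest0 keyring into the cluster.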
- api_clients = []
- remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
- log.info(remotes)
-    if config is None:
- api_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- else:
- api_clients = config
- log.info(api_clients)
- testdir = teuthology.get_testdir(ctx)
- coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
- for rems, roles in remotes.iteritems():
- for whole_id_ in roles:
- if whole_id_ in api_clients:
- id_ = whole_id_[len('client.'):]
- keyring = '/etc/ceph/ceph.client.rest{id}.keyring'.format(
- id=id_)
- rems.run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- '--create-keyring',
- '--gen-key',
- '--name=client.rest{id}'.format(id=id_),
- '--set-uid=0',
- '--cap', 'mon', 'allow *',
- '--cap', 'osd', 'allow *',
- '--cap', 'mds', 'allow',
- keyring,
- run.Raw('&&'),
- 'sudo',
- 'chmod',
- '0644',
- keyring,
- ],
- )
- rems.run(
- args=[
- 'sudo',
- 'sh',
- '-c',
- run.Raw("'"),
- "echo",
- '[client.rest{id}]'.format(id=id_),
- run.Raw('>>'),
- "/etc/ceph/ceph.conf",
- run.Raw("'")
- ]
- )
- rems.run(
- args=[
- 'sudo',
- 'sh',
- '-c',
- run.Raw("'"),
- 'echo',
- 'restapi',
- 'keyring',
- '=',
- '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
- run.Raw('>>'),
- '/etc/ceph/ceph.conf',
- run.Raw("'"),
- ]
- )
- rems.run(
- args=[
- 'ceph',
- 'auth',
- 'import',
- '-i',
- '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
- ]
- )
- with contextutil.nested(
- lambda: run_rest_api_daemon(ctx=ctx, api_clients=api_clients),):
- yield
-
+++ /dev/null
-"""
-Daemon restart
-"""
-import logging
-import pipes
-
-from teuthology import misc as teuthology
-from teuthology.orchestra import run as tor
-
-from teuthology.orchestra import run
-log = logging.getLogger(__name__)
-
-def restart_daemon(ctx, config, role, id_, *args):
- """
- Handle restart (including the execution of the command parameters passed)
- """
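-    # The optional *args are alternating conf key/value pairs; they are turned
-    # into '--key=value' flags for the restarted daemon, e.g. a hypothetical
-    # pair ('debug_ms', '1') would become ['--debug_ms=1'].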
- log.info('Restarting {r}.{i} daemon...'.format(r=role, i=id_))
- daemon = ctx.daemons.get_daemon(role, id_)
- log.debug('Waiting for exit of {r}.{i} daemon...'.format(r=role, i=id_))
- try:
- daemon.wait_for_exit()
- except tor.CommandFailedError as e:
- log.debug('Command Failed: {e}'.format(e=e))
- if len(args) > 0:
- confargs = ['--{k}={v}'.format(k=k, v=v) for k,v in zip(args[0::2], args[1::2])]
- log.debug('Doing restart of {r}.{i} daemon with args: {a}...'.format(r=role, i=id_, a=confargs))
- daemon.restart_with_args(confargs)
- else:
- log.debug('Doing restart of {r}.{i} daemon...'.format(r=role, i=id_))
- daemon.restart()
-
-def get_tests(ctx, config, role, remote, testdir):
- """Download restart tests"""
- srcdir = '{tdir}/restart.{role}'.format(tdir=testdir, role=role)
-
- refspec = config.get('branch')
- if refspec is None:
- refspec = config.get('sha1')
- if refspec is None:
- refspec = config.get('tag')
- if refspec is None:
- refspec = 'HEAD'
- log.info('Pulling restart qa/workunits from ref %s', refspec)
-
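-    # Fetch qa/workunits at the chosen ref via git archive, unpack it into
-    # srcdir, run make if a Makefile is present, and record the executable
-    # scripts in {tdir}/restarts.list for the task loop below.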
- remote.run(
- logger=log.getChild(role),
- args=[
- 'mkdir', '--', srcdir,
- run.Raw('&&'),
- 'git',
- 'archive',
- '--remote=git://git.ceph.com/ceph.git',
- '%s:qa/workunits' % refspec,
- run.Raw('|'),
- 'tar',
- '-C', srcdir,
- '-x',
- '-f-',
- run.Raw('&&'),
- 'cd', '--', srcdir,
- run.Raw('&&'),
- 'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
- run.Raw('&&'),
-            'find', '-executable', '-type', 'f', '-printf', r'%P\0',
- run.Raw('>{tdir}/restarts.list'.format(tdir=testdir)),
- ],
- )
- restarts = sorted(teuthology.get_file(
- remote,
- '{tdir}/restarts.list'.format(tdir=testdir)).split('\0'))
- return (srcdir, restarts)
-
-def task(ctx, config):
- """
- Execute commands and allow daemon restart with config options.
- Each process executed can output to stdout restart commands of the form:
- restart <role> <id> <conf_key1> <conf_value1> <conf_key2> <conf_value2>
- This will restart the daemon <role>.<id> with the specified config values once
- by modifying the conf file with those values, and then replacing the old conf file
- once the daemon is restarted.
- This task does not kill a running daemon, it assumes the daemon will abort on an
- assert specified in the config.
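-
-    For example (the role, id and option below are illustrative, not taken
-    from a real workunit), a test script might print:
-
-        restart osd 0 filestore_fd_cache_size 1024
-
-    and then block until it reads the line 'restarted' on its stdin before
-    continuing.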
-
- tasks:
- - install:
- - ceph:
- - restart:
- exec:
- client.0:
- - test_backtraces.py
-
- """
-    assert isinstance(config, dict), "restart task got invalid config"
-
- testdir = teuthology.get_testdir(ctx)
-
- try:
- assert 'exec' in config, "config requires exec key with <role>: <command> entries"
- for role, task in config['exec'].iteritems():
- log.info('restart for role {r}'.format(r=role))
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- srcdir, restarts = get_tests(ctx, config, role, remote, testdir)
- log.info('Running command on role %s host %s', role, remote.name)
- spec = '{spec}'.format(spec=task[0])
- log.info('Restarts list: %s', restarts)
- log.info('Spec is %s', spec)
- to_run = [w for w in restarts if w == task or w.find(spec) != -1]
- log.info('To run: %s', to_run)
- for c in to_run:
- log.info('Running restart script %s...', c)
- args = [
- run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
- ]
- env = config.get('env')
- if env is not None:
- for var, val in env.iteritems():
- quoted_val = pipes.quote(val)
- env_arg = '{var}={val}'.format(var=var, val=quoted_val)
- args.append(run.Raw(env_arg))
- args.extend([
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- '{srcdir}/{c}'.format(
- srcdir=srcdir,
- c=c,
- ),
- ])
- proc = remote.run(
- args=args,
- stdout=tor.PIPE,
- stdin=tor.PIPE,
- stderr=log,
- wait=False,
- )
- log.info('waiting for a command from script')
- while True:
- l = proc.stdout.readline()
- if not l or l == '':
- break
- log.debug('script command: {c}'.format(c=l))
- ll = l.strip()
- cmd = ll.split(' ')
- if cmd[0] == "done":
- break
-                    assert cmd[0] == 'restart', "script sent invalid command request to restart task"
- # cmd should be: restart <role> <id> <conf_key1> <conf_value1> <conf_key2> <conf_value2>
- # or to clear, just: restart <role> <id>
- restart_daemon(ctx, config, cmd[1], cmd[2], *cmd[3:])
- proc.stdin.writelines(['restarted\n'])
- proc.stdin.flush()
- try:
- proc.wait()
- except tor.CommandFailedError:
- raise Exception('restart task got non-zero exit status from script: {s}'.format(s=c))
- finally:
- log.info('Finishing %s on %s...', task, role)
- remote.run(
- logger=log.getChild(role),
- args=[
- 'rm', '-rf', '--', '{tdir}/restarts.list'.format(tdir=testdir), srcdir,
- ],
- )
+++ /dev/null
-"""
-rgw routines
-"""
-import argparse
-import contextlib
-import json
-import logging
-import os
-
-from cStringIO import StringIO
-
-from teuthology.orchestra import run
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from teuthology.orchestra.run import CommandFailedError
-from util.rgw import rgwadmin
-from util.rados import (rados, create_ec_pool,
- create_replicated_pool,
- create_cache_pool)
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def create_apache_dirs(ctx, config):
- """
- Remotely create apache directories. Delete when finished.
- """
- log.info('Creating apache directories...')
- testdir = teuthology.get_testdir(ctx)
- for client in config.iterkeys():
- ctx.cluster.only(client).run(
- args=[
- 'mkdir',
- '-p',
- '{tdir}/apache/htdocs.{client}'.format(tdir=testdir,
- client=client),
- '{tdir}/apache/tmp.{client}/fastcgi_sock'.format(
- tdir=testdir,
- client=client),
- run.Raw('&&'),
- 'mkdir',
- '{tdir}/archive/apache.{client}'.format(tdir=testdir,
- client=client),
- ],
- )
- try:
- yield
- finally:
- log.info('Cleaning up apache directories...')
- for client in config.iterkeys():
- ctx.cluster.only(client).run(
- args=[
- 'rm',
- '-rf',
- '{tdir}/apache/tmp.{client}'.format(tdir=testdir,
- client=client),
- run.Raw('&&'),
- 'rmdir',
- '{tdir}/apache/htdocs.{client}'.format(tdir=testdir,
- client=client),
- ],
- )
-
- for client in config.iterkeys():
- ctx.cluster.only(client).run(
- args=[
- 'rmdir',
- '{tdir}/apache'.format(tdir=testdir),
- ],
- check_status=False, # only need to remove once per host
- )
-
-
-@contextlib.contextmanager
-def ship_apache_configs(ctx, config, role_endpoints):
- """
-    Ship apache config and rgw.fcgi to all clients. Clean up on termination
- """
- assert isinstance(config, dict)
- assert isinstance(role_endpoints, dict)
- testdir = teuthology.get_testdir(ctx)
- log.info('Shipping apache config and rgw.fcgi...')
- src = os.path.join(os.path.dirname(__file__), 'apache.conf.template')
- for client, conf in config.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
- system_type = teuthology.get_system_type(remote)
- if not conf:
- conf = {}
- idle_timeout = conf.get('idle_timeout', ctx.rgw.default_idle_timeout)
- if system_type == 'deb':
- mod_path = '/usr/lib/apache2/modules'
- print_continue = 'on'
- user = 'www-data'
- group = 'www-data'
- apache24_modconfig = '''
- IncludeOptional /etc/apache2/mods-available/mpm_event.conf
- IncludeOptional /etc/apache2/mods-available/mpm_event.load
-'''
- else:
- mod_path = '/usr/lib64/httpd/modules'
- print_continue = 'off'
- user = 'apache'
- group = 'apache'
- apache24_modconfig = \
- 'IncludeOptional /etc/httpd/conf.modules.d/00-mpm.conf'
- host, port = role_endpoints[client]
- with file(src, 'rb') as f:
- conf = f.read().format(
- testdir=testdir,
- mod_path=mod_path,
- print_continue=print_continue,
- host=host,
- port=port,
- client=client,
- idle_timeout=idle_timeout,
- user=user,
- group=group,
- apache24_modconfig=apache24_modconfig,
- )
- teuthology.write_file(
- remote=remote,
- path='{tdir}/apache/apache.{client}.conf'.format(
- tdir=testdir,
- client=client),
- data=conf,
- )
- teuthology.write_file(
- remote=remote,
- path='{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
- tdir=testdir,
- client=client),
- data="""#!/bin/sh
-ulimit -c unlimited
-exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring --rgw-socket-path {tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock
-
-""".format(tdir=testdir, client=client)
- )
- remote.run(
- args=[
- 'chmod',
- 'a=rx',
- '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir,
- client=client),
- ],
- )
- try:
- yield
- finally:
- log.info('Removing apache config...')
- for client in config.iterkeys():
- ctx.cluster.only(client).run(
- args=[
- 'rm',
- '-f',
- '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
- client=client),
- run.Raw('&&'),
- 'rm',
- '-f',
- '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
- tdir=testdir,
- client=client),
- ],
- )
-
-
-@contextlib.contextmanager
-def start_rgw(ctx, config):
- """
- Start rgw on remote sites.
- """
- log.info('Starting rgw...')
- testdir = teuthology.get_testdir(ctx)
- for client in config.iterkeys():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
-
- client_config = config.get(client)
- if client_config is None:
- client_config = {}
- log.info("rgw %s config is %s", client, client_config)
- id_ = client.split('.', 1)[1]
- log.info('client {client} is id {id}'.format(client=client, id=id_))
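-        # Wrap radosgw in adjust-ulimits / ceph-coverage / daemon-helper so
-        # ulimits are adjusted, coverage data lands under the archive dir, and
-        # the daemon can be stopped cleanly on teardown.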
- cmd_prefix = [
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'daemon-helper',
- 'term',
- ]
-
- rgw_cmd = ['radosgw']
-
- if ctx.rgw.frontend == 'apache':
- rgw_cmd.extend([
- '--rgw-socket-path',
- '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
- tdir=testdir,
- client=client,
- ),
- ])
- elif ctx.rgw.frontend == 'civetweb':
- host, port = ctx.rgw.role_endpoints[client]
- rgw_cmd.extend([
- '--rgw-frontends',
- 'civetweb port={port}'.format(port=port),
- ])
-
- rgw_cmd.extend([
- '-n', client,
- '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client),
- '--log-file',
- '/var/log/ceph/rgw.{client}.log'.format(client=client),
- '--rgw_ops_log_socket_path',
- '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
- client=client),
- '--foreground',
- run.Raw('|'),
- 'sudo',
- 'tee',
- '/var/log/ceph/rgw.{client}.stdout'.format(tdir=testdir,
- client=client),
- run.Raw('2>&1'),
- ])
-
- if client_config.get('valgrind'):
- cmd_prefix = teuthology.get_valgrind_args(
- testdir,
- client,
- cmd_prefix,
- client_config.get('valgrind')
- )
-
- run_cmd = list(cmd_prefix)
- run_cmd.extend(rgw_cmd)
-
- ctx.daemons.add_daemon(
- remote, 'rgw', client,
- args=run_cmd,
- logger=log.getChild(client),
- stdin=run.PIPE,
- wait=False,
- )
-
- try:
- yield
- finally:
- teuthology.stop_daemons_of_type(ctx, 'rgw')
- for client in config.iterkeys():
- ctx.cluster.only(client).run(
- args=[
- 'rm',
- '-f',
- '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
- client=client),
- ],
- )
-
-
-@contextlib.contextmanager
-def start_apache(ctx, config):
- """
- Start apache on remote sites.
- """
- log.info('Starting apache...')
- testdir = teuthology.get_testdir(ctx)
- apaches = {}
- for client in config.iterkeys():
- (remote,) = ctx.cluster.only(client).remotes.keys()
- system_type = teuthology.get_system_type(remote)
- if system_type == 'deb':
- apache_name = 'apache2'
- else:
- try:
- remote.run(
- args=[
- 'stat',
- '/usr/sbin/httpd.worker',
- ],
- )
- apache_name = '/usr/sbin/httpd.worker'
- except CommandFailedError:
- apache_name = '/usr/sbin/httpd'
-
- proc = remote.run(
- args=[
- 'adjust-ulimits',
- 'daemon-helper',
- 'kill',
- apache_name,
- '-X',
- '-f',
- '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
- client=client),
- ],
- logger=log.getChild(client),
- stdin=run.PIPE,
- wait=False,
- )
- apaches[client] = proc
-
- try:
- yield
- finally:
- log.info('Stopping apache...')
- for client, proc in apaches.iteritems():
- proc.stdin.close()
-
- run.wait(apaches.itervalues())
-
-
-def extract_user_info(client_config):
- """
- Extract user info from the client config specified. Returns a dict
- that includes system key information.
- """
- # test if there isn't a system user or if there isn't a name for that
- # user, return None
- if ('system user' not in client_config or
- 'name' not in client_config['system user']):
- return None
-
- user_info = dict()
- user_info['system_key'] = dict(
- user=client_config['system user']['name'],
- access_key=client_config['system user']['access key'],
- secret_key=client_config['system user']['secret key'],
- )
- return user_info
-
-
-def extract_zone_info(ctx, client, client_config):
- """
- Get zone information.
- :param client: dictionary of client information
- :param client_config: dictionary of client configuration information
- :returns: zone extracted from client and client_config information
- """
- ceph_config = ctx.ceph.conf.get('global', {})
- ceph_config.update(ctx.ceph.conf.get('client', {}))
- ceph_config.update(ctx.ceph.conf.get(client, {}))
- for key in ['rgw zone', 'rgw region', 'rgw zone root pool']:
- assert key in ceph_config, \
- 'ceph conf must contain {key} for {client}'.format(key=key,
- client=client)
- region = ceph_config['rgw region']
- zone = ceph_config['rgw zone']
- zone_info = dict()
- for key in ['rgw control pool', 'rgw gc pool', 'rgw log pool',
- 'rgw intent log pool', 'rgw usage log pool',
- 'rgw user keys pool', 'rgw user email pool',
- 'rgw user swift pool', 'rgw user uid pool',
- 'rgw domain root']:
- new_key = key.split(' ', 1)[1]
- new_key = new_key.replace(' ', '_')
-
- if key in ceph_config:
- value = ceph_config[key]
- log.debug('{key} specified in ceph_config ({val})'.format(
- key=key, val=value))
- zone_info[new_key] = value
- else:
- zone_info[new_key] = '.' + region + '.' + zone + '.' + new_key
-
- index_pool = '.' + region + '.' + zone + '.' + 'index_pool'
- data_pool = '.' + region + '.' + zone + '.' + 'data_pool'
- data_extra_pool = '.' + region + '.' + zone + '.' + 'data_extra_pool'
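-    # e.g. for region 'foo' and zone 'foo-1', unspecified pools default to
-    # names such as '.foo.foo-1.gc_pool' and '.foo.foo-1.index_pool'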
-
- zone_info['placement_pools'] = [{'key': 'default_placement',
- 'val': {'index_pool': index_pool,
- 'data_pool': data_pool,
- 'data_extra_pool': data_extra_pool}
- }]
-
- # these keys are meant for the zones argument in the region info. We
- # insert them into zone_info with a different format and then remove them
- # in the fill_in_endpoints() method
- for key in ['rgw log meta', 'rgw log data']:
- if key in ceph_config:
- zone_info[key] = ceph_config[key]
-
- return region, zone, zone_info
-
-
-def extract_region_info(region, region_info):
- """
- Extract region information from the region_info parameter, using get
- to set default values.
-
- :param region: name of the region
- :param region_info: region information (in dictionary form).
- :returns: dictionary of region information set from region_info, using
- default values for missing fields.
- """
- assert isinstance(region_info['zones'], list) and region_info['zones'], \
- 'zones must be a non-empty list'
- return dict(
- name=region,
- api_name=region_info.get('api name', region),
- is_master=region_info.get('is master', False),
- log_meta=region_info.get('log meta', False),
- log_data=region_info.get('log data', False),
- master_zone=region_info.get('master zone', region_info['zones'][0]),
- placement_targets=region_info.get('placement targets',
- [{'name': 'default_placement',
- 'tags': []}]),
- default_placement=region_info.get('default placement',
- 'default_placement'),
- )
-
-
-def assign_ports(ctx, config):
- """
-    Assign port numbers starting with port 7280.
- """
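-    # Each matching rgw role gets the next free port, e.g. two rgw clients on
-    # the same remote would be assigned 7280 and 7281.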
- port = 7280
- role_endpoints = {}
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
- for role in roles_for_host:
- if role in config:
- role_endpoints[role] = (remote.name.split('@')[1], port)
- port += 1
-
- return role_endpoints
-
-
-def fill_in_endpoints(region_info, role_zones, role_endpoints):
- """
- Iterate through the list of role_endpoints, filling in zone information
-
- :param region_info: region data
- :param role_zones: region and zone information.
- :param role_endpoints: endpoints being used
- """
- for role, (host, port) in role_endpoints.iteritems():
- region, zone, zone_info, _ = role_zones[role]
- host, port = role_endpoints[role]
- endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
- # check if the region specified under client actually exists
- # in region_info (it should, if properly configured).
- # If not, throw a reasonable error
- if region not in region_info:
- raise Exception(
- 'Region: {region} was specified but no corresponding'
- ' entry was found under \'regions\''.format(region=region))
-
- region_conf = region_info[region]
- region_conf.setdefault('endpoints', [])
- region_conf['endpoints'].append(endpoint)
-
- # this is the payload for the 'zones' field in the region field
- zone_payload = dict()
- zone_payload['endpoints'] = [endpoint]
- zone_payload['name'] = zone
-
- # Pull the log meta and log data settings out of zone_info, if they
- # exist, then pop them as they don't actually belong in the zone info
- for key in ['rgw log meta', 'rgw log data']:
- new_key = key.split(' ', 1)[1]
- new_key = new_key.replace(' ', '_')
-
- if key in zone_info:
- value = zone_info.pop(key)
- else:
- value = 'false'
-
- zone_payload[new_key] = value
-
- region_conf.setdefault('zones', [])
- region_conf['zones'].append(zone_payload)
-
-
-@contextlib.contextmanager
-def configure_users(ctx, config, everywhere=False):
- """
- Create users by remotely running rgwadmin commands using extracted
- user information.
- """
- log.info('Configuring users...')
-
- # extract the user info and append it to the payload tuple for the given
- # client
- for client, c_config in config.iteritems():
- if not c_config:
- continue
- user_info = extract_user_info(c_config)
- if not user_info:
- continue
-
- # For data sync the master zones and regions must have the
- # system users of the secondary zones. To keep this simple,
- # just create the system users on every client if regions are
- # configured.
- clients_to_create_as = [client]
- if everywhere:
- clients_to_create_as = config.keys()
- for client_name in clients_to_create_as:
- log.debug('Creating user {user} on {client}'.format(
- user=user_info['system_key']['user'], client=client))
- rgwadmin(ctx, client_name,
- cmd=[
- 'user', 'create',
- '--uid', user_info['system_key']['user'],
- '--access-key', user_info['system_key']['access_key'],
- '--secret', user_info['system_key']['secret_key'],
- '--display-name', user_info['system_key']['user'],
- '--system',
- ],
- check_status=True,
- )
-
- yield
-
-
-@contextlib.contextmanager
-def create_nonregion_pools(ctx, config, regions):
- """Create replicated or erasure coded data pools for rgw."""
- if regions:
- yield
- return
-
- log.info('creating data pools')
- for client in config.keys():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- data_pool = '.rgw.buckets'
- if ctx.rgw.ec_data_pool:
- create_ec_pool(remote, data_pool, client, 64)
- else:
- create_replicated_pool(remote, data_pool, 64)
- if ctx.rgw.cache_pools:
- create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
- 64*1024*1024)
- yield
-
-
-@contextlib.contextmanager
-def configure_regions_and_zones(ctx, config, regions, role_endpoints):
- """
- Configure regions and zones from rados and rgw.
- """
- if not regions:
- log.debug(
- 'In rgw.configure_regions_and_zones() and regions is None. '
- 'Bailing')
- yield
- return
-
- log.info('Configuring regions and zones...')
-
- log.debug('config is %r', config)
- log.debug('regions are %r', regions)
- log.debug('role_endpoints = %r', role_endpoints)
- # extract the zone info
- role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
- for client, c_config in config.iteritems()])
- log.debug('roles_zones = %r', role_zones)
-
- # extract the user info and append it to the payload tuple for the given
- # client
- for client, c_config in config.iteritems():
- if not c_config:
- user_info = None
- else:
- user_info = extract_user_info(c_config)
-
- (region, zone, zone_info) = role_zones[client]
- role_zones[client] = (region, zone, zone_info, user_info)
-
- region_info = dict([
- (region_name, extract_region_info(region_name, r_config))
- for region_name, r_config in regions.iteritems()])
-
- fill_in_endpoints(region_info, role_zones, role_endpoints)
-
- # clear out the old defaults
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
- # removing these objects from .rgw.root and the per-zone root pools
- # may or may not matter
- rados(ctx, mon,
- cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
- rados(ctx, mon,
- cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])
-
- for client in config.iterkeys():
- for role, (_, zone, zone_info, user_info) in role_zones.iteritems():
- rados(ctx, mon,
- cmd=['-p', zone_info['domain_root'],
- 'rm', 'region_info.default'])
- rados(ctx, mon,
- cmd=['-p', zone_info['domain_root'],
- 'rm', 'zone_info.default'])
-
- (remote,) = ctx.cluster.only(role).remotes.keys()
- for pool_info in zone_info['placement_pools']:
- remote.run(args=['ceph', 'osd', 'pool', 'create',
- pool_info['val']['index_pool'], '64', '64'])
- if ctx.rgw.ec_data_pool:
- create_ec_pool(remote, pool_info['val']['data_pool'],
- zone, 64)
- else:
- create_replicated_pool(
- remote, pool_info['val']['data_pool'],
- 64)
-
- rgwadmin(ctx, client,
- cmd=['-n', client, 'zone', 'set', '--rgw-zone', zone],
- stdin=StringIO(json.dumps(dict(
- zone_info.items() + user_info.items()))),
- check_status=True)
-
- for region, info in region_info.iteritems():
- region_json = json.dumps(info)
- log.debug('region info is: %s', region_json)
- rgwadmin(ctx, client,
- cmd=['-n', client, 'region', 'set'],
- stdin=StringIO(region_json),
- check_status=True)
- if info['is_master']:
- rgwadmin(ctx, client,
- cmd=['-n', client,
- 'region', 'default',
- '--rgw-region', region],
- check_status=True)
-
- rgwadmin(ctx, client, cmd=['-n', client, 'regionmap', 'update'])
- yield
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
-    Either configure apache to run a rados gateway, or use the built-in
- civetweb server.
- Only one should be run per machine, since it uses a hard-coded port for
- now.
-
- For example, to run rgw on all clients::
-
- tasks:
- - ceph:
- - rgw:
-
- To only run on certain clients::
-
- tasks:
- - ceph:
- - rgw: [client.0, client.3]
-
- or
-
- tasks:
- - ceph:
- - rgw:
- client.0:
- client.3:
-
- You can adjust the idle timeout for fastcgi (default is 30 seconds):
-
- tasks:
- - ceph:
- - rgw:
- client.0:
- idle_timeout: 90
-
- To run radosgw through valgrind:
-
- tasks:
- - ceph:
- - rgw:
- client.0:
- valgrind: [--tool=memcheck]
- client.3:
- valgrind: [--tool=memcheck]
-
- To use civetweb instead of apache:
-
- tasks:
- - ceph:
- - rgw:
- - client.0
- overrides:
- rgw:
- frontend: civetweb
-
-    Note that without a modified fastcgi module, e.g. with the default
- one on CentOS, you must have rgw print continue = false in ceph.conf::
-
- tasks:
- - ceph:
- conf:
- global:
- rgw print continue: false
- - rgw: [client.0]
-
- To run rgws for multiple regions or zones, describe the regions
- and their zones in a regions section. The endpoints will be
- generated by this task. Each client must have a region, zone,
- and pools assigned in ceph.conf::
-
- tasks:
- - install:
- - ceph:
- conf:
- client.0:
- rgw region: foo
- rgw zone: foo-1
- rgw region root pool: .rgw.rroot.foo
- rgw zone root pool: .rgw.zroot.foo
- rgw log meta: true
- rgw log data: true
- client.1:
- rgw region: bar
- rgw zone: bar-master
- rgw region root pool: .rgw.rroot.bar
- rgw zone root pool: .rgw.zroot.bar
- rgw log meta: true
- rgw log data: true
- client.2:
- rgw region: bar
- rgw zone: bar-secondary
- rgw region root pool: .rgw.rroot.bar
- rgw zone root pool: .rgw.zroot.bar-secondary
- - rgw:
- default_idle_timeout: 30
- ec-data-pool: true
- regions:
- foo:
- api name: api_name # default: region name
- is master: true # default: false
- master zone: foo-1 # default: first zone
- zones: [foo-1]
- log meta: true
- log data: true
- placement targets: [target1, target2] # default: []
- default placement: target2 # default: ''
- bar:
- api name: bar-api
- zones: [bar-master, bar-secondary]
- client.0:
- system user:
- name: foo-system
- access key: X2IYPSTY1072DDY1SJMC
- secret key: YIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
- client.1:
- system user:
- name: bar1
- access key: Y2IYPSTY1072DDY1SJMC
- secret key: XIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
- client.2:
- system user:
- name: bar2
- access key: Z2IYPSTY1072DDY1SJMC
- secret key: ZIMHICpPvT+MhLTbSsiBJ1jQF15IFvJA8tgwJEcm
- """
- if config is None:
- config = dict(('client.{id}'.format(id=id_), None)
- for id_ in teuthology.all_roles_of_type(
- ctx.cluster, 'client'))
- elif isinstance(config, list):
- config = dict((name, None) for name in config)
-
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('rgw', {}))
-
- regions = {}
- if 'regions' in config:
- # separate region info so only clients are keys in config
- regions = config['regions']
- del config['regions']
-
- role_endpoints = assign_ports(ctx, config)
- ctx.rgw = argparse.Namespace()
- ctx.rgw.role_endpoints = role_endpoints
- # stash the region info for later, since it was deleted from the config
- # structure
- ctx.rgw.regions = regions
-
- ctx.rgw.ec_data_pool = False
- if 'ec-data-pool' in config:
- ctx.rgw.ec_data_pool = bool(config['ec-data-pool'])
- del config['ec-data-pool']
- ctx.rgw.default_idle_timeout = 30
- if 'default_idle_timeout' in config:
- ctx.rgw.default_idle_timeout = int(config['default_idle_timeout'])
- del config['default_idle_timeout']
- ctx.rgw.cache_pools = False
- if 'cache-pools' in config:
- ctx.rgw.cache_pools = bool(config['cache-pools'])
- del config['cache-pools']
-
- ctx.rgw.frontend = 'apache'
- if 'frontend' in config:
- ctx.rgw.frontend = config['frontend']
- del config['frontend']
-
- subtasks = [
- lambda: configure_regions_and_zones(
- ctx=ctx,
- config=config,
- regions=regions,
- role_endpoints=role_endpoints,
- ),
- lambda: configure_users(
- ctx=ctx,
- config=config,
- everywhere=bool(regions),
- ),
- lambda: create_nonregion_pools(
- ctx=ctx, config=config, regions=regions),
- ]
- if ctx.rgw.frontend == 'apache':
- subtasks.insert(0, lambda: create_apache_dirs(ctx=ctx, config=config))
- subtasks.extend([
- lambda: ship_apache_configs(ctx=ctx, config=config,
- role_endpoints=role_endpoints),
- lambda: start_rgw(ctx=ctx, config=config),
- lambda: start_apache(ctx=ctx, config=config),
- ])
- elif ctx.rgw.frontend == 'civetweb':
- subtasks.extend([
- lambda: start_rgw(ctx=ctx, config=config),
- ])
- else:
- raise ValueError("frontend must be 'apache' or 'civetweb'")
-
- log.info("Using %s as radosgw frontend", ctx.rgw.frontend)
- with contextutil.nested(*subtasks):
- yield
+++ /dev/null
-"""
-rgw s3tests logging wrappers
-"""
-from cStringIO import StringIO
-from configobj import ConfigObj
-import contextlib
-import logging
-import s3tests
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def download(ctx, config):
- """
- Run s3tests download function
- """
-    with s3tests.download(ctx, config):
-        yield
-
-def _config_user(s3tests_conf, section, user):
- """
- Run s3tests user config function
- """
- return s3tests._config_user(s3tests_conf, section, user)
-
-@contextlib.contextmanager
-def create_users(ctx, config):
- """
- Run s3tests user create function
- """
-    with s3tests.create_users(ctx, config):
-        yield
-
-@contextlib.contextmanager
-def configure(ctx, config):
- """
- Run s3tests user configure function
- """
-    with s3tests.configure(ctx, config):
-        yield
-
-@contextlib.contextmanager
-def run_tests(ctx, config):
- """
-    Run the s3tests bucket-listing test, then verify that the rgw ops log
-    socket returns data via netcat
- """
- assert isinstance(config, dict)
- testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
- client_config['extra_args'] = [
- 's3tests.functional.test_s3:test_bucket_list_return_data',
- ]
-# args = [
-# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
-# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
-# '-w',
-# '{tdir}/s3-tests'.format(tdir=testdir),
-# '-v',
-# 's3tests.functional.test_s3:test_bucket_list_return_data',
-# ]
-# if client_config is not None and 'extra_args' in client_config:
-# args.extend(client_config['extra_args'])
-#
-# ctx.cluster.only(client).run(
-# args=args,
-# )
-
- s3tests.run_tests(ctx, config)
-
- netcat_out = StringIO()
-
- for client, client_config in config.iteritems():
- ctx.cluster.only(client).run(
- args = [
- 'netcat',
- '-w', '5',
- '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
- ],
- stdout = netcat_out,
- )
-
- out = netcat_out.getvalue()
-
- assert len(out) > 100
-
-    log.info('Received %s', out)
-
- yield
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
-    Run part of the s3-tests suite against rgw and verify that the ops log socket returns data
-
- Must restrict testing to a particular client::
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3tests: [client.0]
-
- To pass extra arguments to nose (e.g. to run a certain test)::
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3tests:
- client.0:
- extra_args: ['test_s3:test_object_acl_grand_public_read']
- client.1:
- extra_args: ['--exclude', 'test_100_continue']
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- overrides = ctx.config.get('overrides', {})
- # merge each client section, not the top level.
- for (client, cconf) in config.iteritems():
- teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))
-
- log.debug('config is %s', config)
-
- s3tests_conf = {}
- for client in clients:
- s3tests_conf[client] = ConfigObj(
- indent_type='',
- infile={
- 'DEFAULT':
- {
- 'port' : 7280,
- 'is_secure' : 'no',
- },
- 'fixtures' : {},
- 's3 main' : {},
- 's3 alt' : {},
- }
- )
-
- with contextutil.nested(
- lambda: download(ctx=ctx, config=config),
- lambda: create_users(ctx=ctx, config=dict(
- clients=clients,
- s3tests_conf=s3tests_conf,
- )),
- lambda: configure(ctx=ctx, config=dict(
- clients=config,
- s3tests_conf=s3tests_conf,
- )),
- lambda: run_tests(ctx=ctx, config=config),
- ):
- yield
+++ /dev/null
-"""
-Run rgw s3 readwrite tests
-"""
-from cStringIO import StringIO
-import base64
-import contextlib
-import logging
-import os
-import random
-import string
-import yaml
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from teuthology.config import config as teuth_config
-from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def download(ctx, config):
- """
- Download the s3 tests from the git builder.
-    Remove the downloaded s3 tests upon exit.
-
- The context passed in should be identical to the context
- passed in to the main task.
- """
- assert isinstance(config, dict)
- log.info('Downloading s3-tests...')
- testdir = teuthology.get_testdir(ctx)
- for (client, cconf) in config.items():
- branch = cconf.get('force-branch', None)
- if not branch:
- branch = cconf.get('branch', 'master')
- sha1 = cconf.get('sha1')
- ctx.cluster.only(client).run(
- args=[
- 'git', 'clone',
- '-b', branch,
- teuth_config.ceph_git_base_url + 's3-tests.git',
- '{tdir}/s3-tests'.format(tdir=testdir),
- ],
- )
- if sha1 is not None:
- ctx.cluster.only(client).run(
- args=[
- 'cd', '{tdir}/s3-tests'.format(tdir=testdir),
- run.Raw('&&'),
- 'git', 'reset', '--hard', sha1,
- ],
- )
- try:
- yield
- finally:
- log.info('Removing s3-tests...')
- testdir = teuthology.get_testdir(ctx)
- for client in config:
- ctx.cluster.only(client).run(
- args=[
- 'rm',
- '-rf',
- '{tdir}/s3-tests'.format(tdir=testdir),
- ],
- )
-
-
-def _config_user(s3tests_conf, section, user):
- """
- Configure users for this section by stashing away keys, ids, and
- email addresses.
- """
- s3tests_conf[section].setdefault('user_id', user)
- s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
- s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
- s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
- s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
-
-@contextlib.contextmanager
-def create_users(ctx, config):
- """
- Create a default s3 user.
- """
- assert isinstance(config, dict)
- log.info('Creating rgw users...')
- testdir = teuthology.get_testdir(ctx)
- users = {'s3': 'foo'}
- cached_client_user_names = dict()
- for client in config['clients']:
- cached_client_user_names[client] = dict()
- s3tests_conf = config['s3tests_conf'][client]
- s3tests_conf.setdefault('readwrite', {})
- s3tests_conf['readwrite'].setdefault('bucket', 'rwtest-' + client + '-{random}-')
- s3tests_conf['readwrite'].setdefault('readers', 10)
- s3tests_conf['readwrite'].setdefault('writers', 3)
- s3tests_conf['readwrite'].setdefault('duration', 300)
- s3tests_conf['readwrite'].setdefault('files', {})
- rwconf = s3tests_conf['readwrite']
- rwconf['files'].setdefault('num', 10)
- rwconf['files'].setdefault('size', 2000)
- rwconf['files'].setdefault('stddev', 500)
- for section, user in users.iteritems():
- _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
- log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'],
- client=client))
-
- # stash the 'delete_user' flag along with user name for easier cleanup
- delete_this_user = True
- if 'delete_user' in s3tests_conf['s3']:
- delete_this_user = s3tests_conf['s3']['delete_user']
- log.debug('delete_user set to {flag} for {client}'.format(flag=delete_this_user, client=client))
- cached_client_user_names[client][section+user] = (s3tests_conf[section]['user_id'], delete_this_user)
-
- # skip actual user creation if the create_user flag is set to false for this client
- if 'create_user' in s3tests_conf['s3'] and s3tests_conf['s3']['create_user'] == False:
- log.debug('create_user set to False, skipping user creation for {client}'.format(client=client))
- continue
- else:
- ctx.cluster.only(client).run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin',
- '-n', client,
- 'user', 'create',
- '--uid', s3tests_conf[section]['user_id'],
- '--display-name', s3tests_conf[section]['display_name'],
- '--access-key', s3tests_conf[section]['access_key'],
- '--secret', s3tests_conf[section]['secret_key'],
- '--email', s3tests_conf[section]['email'],
- ],
- )
- try:
- yield
- finally:
- for client in config['clients']:
- for section, user in users.iteritems():
- #uid = '{user}.{client}'.format(user=user, client=client)
- real_uid, delete_this_user = cached_client_user_names[client][section+user]
- if delete_this_user:
- ctx.cluster.only(client).run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin',
- '-n', client,
- 'user', 'rm',
- '--uid', real_uid,
- '--purge-data',
- ],
- )
- else:
- log.debug('skipping delete for user {uid} on {client}'.format(uid=real_uid, client=client))
-
-@contextlib.contextmanager
-def configure(ctx, config):
- """
- Configure the s3-tests. This includes the running of the
- bootstrap code and the updating of local conf files.
- """
- assert isinstance(config, dict)
- log.info('Configuring s3-readwrite-tests...')
- for client, properties in config['clients'].iteritems():
- s3tests_conf = config['s3tests_conf'][client]
- if properties is not None and 'rgw_server' in properties:
- host = None
- for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
- log.info('roles: ' + str(roles))
- log.info('target: ' + str(target))
- if properties['rgw_server'] in roles:
- _, host = split_user(target)
- assert host is not None, "Invalid client specified as the rgw_server"
- s3tests_conf['s3']['host'] = host
- else:
- s3tests_conf['s3']['host'] = 'localhost'
-
- def_conf = s3tests_conf['DEFAULT']
- s3tests_conf['s3'].setdefault('port', def_conf['port'])
- s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])
-
- (remote,) = ctx.cluster.only(client).remotes.keys()
- remote.run(
- args=[
- 'cd',
- '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)),
- run.Raw('&&'),
- './bootstrap',
- ],
- )
- conf_fp = StringIO()
- conf = dict(
- s3=s3tests_conf['s3'],
- readwrite=s3tests_conf['readwrite'],
- )
- yaml.safe_dump(conf, conf_fp, default_flow_style=False)
- teuthology.write_file(
- remote=remote,
- path='{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=teuthology.get_testdir(ctx), client=client),
- data=conf_fp.getvalue(),
- )
- yield
-
-
-@contextlib.contextmanager
-def run_tests(ctx, config):
- """
- Run the s3readwrite tests after everything is set up.
-
- :param ctx: Context passed to task
- :param config: specific configuration information
- """
- assert isinstance(config, dict)
- testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
- conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client))
- args = [
- '{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir),
- ]
- if client_config is not None and 'extra_args' in client_config:
- args.extend(client_config['extra_args'])
-
- ctx.cluster.only(client).run(
- args=args,
- stdin=conf,
- )
- yield
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run the s3tests-test-readwrite suite against rgw.
-
- To run all tests on all clients::
-
- tasks:
- - ceph:
- - rgw:
- - s3readwrite:
-
- To restrict testing to particular clients::
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3readwrite: [client.0]
-
- To run against a server on client.1::
-
- tasks:
- - ceph:
- - rgw: [client.1]
- - s3readwrite:
- client.0:
- rgw_server: client.1
-
- To pass extra test arguments
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3readwrite:
- client.0:
- readwrite:
- bucket: mybucket
- readers: 10
- writers: 3
- duration: 600
- files:
- num: 10
- size: 2000
- stddev: 500
- client.1:
- ...
-
- To override s3 configuration
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3readwrite:
- client.0:
- s3:
- user_id: myuserid
- display_name: myname
- email: my@email
- access_key: myaccesskey
- secret_key: mysecretkey
-
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- overrides = ctx.config.get('overrides', {})
- # merge each client section, not the top level.
- for client in config.iterkeys():
- if not config[client]:
- config[client] = {}
- teuthology.deep_merge(config[client], overrides.get('s3readwrite', {}))
-
- log.debug('in s3readwrite, config is %s', config)
-
- s3tests_conf = {}
- for client in clients:
- if config[client] is None:
- config[client] = {}
- config[client].setdefault('s3', {})
- config[client].setdefault('readwrite', {})
-
- s3tests_conf[client] = ({
- 'DEFAULT':
- {
- 'port' : 7280,
- 'is_secure' : False,
- },
- 'readwrite' : config[client]['readwrite'],
- 's3' : config[client]['s3'],
- })
-
- with contextutil.nested(
- lambda: download(ctx=ctx, config=config),
- lambda: create_users(ctx=ctx, config=dict(
- clients=clients,
- s3tests_conf=s3tests_conf,
- )),
- lambda: configure(ctx=ctx, config=dict(
- clients=config,
- s3tests_conf=s3tests_conf,
- )),
- lambda: run_tests(ctx=ctx, config=config),
- ):
- pass
- yield
+++ /dev/null
-"""
-Run rgw roundtrip message tests
-"""
-from cStringIO import StringIO
-import base64
-import contextlib
-import logging
-import os
-import random
-import string
-import yaml
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from teuthology.config import config as teuth_config
-from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def download(ctx, config):
- """
- Download the s3 tests from the git builder.
-    Remove the downloaded s3 tests upon exit.
-
- The context passed in should be identical to the context
- passed in to the main task.
- """
- assert isinstance(config, list)
- log.info('Downloading s3-tests...')
- testdir = teuthology.get_testdir(ctx)
- for client in config:
- ctx.cluster.only(client).run(
- args=[
- 'git', 'clone',
- teuth_config.ceph_git_base_url + 's3-tests.git',
- '{tdir}/s3-tests'.format(tdir=testdir),
- ],
- )
- try:
- yield
- finally:
- log.info('Removing s3-tests...')
- for client in config:
- ctx.cluster.only(client).run(
- args=[
- 'rm',
- '-rf',
- '{tdir}/s3-tests'.format(tdir=testdir),
- ],
- )
-
-def _config_user(s3tests_conf, section, user):
- """
- Configure users for this section by stashing away keys, ids, and
- email addresses.
- """
- s3tests_conf[section].setdefault('user_id', user)
- s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
- s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
- s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
- s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
-
-@contextlib.contextmanager
-def create_users(ctx, config):
- """
- Create a default s3 user.
- """
- assert isinstance(config, dict)
- log.info('Creating rgw users...')
- testdir = teuthology.get_testdir(ctx)
- users = {'s3': 'foo'}
- for client in config['clients']:
- s3tests_conf = config['s3tests_conf'][client]
- s3tests_conf.setdefault('roundtrip', {})
- s3tests_conf['roundtrip'].setdefault('bucket', 'rttest-' + client + '-{random}-')
- s3tests_conf['roundtrip'].setdefault('readers', 10)
- s3tests_conf['roundtrip'].setdefault('writers', 3)
- s3tests_conf['roundtrip'].setdefault('duration', 300)
- s3tests_conf['roundtrip'].setdefault('files', {})
- rtconf = s3tests_conf['roundtrip']
- rtconf['files'].setdefault('num', 10)
- rtconf['files'].setdefault('size', 2000)
- rtconf['files'].setdefault('stddev', 500)
- for section, user in [('s3', 'foo')]:
- _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
- ctx.cluster.only(client).run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin',
- '-n', client,
- 'user', 'create',
- '--uid', s3tests_conf[section]['user_id'],
- '--display-name', s3tests_conf[section]['display_name'],
- '--access-key', s3tests_conf[section]['access_key'],
- '--secret', s3tests_conf[section]['secret_key'],
- '--email', s3tests_conf[section]['email'],
- ],
- )
- try:
- yield
- finally:
- for client in config['clients']:
- for user in users.itervalues():
- uid = '{user}.{client}'.format(user=user, client=client)
- ctx.cluster.only(client).run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin',
- '-n', client,
- 'user', 'rm',
- '--uid', uid,
- '--purge-data',
- ],
- )
-
-@contextlib.contextmanager
-def configure(ctx, config):
- """
- Configure the s3-tests. This includes the running of the
- bootstrap code and the updating of local conf files.
- """
- assert isinstance(config, dict)
- log.info('Configuring s3-roundtrip-tests...')
- testdir = teuthology.get_testdir(ctx)
- for client, properties in config['clients'].iteritems():
- s3tests_conf = config['s3tests_conf'][client]
- if properties is not None and 'rgw_server' in properties:
- host = None
- for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
- log.info('roles: ' + str(roles))
- log.info('target: ' + str(target))
- if properties['rgw_server'] in roles:
- _, host = split_user(target)
- assert host is not None, "Invalid client specified as the rgw_server"
- s3tests_conf['s3']['host'] = host
- else:
- s3tests_conf['s3']['host'] = 'localhost'
-
- def_conf = s3tests_conf['DEFAULT']
- s3tests_conf['s3'].setdefault('port', def_conf['port'])
- s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])
-
- (remote,) = ctx.cluster.only(client).remotes.keys()
- remote.run(
- args=[
- 'cd',
- '{tdir}/s3-tests'.format(tdir=testdir),
- run.Raw('&&'),
- './bootstrap',
- ],
- )
- conf_fp = StringIO()
- conf = dict(
- s3=s3tests_conf['s3'],
- roundtrip=s3tests_conf['roundtrip'],
- )
- yaml.safe_dump(conf, conf_fp, default_flow_style=False)
- teuthology.write_file(
- remote=remote,
- path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client),
- data=conf_fp.getvalue(),
- )
- yield
-
-
-@contextlib.contextmanager
-def run_tests(ctx, config):
- """
- Run the s3 roundtrip after everything is set up.
-
- :param ctx: Context passed to task
- :param config: specific configuration information
- """
- assert isinstance(config, dict)
- testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
- conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client))
- args = [
- '{tdir}/s3-tests/virtualenv/bin/s3tests-test-roundtrip'.format(tdir=testdir),
- ]
- if client_config is not None and 'extra_args' in client_config:
- args.extend(client_config['extra_args'])
-
- ctx.cluster.only(client).run(
- args=args,
- stdin=conf,
- )
- yield
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run the s3tests-test-roundtrip suite against rgw.
-
- To run all tests on all clients::
-
- tasks:
- - ceph:
- - rgw:
- - s3roundtrip:
-
- To restrict testing to particular clients::
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3roundtrip: [client.0]
-
- To run against a server on client.1::
-
- tasks:
- - ceph:
- - rgw: [client.1]
- - s3roundtrip:
- client.0:
- rgw_server: client.1
-
- To pass extra test arguments
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3roundtrip:
- client.0:
- roundtrip:
- bucket: mybucket
- readers: 10
- writers: 3
- duration: 600
- files:
- num: 10
- size: 2000
- stddev: 500
- client.1:
- ...
-
- To override s3 configuration
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3roundtrip:
- client.0:
- s3:
- user_id: myuserid
- display_name: myname
- email: my@email
- access_key: myaccesskey
- secret_key: mysecretkey
-
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- s3tests_conf = {}
- for client in clients:
- if config[client] is None:
- config[client] = {}
- config[client].setdefault('s3', {})
- config[client].setdefault('roundtrip', {})
-
- s3tests_conf[client] = ({
- 'DEFAULT':
- {
- 'port' : 7280,
- 'is_secure' : False,
- },
- 'roundtrip' : config[client]['roundtrip'],
- 's3' : config[client]['s3'],
- })
-
- with contextutil.nested(
- lambda: download(ctx=ctx, config=clients),
- lambda: create_users(ctx=ctx, config=dict(
- clients=clients,
- s3tests_conf=s3tests_conf,
- )),
- lambda: configure(ctx=ctx, config=dict(
- clients=config,
- s3tests_conf=s3tests_conf,
- )),
- lambda: run_tests(ctx=ctx, config=config),
- ):
- pass
- yield
+++ /dev/null
-"""
-Run a set of s3 tests on rgw.
-"""
-from cStringIO import StringIO
-from configobj import ConfigObj
-import base64
-import contextlib
-import logging
-import os
-import random
-import string
-
-import util.rgw as rgw_utils
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from teuthology.config import config as teuth_config
-from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
-
-log = logging.getLogger(__name__)
-
-def extract_sync_client_data(ctx, client_name):
- """
- Extract synchronized client rgw zone and rgw region information.
-
- :param ctx: Context passed to the s3tests task
-    :param client_name: name of the client that we are syncing with
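-    :returns: (region_name, region_dict), where region_dict holds api_name,
-        is_master, host and port for the client's zone, plus
-        sync_agent_addr/sync_agent_port when the zone is not the master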
- """
- return_region_name = None
- return_dict = None
- client = ctx.ceph.conf.get(client_name, None)
- if client:
- current_client_zone = client.get('rgw zone', None)
- if current_client_zone:
- (endpoint_host, endpoint_port) = ctx.rgw.role_endpoints.get(client_name, (None, None))
- # pull out the radosgw_agent stuff
- regions = ctx.rgw.regions
- for region in regions:
-                log.debug('region is {region}'.format(region=region))
- region_data = ctx.rgw.regions[region]
- log.debug('region data is {region}'.format(region=region_data))
- zones = region_data['zones']
- for zone in zones:
- if current_client_zone in zone:
- return_region_name = region
- return_dict = dict()
- return_dict['api_name'] = region_data['api name']
- return_dict['is_master'] = region_data['is master']
- return_dict['port'] = endpoint_port
- return_dict['host'] = endpoint_host
-
-                        # The s3tests expect the sync_agent_[addr|port] to be
- # set on the non-master node for some reason
- if not region_data['is master']:
- (rgwagent_host, rgwagent_port) = ctx.radosgw_agent.endpoint
- (return_dict['sync_agent_addr'], _) = ctx.rgw.role_endpoints[rgwagent_host]
- return_dict['sync_agent_port'] = rgwagent_port
-
- else: #if client_zone:
- log.debug('No zone info for {host}'.format(host=client_name))
- else: # if client
- log.debug('No ceph conf for {host}'.format(host=client_name))
-
- return return_region_name, return_dict
-
-def update_conf_with_region_info(ctx, config, s3tests_conf):
- """
-    Scan for clients (the keys of s3tests_conf) that take part in a
-    radosgw-agent sync relationship and update the information in the
-    local conf file for each client that does.
- """
- for key in s3tests_conf.keys():
- # we'll assume that there's only one sync relationship (source / destination) with client.X
- # as the key for now
-
- # Iterate through all of the radosgw_agent (rgwa) configs and see if a
- # given client is involved in a relationship.
- # If a given client isn't, skip it
- this_client_in_rgwa_config = False
- for rgwa in ctx.radosgw_agent.config.keys():
- rgwa_data = ctx.radosgw_agent.config[rgwa]
-
- if key in rgwa_data['src'] or key in rgwa_data['dest']:
- this_client_in_rgwa_config = True
- log.debug('{client} is in a radosgw-agent sync relationship'.format(client=key))
- radosgw_sync_data = ctx.radosgw_agent.config[key]
- break
- if not this_client_in_rgwa_config:
- log.debug('{client} is NOT in a radosgw-agent sync relationship'.format(client=key))
- continue
-
- source_client = radosgw_sync_data['src']
- dest_client = radosgw_sync_data['dest']
-
- # Extract the pertinent info for the source side
- source_region_name, source_region_dict = extract_sync_client_data(ctx, source_client)
- log.debug('\t{key} source_region {source_region} source_dict {source_dict}'.format
- (key=key,source_region=source_region_name,source_dict=source_region_dict))
-
- # The source *should* be the master region, but test anyway and then set it as the default region
- if source_region_dict['is_master']:
- log.debug('Setting {region} as default_region'.format(region=source_region_name))
- s3tests_conf[key]['fixtures'].setdefault('default_region', source_region_name)
-
- # Extract the pertinent info for the destination side
- dest_region_name, dest_region_dict = extract_sync_client_data(ctx, dest_client)
- log.debug('\t{key} dest_region {dest_region} dest_dict {dest_dict}'.format
- (key=key,dest_region=dest_region_name,dest_dict=dest_region_dict))
-
- # now add these regions to the s3tests_conf object
- s3tests_conf[key]['region {region_name}'.format(region_name=source_region_name)] = source_region_dict
- s3tests_conf[key]['region {region_name}'.format(region_name=dest_region_name)] = dest_region_dict
-
-@contextlib.contextmanager
-def download(ctx, config):
- """
- Clone the s3-tests repository from git.
- Remove the cloned s3-tests directory upon exit.
-
- The context passed in should be identical to the context
- passed in to the main task.
- """
- assert isinstance(config, dict)
- log.info('Downloading s3-tests...')
- testdir = teuthology.get_testdir(ctx)
- for (client, cconf) in config.items():
- branch = cconf.get('force-branch', None)
- if not branch:
- ceph_branch = ctx.config.get('branch')
- suite_branch = ctx.config.get('suite_branch', ceph_branch)
- branch = cconf.get('branch', suite_branch)
- if not branch:
- raise ValueError(
- "Could not determine what branch to use for s3tests!")
- else:
- log.info("Using branch '%s' for s3tests", branch)
- sha1 = cconf.get('sha1')
- ctx.cluster.only(client).run(
- args=[
- 'git', 'clone',
- '-b', branch,
- teuth_config.ceph_git_base_url + 's3-tests.git',
- '{tdir}/s3-tests'.format(tdir=testdir),
- ],
- )
- if sha1 is not None:
- ctx.cluster.only(client).run(
- args=[
- 'cd', '{tdir}/s3-tests'.format(tdir=testdir),
- run.Raw('&&'),
- 'git', 'reset', '--hard', sha1,
- ],
- )
- try:
- yield
- finally:
- log.info('Removing s3-tests...')
- testdir = teuthology.get_testdir(ctx)
- for client in config:
- ctx.cluster.only(client).run(
- args=[
- 'rm',
- '-rf',
- '{tdir}/s3-tests'.format(tdir=testdir),
- ],
- )
-
-
-def _config_user(s3tests_conf, section, user):
- """
- Configure users for this section by stashing away keys, ids, and
- email addresses.
- """
- s3tests_conf[section].setdefault('user_id', user)
- s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
- s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
- s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
- s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
-
-
-@contextlib.contextmanager
-def create_users(ctx, config):
- """
- Create a main and an alternate s3 user.
- """
- assert isinstance(config, dict)
- log.info('Creating rgw users...')
- testdir = teuthology.get_testdir(ctx)
- users = {'s3 main': 'foo', 's3 alt': 'bar'}
- for client in config['clients']:
- s3tests_conf = config['s3tests_conf'][client]
- s3tests_conf.setdefault('fixtures', {})
- s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-')
- for section, user in users.iteritems():
- _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
- log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
- ctx.cluster.only(client).run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin',
- '-n', client,
- 'user', 'create',
- '--uid', s3tests_conf[section]['user_id'],
- '--display-name', s3tests_conf[section]['display_name'],
- '--access-key', s3tests_conf[section]['access_key'],
- '--secret', s3tests_conf[section]['secret_key'],
- '--email', s3tests_conf[section]['email'],
- ],
- )
- try:
- yield
- finally:
- for client in config['clients']:
- for user in users.itervalues():
- uid = '{user}.{client}'.format(user=user, client=client)
- ctx.cluster.only(client).run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin',
- '-n', client,
- 'user', 'rm',
- '--uid', uid,
- '--purge-data',
- ],
- )
-
-
-@contextlib.contextmanager
-def configure(ctx, config):
- """
- Configure the s3-tests. This includes running the bootstrap code
- and updating the local conf files.
- """
- assert isinstance(config, dict)
- log.info('Configuring s3-tests...')
- testdir = teuthology.get_testdir(ctx)
- for client, properties in config['clients'].iteritems():
- s3tests_conf = config['s3tests_conf'][client]
- if properties is not None and 'rgw_server' in properties:
- host = None
- for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
- log.info('roles: ' + str(roles))
- log.info('target: ' + str(target))
- if properties['rgw_server'] in roles:
- _, host = split_user(target)
- assert host is not None, "Invalid client specified as the rgw_server"
- s3tests_conf['DEFAULT']['host'] = host
- else:
- s3tests_conf['DEFAULT']['host'] = 'localhost'
-
- if properties is not None and 'slow_backend' in properties:
- s3tests_conf['fixtures']['slow backend'] = properties['slow_backend']
-
- (remote,) = ctx.cluster.only(client).remotes.keys()
- remote.run(
- args=[
- 'cd',
- '{tdir}/s3-tests'.format(tdir=testdir),
- run.Raw('&&'),
- './bootstrap',
- ],
- )
- conf_fp = StringIO()
- s3tests_conf.write(conf_fp)
- teuthology.write_file(
- remote=remote,
- path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
- data=conf_fp.getvalue(),
- )
-
- log.info('Configuring boto...')
- boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
- for client, properties in config['clients'].iteritems():
- with open(boto_src, 'rb') as f:
- (remote,) = ctx.cluster.only(client).remotes.keys()
- conf = f.read().format(
- idle_timeout=config.get('idle_timeout', 30)
- )
- teuthology.write_file(
- remote=remote,
- path='{tdir}/boto.cfg'.format(tdir=testdir),
- data=conf,
- )
-
- try:
- yield
-
- finally:
- log.info('Cleaning up boto...')
- for client, properties in config['clients'].iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
- remote.run(
- args=[
- 'rm',
- '{tdir}/boto.cfg'.format(tdir=testdir),
- ],
- )
-
-@contextlib.contextmanager
-def sync_users(ctx, config):
- """
- Run a full metadata sync if this is a multi-region configuration.
- """
- assert isinstance(config, dict)
- # do a full sync if this is a multi-region test
- if rgw_utils.multi_region_enabled(ctx):
- log.debug('Doing a full sync')
- rgw_utils.radosgw_agent_sync_all(ctx)
- else:
- log.debug('Not a multi-region config; skipping the metadata sync')
-
- yield
-
-@contextlib.contextmanager
-def run_tests(ctx, config):
- """
- Run the s3tests after everything is set up.
-
- :param ctx: Context passed to task
- :param config: specific configuration information
- """
- assert isinstance(config, dict)
- testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
- args = [
- 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
- 'BOTO_CONFIG={tdir}/boto.cfg'.format(tdir=testdir),
- '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
- '-w',
- '{tdir}/s3-tests'.format(tdir=testdir),
- '-v',
- '-a', '!fails_on_rgw',
- ]
- if client_config is not None and 'extra_args' in client_config:
- args.extend(client_config['extra_args'])
-
- ctx.cluster.only(client).run(
- args=args,
- label="s3 tests against rgw"
- )
- yield
-
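-# For reference (not part of the original file), the command assembled by
-# run_tests() above looks roughly like this; paths and the client name are
-# hypothetical:
-#
-#   S3TEST_CONF={testdir}/archive/s3-tests.client.0.conf \
-#   BOTO_CONFIG={testdir}/boto.cfg \
-#   {testdir}/s3-tests/virtualenv/bin/nosetests \
-#       -w {testdir}/s3-tests -v -a '!fails_on_rgw' [extra_args...]
-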
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run the s3-tests suite against rgw.
-
- To run all tests on all clients::
-
- tasks:
- - ceph:
- - rgw:
- - s3tests:
-
- To restrict testing to particular clients::
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3tests: [client.0]
-
- To run against a server on client.1 and increase the boto timeout to 10m::
-
- tasks:
- - ceph:
- - rgw: [client.1]
- - s3tests:
- client.0:
- rgw_server: client.1
- idle_timeout: 600
-
- To pass extra arguments to nose (e.g. to run a certain test)::
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3tests:
- client.0:
- extra_args: ['test_s3:test_object_acl_grand_public_read']
- client.1:
- extra_args: ['--exclude', 'test_100_continue']
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- overrides = ctx.config.get('overrides', {})
- # merge each client section, not the top level.
- for client in config.iterkeys():
- if not config[client]:
- config[client] = {}
- teuthology.deep_merge(config[client], overrides.get('s3tests', {}))
-
- log.debug('s3tests config is %s', config)
-
- s3tests_conf = {}
- for client in clients:
- s3tests_conf[client] = ConfigObj(
- indent_type='',
- infile={
- 'DEFAULT':
- {
- 'port' : 7280,
- 'is_secure' : 'no',
- },
- 'fixtures' : {},
- 's3 main' : {},
- 's3 alt' : {},
- }
- )
-
- # Only attempt to add in the region info if there's a radosgw_agent configured
- if hasattr(ctx, 'radosgw_agent'):
- update_conf_with_region_info(ctx, config, s3tests_conf)
-
- with contextutil.nested(
- lambda: download(ctx=ctx, config=config),
- lambda: create_users(ctx=ctx, config=dict(
- clients=clients,
- s3tests_conf=s3tests_conf,
- )),
- lambda: sync_users(ctx=ctx, config=config),
- lambda: configure(ctx=ctx, config=dict(
- clients=config,
- s3tests_conf=s3tests_conf,
- )),
- lambda: run_tests(ctx=ctx, config=config),
- ):
- pass
- yield
+++ /dev/null
-"""
-Samba
-"""
-import contextlib
-import logging
-import sys
-
-from teuthology import misc as teuthology
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-def get_sambas(ctx, roles):
- """
- Scan for roles that are samba. Yield the id of the samba role
- (samba.0, samba.1...) and the associated remote.
-
- :param ctx: Context
- :param roles: roles for this test (extracted from yaml files)
- """
- for role in roles:
- assert isinstance(role, basestring)
- PREFIX = 'samba.'
- assert role.startswith(PREFIX)
- id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- yield (id_, remote)
-
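-# Hypothetical usage sketch (not part of the original file): iterate over the
-# samba roles defined in the job and act on each remote:
-#
-#   for id_, remote in get_sambas(ctx, ['samba.0', 'samba.1']):
-#       log.info('samba.%s runs on %s', id_, remote.shortname)
-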
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Setup samba smbd with ceph vfs module. This task assumes the samba
- package has already been installed via the install task.
-
- The config is optional and defaults to starting samba on all nodes.
- If a config is given, it is expected to be a list of
- samba nodes to start smbd servers on.
-
- Example that starts smbd on all samba nodes::
-
- tasks:
- - install:
- - install:
- project: samba
- extra_packages: ['samba']
- - ceph:
- - samba:
- - interactive:
-
- Example that starts smbd on just one of the samba nodes and cifs on the other::
-
- tasks:
- - samba: [samba.0]
- - cifs: [samba.1]
-
- An optional backend can be specified, and requires a path which smbd will
- use as the backend storage location::
-
- roles:
- - [osd.0, osd.1, osd.2, mon.0, mon.1, mon.2, mds.a]
- - [client.0, samba.0]
-
- tasks:
- - ceph:
- - ceph-fuse: [client.0]
- - samba:
- samba.0:
- cephfuse: "{testdir}/mnt.0"
-
- This mounts ceph to {testdir}/mnt.0 using fuse, and starts smbd with
- a UNC of //localhost/cephfuse. Access through that UNC will be on
- the ceph fuse mount point.
-
- If no arguments are specified in the samba
- role, the default behavior is to enable the ceph UNC //localhost/ceph
- and use the ceph vfs module as the smbd backend.
-
- :param ctx: Context
- :param config: Configuration
- """
- log.info("Setting up smbd with ceph vfs...")
- assert config is None or isinstance(config, list) or isinstance(config, dict), \
- "task samba got invalid config"
-
- if config is None:
- config = dict(('samba.{id}'.format(id=id_), None)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba'))
- elif isinstance(config, list):
- config = dict((name, None) for name in config)
-
- samba_servers = list(get_sambas(ctx=ctx, roles=config.keys()))
-
- testdir = teuthology.get_testdir(ctx)
-
- from tasks.ceph import DaemonGroup
- if not hasattr(ctx, 'daemons'):
- ctx.daemons = DaemonGroup()
-
- for id_, remote in samba_servers:
-
- rolestr = "samba.{id_}".format(id_=id_)
-
- confextras = """vfs objects = ceph
- ceph:config_file = /etc/ceph/ceph.conf"""
-
- unc = "ceph"
- backend = "/"
-
- if config[rolestr] is not None:
- # verify that there's just one parameter in role
- if len(config[rolestr]) != 1:
- log.error("samba config for role samba.{id_} must have only one parameter".format(id_=id_))
- raise Exception('invalid config')
- confextras = ""
- (unc, backendstr) = config[rolestr].items()[0]
- backend = backendstr.format(testdir=testdir)
-
- # on first samba role, set ownership and permissions of ceph root
- # so that samba tests succeed
- if config[rolestr] is None and id_ == samba_servers[0][0]:
- remote.run(
- args=[
- 'mkdir', '-p', '/tmp/cmnt', run.Raw('&&'),
- 'sudo', 'ceph-fuse', '/tmp/cmnt', run.Raw('&&'),
- 'sudo', 'chown', 'ubuntu:ubuntu', '/tmp/cmnt/', run.Raw('&&'),
- 'sudo', 'chmod', '1777', '/tmp/cmnt/', run.Raw('&&'),
- 'sudo', 'umount', '/tmp/cmnt/', run.Raw('&&'),
- 'rm', '-rf', '/tmp/cmnt',
- ],
- )
- else:
- remote.run(
- args=[
- 'sudo', 'chown', 'ubuntu:ubuntu', backend, run.Raw('&&'),
- 'sudo', 'chmod', '1777', backend,
- ],
- )
-
- teuthology.sudo_write_file(remote, "/usr/local/samba/etc/smb.conf", """
-[global]
- workgroup = WORKGROUP
- netbios name = DOMAIN
-
-[{unc}]
- path = {backend}
- {extras}
- writeable = yes
- valid users = ubuntu
-""".format(extras=confextras, unc=unc, backend=backend))
-
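- # Illustrative note (not in the original file): with the defaults above
- # (unc="ceph", backend="/", ceph vfs extras), the rendered smb.conf is
- # roughly:
- #
- #   [global]
- #       workgroup = WORKGROUP
- #       netbios name = DOMAIN
- #
- #   [ceph]
- #       path = /
- #       vfs objects = ceph
- #       ceph:config_file = /etc/ceph/ceph.conf
- #       writeable = yes
- #       valid users = ubuntu
-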
- # create ubuntu user
- remote.run(
- args=[
- 'sudo', '/usr/local/samba/bin/smbpasswd', '-e', 'ubuntu',
- run.Raw('||'),
- 'printf', run.Raw('"ubuntu\nubuntu\n"'),
- run.Raw('|'),
- 'sudo', '/usr/local/samba/bin/smbpasswd', '-s', '-a', 'ubuntu'
- ])
-
- smbd_cmd = [
- 'sudo',
- 'daemon-helper',
- 'term',
- 'nostdin',
- '/usr/local/samba/sbin/smbd',
- '-F',
- ]
- ctx.daemons.add_daemon(remote, 'smbd', id_,
- args=smbd_cmd,
- logger=log.getChild("smbd.{id_}".format(id_=id_)),
- stdin=run.PIPE,
- wait=False,
- )
-
- # let smbd initialize; there is probably a better way to do this...
- import time
- seconds_to_sleep = 100
- log.info('Sleeping for %s seconds...' % seconds_to_sleep)
- time.sleep(seconds_to_sleep)
- log.info('Sleeping stopped...')
-
- try:
- yield
- finally:
- log.info('Stopping smbd processes...')
- exc_info = (None, None, None)
- for d in ctx.daemons.iter_daemons_of_role('smbd'):
- try:
- d.stop()
- except (run.CommandFailedError,
- run.CommandCrashedError,
- run.ConnectionLostError):
- exc_info = sys.exc_info()
- log.exception('Saw exception from %s.%s', d.role, d.id_)
- if exc_info != (None, None, None):
- raise exc_info[0], exc_info[1], exc_info[2]
-
- for id_, remote in samba_servers:
- remote.run(
- args=[
- 'sudo',
- 'rm', '-rf',
- '/usr/local/samba/etc/smb.conf',
- '/usr/local/samba/private/*',
- '/usr/local/samba/var/run/',
- '/usr/local/samba/var/locks',
- '/usr/local/samba/var/lock',
- ],
- )
- # make sure daemons are gone
- try:
- remote.run(
- args=[
- 'while',
- 'sudo', 'killall', '-9', 'smbd',
- run.Raw(';'),
- 'do', 'sleep', '1',
- run.Raw(';'),
- 'done',
- ],
- )
-
- remote.run(
- args=[
- 'sudo',
- 'lsof',
- backend,
- ],
- check_status=False
- )
- remote.run(
- args=[
- 'sudo',
- 'fuser',
- '-M',
- backend,
- ],
- check_status=False
- )
- except Exception:
- log.exception("Saw exception")
+++ /dev/null
-"""
-Scrub osds
-"""
-import contextlib
-import gevent
-import logging
-import random
-import time
-
-import ceph_manager
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run scrub periodically. Randomly chooses an OSD to scrub.
-
- The config should be as follows:
-
- scrub:
- frequency: <seconds between scrubs>
- deep: <bool for deepness>
-
- example:
-
- tasks:
- - ceph:
- - scrub:
- frequency: 30
- deep: 0
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'scrub task only accepts a dict for configuration'
-
- log.info('Beginning scrub...')
-
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
- while len(manager.get_osd_status()['up']) < num_osds:
- manager.sleep(10)
-
- scrub_proc = Scrubber(
- manager,
- config,
- )
- try:
- yield
- finally:
- log.info('joining scrub')
- scrub_proc.do_join()
-
-class Scrubber:
- """
- Scrubbing is actually performed during initialization
- """
- def __init__(self, manager, config):
- """
- Spawn the scrubbing thread once setup completes.
- """
- self.ceph_manager = manager
- self.ceph_manager.wait_for_clean()
-
- osd_status = self.ceph_manager.get_osd_status()
- self.osds = osd_status['up']
-
- self.config = config
- if self.config is None:
- self.config = dict()
-
- else:
- def tmp(x):
- """Local display"""
- print x
- self.log = tmp
-
- self.stopping = False
-
- log.info("spawning thread")
-
- self.thread = gevent.spawn(self.do_scrub)
-
- def do_join(self):
- """Stop the scrubbing thread and wait for it to finish."""
- self.stopping = True
- self.thread.get()
-
- def do_scrub(self):
- """Perform the scrub operation"""
- frequency = self.config.get("frequency", 30)
- deep = self.config.get("deep", 0)
-
- log.info("stopping %s" % self.stopping)
-
- while not self.stopping:
- osd = str(random.choice(self.osds))
-
- if deep:
- cmd = 'deep-scrub'
- else:
- cmd = 'scrub'
-
- log.info('%sbing %s' % (cmd, osd))
- self.ceph_manager.raw_cluster_cmd('osd', cmd, osd)
-
- time.sleep(frequency)
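-
-# Illustrative note (not in the original file): each iteration of do_scrub()
-# is roughly equivalent to running one of the following against a random OSD:
-#
-#   ceph osd scrub <osd-id>
-#   ceph osd deep-scrub <osd-id>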
+++ /dev/null
-"""Scrub testing"""
-from cStringIO import StringIO
-
-import logging
-import os
-import time
-
-import ceph_manager
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-def task(ctx, config):
- """
- Test [deep] scrub
-
- tasks:
- - chef:
- - install:
- - ceph:
- log-whitelist:
- - '!= known digest'
- - '!= known omap_digest'
- - deep-scrub 0 missing, 1 inconsistent objects
- - deep-scrub 1 errors
- - repair 0 missing, 1 inconsistent objects
- - repair 1 errors, 1 fixed
- - scrub_test:
-
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'scrub_test task only accepts a dict for configuration'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
-
- num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
- log.info('num_osds is %s' % num_osds)
-
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- logger=log.getChild('ceph_manager'),
- )
-
- while len(manager.get_osd_status()['up']) < num_osds:
- time.sleep(10)
-
- for i in range(num_osds):
- manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'flush_pg_stats')
- manager.wait_for_clean()
-
- # write some data
- p = manager.do_rados(mon, ['-p', 'rbd', 'bench', '--no-cleanup', '1', 'write', '-b', '4096'])
- err = p.exitstatus
- log.info('err is %d' % err)
-
- # wait for some PG to have data that we can mess with
- victim = None
- osd = None
- while victim is None:
- stats = manager.get_pg_stats()
- for pg in stats:
- size = pg['stat_sum']['num_bytes']
- if size > 0:
- victim = pg['pgid']
- osd = pg['acting'][0]
- break
-
- if victim is None:
- time.sleep(3)
-
- log.info('messing with PG %s on osd %d' % (victim, osd))
-
- (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys()
- data_path = os.path.join(
- '/var/lib/ceph/osd',
- 'ceph-{id}'.format(id=osd),
- 'current',
- '{pg}_head'.format(pg=victim)
- )
-
- # fuzz time
- ls_fp = StringIO()
- osd_remote.run(
- args=[ 'ls', data_path ],
- stdout=ls_fp,
- )
- ls_out = ls_fp.getvalue()
- ls_fp.close()
-
- # find an object file we can mess with
- osdfilename = None
- for line in ls_out.split('\n'):
- if 'object' in line:
- osdfilename = line
- break
- assert osdfilename is not None
-
- # Get actual object name from osd stored filename
- tmp = osdfilename.split('__')
- objname = tmp[0]
- objname = objname.replace('\u', '_')
- log.info('fuzzing %s' % objname)
-
- # put a single \0 at the beginning of the file
- osd_remote.run(
- args=[ 'sudo', 'dd',
- 'if=/dev/zero',
- 'of=%s' % os.path.join(data_path, osdfilename),
- 'bs=1', 'count=1', 'conv=notrunc'
- ]
- )
-
- # scrub, verify inconsistent
- manager.raw_cluster_cmd('pg', 'deep-scrub', victim)
- # Give deep-scrub a chance to start
- time.sleep(60)
-
- while True:
- stats = manager.get_single_pg_stats(victim)
- state = stats['state']
-
- # wait for the scrub to finish
- if 'scrubbing' in state:
- time.sleep(3)
- continue
-
- inconsistent = stats['state'].find('+inconsistent') != -1
- assert inconsistent
- break
-
-
- # repair, verify no longer inconsistent
- manager.raw_cluster_cmd('pg', 'repair', victim)
- # Give repair a chance to start
- time.sleep(60)
-
- while True:
- stats = manager.get_single_pg_stats(victim)
- state = stats['state']
-
- # wait for the scrub to finish
- if 'scrubbing' in state:
- time.sleep(3)
- continue
-
- inconsistent = stats['state'].find('+inconsistent') != -1
- assert not inconsistent
- break
-
- # Test deep-scrub with various omap modifications
- manager.do_rados(mon, ['-p', 'rbd', 'setomapval', objname, 'key', 'val'])
- manager.do_rados(mon, ['-p', 'rbd', 'setomapheader', objname, 'hdr'])
-
- # Modify omap on specific osd
- log.info('fuzzing omap of %s' % objname)
- manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'key'])
- manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname, 'badkey', 'badval'])
- manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'badhdr'])
-
- # scrub, verify inconsistent
- manager.raw_cluster_cmd('pg', 'deep-scrub', victim)
- # Give deep-scrub a chance to start
- time.sleep(60)
-
- while True:
- stats = manager.get_single_pg_stats(victim)
- state = stats['state']
-
- # wait for the scrub to finish
- if 'scrubbing' in state:
- time.sleep(3)
- continue
-
- inconsistent = stats['state'].find('+inconsistent') != -1
- assert inconsistent
- break
-
- # repair, verify no longer inconsistent
- manager.raw_cluster_cmd('pg', 'repair', victim)
- # Give repair a chance to start
- time.sleep(60)
-
- while True:
- stats = manager.get_single_pg_stats(victim)
- state = stats['state']
-
- # wait for the scrub to finish
- if 'scrubbing' in state:
- time.sleep(3)
- continue
-
- inconsistent = stats['state'].find('+inconsistent') != -1
- assert not inconsistent
- break
-
- log.info('test successful!')
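-
-# A minimal sketch (not part of the original file) of how the four identical
-# wait loops above could be factored into one helper; the name and the 'poll'
-# parameter are assumptions:
-#
-#   def wait_for_scrub_result(manager, pgid, expect_inconsistent, poll=3):
-#       """Wait for scrubbing to finish, then check the inconsistent flag."""
-#       while True:
-#           state = manager.get_single_pg_stats(pgid)['state']
-#           if 'scrubbing' in state:
-#               time.sleep(poll)
-#               continue
-#           assert ('+inconsistent' in state) == expect_inconsistent
-#           return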
+++ /dev/null
-from textwrap import dedent
-
-from .. import devstack
-
-
-class TestDevstack(object):
- def test_parse_os_table(self):
- table_str = dedent("""
- +---------------------+--------------------------------------+
- | Property | Value |
- +---------------------+--------------------------------------+
- | attachments | [] |
- | availability_zone | nova |
- | bootable | false |
- | created_at | 2014-02-21T17:14:47.548361 |
- | display_description | None |
- | display_name | NAME |
- | id | ffdbd1bb-60dc-4d95-acfe-88774c09ad3e |
- | metadata | {} |
- | size | 1 |
- | snapshot_id | None |
- | source_volid | None |
- | status | creating |
- | volume_type | None |
- +---------------------+--------------------------------------+
- """).strip()
- expected = {
- 'Property': 'Value',
- 'attachments': '[]',
- 'availability_zone': 'nova',
- 'bootable': 'false',
- 'created_at': '2014-02-21T17:14:47.548361',
- 'display_description': 'None',
- 'display_name': 'NAME',
- 'id': 'ffdbd1bb-60dc-4d95-acfe-88774c09ad3e',
- 'metadata': '{}',
- 'size': '1',
- 'snapshot_id': 'None',
- 'source_volid': 'None',
- 'status': 'creating',
- 'volume_type': 'None'}
-
- vol_info = devstack.parse_os_table(table_str)
- assert vol_info == expected
-
+++ /dev/null
-"""
-Task to handle tgt
-
-Assumptions made:
- The ceph-extras tgt package may need to be installed.
- The open-iscsi package needs to be installed.
-"""
-import logging
-import contextlib
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def start_tgt_remotes(ctx, start_tgtd):
- """
- This subtask starts up a tgtd on the specified clients.
- """
- remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
- tgtd_list = []
- for rem, roles in remotes.iteritems():
- for _id in roles:
- if _id in start_tgtd:
- if not rem in tgtd_list:
- tgtd_list.append(rem)
- size = ctx.config.get('image_size', 10240)
- rem.run(
- args=[
- 'rbd',
- 'create',
- 'iscsi-image',
- '--size',
- str(size),
- ])
- rem.run(
- args=[
- 'sudo',
- 'tgtadm',
- '--lld',
- 'iscsi',
- '--mode',
- 'target',
- '--op',
- 'new',
- '--tid',
- '1',
- '--targetname',
- 'rbd',
- ])
- rem.run(
- args=[
- 'sudo',
- 'tgtadm',
- '--lld',
- 'iscsi',
- '--mode',
- 'logicalunit',
- '--op',
- 'new',
- '--tid',
- '1',
- '--lun',
- '1',
- '--backing-store',
- 'iscsi-image',
- '--bstype',
- 'rbd',
- ])
- rem.run(
- args=[
- 'sudo',
- 'tgtadm',
- '--lld',
- 'iscsi',
- '--op',
- 'bind',
- '--mode',
- 'target',
- '--tid',
- '1',
- '-I',
- 'ALL',
- ])
- try:
- yield
-
- finally:
- for rem in tgtd_list:
- rem.run(
- args=[
- 'sudo',
- 'tgtadm',
- '--lld',
- 'iscsi',
- '--mode',
- 'target',
- '--op',
- 'delete',
- '--force',
- '--tid',
- '1',
- ])
- rem.run(
- args=[
- 'rbd',
- 'snap',
- 'purge',
- 'iscsi-image',
- ])
- rem.run(
- args=[
- 'sudo',
- 'rbd',
- 'rm',
- 'iscsi-image',
- ])
-
-
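-# For reference (not part of the original file), the per-client setup above
-# corresponds roughly to this shell sequence (default image size shown):
-#
-#   rbd create iscsi-image --size 10240
-#   sudo tgtadm --lld iscsi --mode target --op new --tid 1 --targetname rbd
-#   sudo tgtadm --lld iscsi --mode logicalunit --op new --tid 1 --lun 1 \
-#       --backing-store iscsi-image --bstype rbd
-#   sudo tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
-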
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Start up tgt.
-
- To start on all clients::
-
- tasks:
- - ceph:
- - tgt:
-
- To start on certain clients::
-
- tasks:
- - ceph:
- - tgt: [client.0, client.3]
-
- or
-
- tasks:
- - ceph:
- - tgt:
- client.0:
- client.3:
-
- An image size can also be specified::
-
- tasks:
- - ceph:
- - tgt:
- image_size: 20480
-
- The general flow of things here is:
- 1. Find clients on which tgt is supposed to run (start_tgtd)
- 2. Remotely start up tgt daemon
- On cleanup:
- 3. Stop tgt daemon
-
- The iscsi administration is handled by the iscsi task.
- """
- if config:
- config = {key : val for key, val in config.items()
- if key.startswith('client')}
- # config at this point should only contain keys starting with 'client'
- start_tgtd = []
- remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
- log.info(remotes)
- if not config:
- start_tgtd = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- else:
- start_tgtd = config
- log.info(start_tgtd)
- with contextutil.nested(
- lambda: start_tgt_remotes(ctx=ctx, start_tgtd=start_tgtd),):
- yield
+++ /dev/null
-"""
-Thrash -- Simulate random osd failures.
-"""
-import contextlib
-import logging
-import ceph_manager
-from teuthology import misc as teuthology
-
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- "Thrash" the OSDs by randomly marking them out/down (and then back
- in) until the task is ended. This loops, and every op_delay
- seconds it randomly chooses to add or remove an OSD (even odds)
- unless there are fewer than min_out OSDs out of the cluster, or
- more than min_in OSDs in the cluster.
-
- All commands are run on mon0 and it stops when __exit__ is called.
-
- The config is optional, and is a dict containing some or all of:
-
- min_in: (default 3) the minimum number of OSDs to keep in the
- cluster
-
- min_out: (default 0) the minimum number of OSDs to keep out of the
- cluster
-
- op_delay: (5) the length of time to sleep between changing an
- OSD's status
-
- min_dead: (0) minimum number of osds to leave down/dead.
-
- max_dead: (0) maximum number of osds to leave down/dead before waiting
- for clean. This should probably be num_replicas - 1.
-
- clean_interval: (60) the approximate length of time to loop before
- waiting until the cluster goes clean. (In reality this is used
- to probabilistically choose when to wait, and the method used
- makes it closer to -- but not identical to -- the half-life.)
-
- scrub_interval: (-1) the approximate length of time to loop before
- waiting until a scrub is performed while cleaning. (In reality
- this is used to probabilistically choose when to wait, and it
- only applies to the cases where cleaning is being performed).
- -1 is used to indicate that no scrubbing will be done.
-
- chance_down: (0.4) the probability that the thrasher will mark an
- OSD down rather than marking it out. (The thrasher will not
- consider that OSD out of the cluster, since presently an OSD
- wrongly marked down will mark itself back up again.) This value
- can be either an integer (eg, 75) or a float probability (eg
- 0.75).
-
- chance_test_min_size: (0) chance to run test_pool_min_size,
- which:
- - kills all but one osd
- - waits
- - kills that osd
- - revives all other osds
- - verifies that the osds fully recover
-
- timeout: (360) the number of seconds to wait for the cluster
- to become clean after each cluster change. If this doesn't
- happen within the timeout, an exception will be raised.
-
- revive_timeout: (150) number of seconds to wait for an osd asok to
- appear after attempting to revive the osd
-
- thrash_primary_affinity: (true) randomly adjust primary-affinity
-
- chance_pgnum_grow: (0) chance to increase a pool's size
- chance_pgpnum_fix: (0) chance to adjust pgpnum to pg for a pool
- pool_grow_by: (10) amount to increase pgnum by
- max_pgs_per_pool_osd: (1200) don't expand pools past this size per osd
-
- pause_short: (3) duration of short pause
- pause_long: (80) duration of long pause
- pause_check_after: (50) assert osd down after this long
- chance_inject_pause_short: (1) chance of injecting short stall
- chance_inject_pause_long: (0) chance of injecting long stall
-
- clean_wait: (0) duration to wait before resuming thrashing once clean
-
- powercycle: (false) whether to power cycle the node instead
- of just the osd process. Note that this assumes that a single
- osd is the only important process on the node.
-
- chance_test_backfill_full: (0) chance to simulate full disks stopping
- backfill
-
- chance_test_map_discontinuity: (0) chance to test map discontinuity
- map_discontinuity_sleep_time: (40) time to wait for map trims
-
- ceph_objectstore_tool: (true) whether to export/import a pg while an osd is down
- chance_move_pg: (1.0) chance of moving a pg if more than 1 osd is down (default 100%)
-
- example:
-
- tasks:
- - ceph:
- - thrashosds:
- chance_down: 10
- op_delay: 3
- min_in: 1
- timeout: 600
- - interactive:
- """
- if config is None:
- config = {}
- assert isinstance(config, dict), \
- 'thrashosds task only accepts a dict for configuration'
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('thrashosds', {}))
-
- if 'powercycle' in config:
-
- # sync everyone first to avoid collateral damage to / etc.
- log.info('Doing preliminary sync to avoid collateral damage...')
- ctx.cluster.run(args=['sync'])
-
- if 'ipmi_user' in ctx.teuthology_config:
- for remote in ctx.cluster.remotes.keys():
- log.debug('checking console status of %s' % remote.shortname)
- if not remote.console.check_status():
- log.warn('Failed to get console status for %s',
- remote.shortname)
-
- # check that all osd remotes have a valid console
- osds = ctx.cluster.only(teuthology.is_type('osd'))
- for remote in osds.remotes.keys():
- if not remote.console.has_ipmi_credentials:
- raise Exception(
- 'IPMI console required for powercycling, '
- 'but not available on osd role: {r}'.format(
- r=remote.name))
-
- log.info('Beginning thrashosds...')
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
- manager = ceph_manager.CephManager(
- mon,
- ctx=ctx,
- config=config,
- logger=log.getChild('ceph_manager'),
- )
- ctx.manager = manager
- thrash_proc = ceph_manager.Thrasher(
- manager,
- config,
- logger=log.getChild('thrasher')
- )
- try:
- yield
- finally:
- log.info('joining thrashosds')
- thrash_proc.do_join()
- manager.wait_for_recovery(config.get('timeout', 360))
+++ /dev/null
-#cloud-config-archive
-
-- type: text/cloud-config
- content: |
- output:
- all: '| tee -a /var/log/cloud-init-output.log'
-
-# allow passwordless access for debugging
-- |
- #!/bin/bash
- exec passwd -d ubuntu
-
-- |
- #!/bin/bash
-
- # mount a NFS share for storing logs
- apt-get update
- apt-get -y install nfs-common
- mkdir /mnt/log
- # 10.0.2.2 is the host
- mount -v -t nfs -o proto=tcp 10.0.2.2:{mnt_dir} /mnt/log
-
- # mount the iso image that has the test script
- mkdir /mnt/cdrom
- mount -t auto /dev/cdrom /mnt/cdrom
+++ /dev/null
-- |
- #!/bin/bash
- cp /var/log/cloud-init-output.log /mnt/log
-
-- |
- #!/bin/bash
- umount /mnt/log
-
-- |
- #!/bin/bash
- shutdown -h -P now
+++ /dev/null
-from teuthology.misc import get_testdir
-from teuthology.orchestra import run
-
-
-def write_secret_file(ctx, remote, role, keyring, filename):
- """
- Stash the keyring in the specified filename.
- """
- testdir = get_testdir(ctx)
- remote.run(
- args=[
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'ceph-authtool',
- '--name={role}'.format(role=role),
- '--print-key',
- keyring,
- run.Raw('>'),
- filename,
- ],
- )
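-
-# Hypothetical usage sketch (not part of the original file); the paths are
-# only illustrative:
-#
-#   write_secret_file(ctx, remote, 'client.0',
-#                     '/etc/ceph/ceph.keyring',
-#                     '/tmp/client.0.secret')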
+++ /dev/null
-import logging
-
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-def rados(ctx, remote, cmd, wait=True, check_status=False):
- testdir = teuthology.get_testdir(ctx)
- log.info("rados %s" % ' '.join(cmd))
- pre = [
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rados',
- ]
- pre.extend(cmd)
- proc = remote.run(
- args=pre,
- check_status=check_status,
- wait=wait,
- )
- if wait:
- return proc.exitstatus
- else:
- return proc
-
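-# Hypothetical usage sketch (not part of the original file); pool, object and
-# file names are only illustrative:
-#
-#   exitcode = rados(ctx, remote, ['-p', 'rbd', 'put', 'myobj', '/etc/hosts'])
-#   proc = rados(ctx, remote, ['-p', 'rbd', 'get', 'myobj', '/tmp/out'],
-#                wait=False)
-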
-def create_ec_pool(remote, name, profile_name, pgnum, m=1, k=2):
- remote.run(args=[
- 'ceph', 'osd', 'erasure-code-profile', 'set',
- profile_name, 'm=' + str(m), 'k=' + str(k),
- 'ruleset-failure-domain=osd',
- ])
- remote.run(args=[
- 'ceph', 'osd', 'pool', 'create', name,
- str(pgnum), str(pgnum), 'erasure', profile_name,
- ])
-
-def create_replicated_pool(remote, name, pgnum):
- remote.run(args=[
- 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum),
- ])
-
-def create_cache_pool(remote, base_name, cache_name, pgnum, size):
- remote.run(args=[
- 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum)
- ])
- remote.run(args=[
- 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name,
- str(size),
- ])
+++ /dev/null
-from cStringIO import StringIO
-import logging
-import json
-import requests
-from urlparse import urlparse
-
-from teuthology.orchestra.connection import split_user
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-# simple test to indicate if multi-region testing should occur
-def multi_region_enabled(ctx):
- # this is populated by the radosgw-agent task, seems reasonable to
- # use that as an indicator that we're testing multi-region sync
- return 'radosgw_agent' in ctx
-
-def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False):
- log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd))
- testdir = teuthology.get_testdir(ctx)
- pre = [
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin',
- '--log-to-stderr',
- '--format', 'json',
- '-n', client,
- ]
- pre.extend(cmd)
- log.info('rgwadmin: cmd=%s' % pre)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- proc = remote.run(
- args=pre,
- check_status=check_status,
- stdout=StringIO(),
- stderr=StringIO(),
- stdin=stdin,
- )
- r = proc.exitstatus
- out = proc.stdout.getvalue()
- j = None
- if not r and out != '':
- try:
- j = json.loads(out)
- log.info(' json result: %s' % j)
- except ValueError:
- j = out
- log.info(' raw result: %s' % j)
- return (r, j)
-
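-# Hypothetical usage sketch (not part of the original file); the uid is only
-# illustrative:
-#
-#   (err, out) = rgwadmin(ctx, 'client.0',
-#                         cmd=['user', 'info', '--uid', 'foo'],
-#                         check_status=True)
-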
-def get_user_summary(out, user):
- """Extract the summary for a given user"""
- user_summary = None
- for summary in out['summary']:
- if summary.get('user') == user:
- user_summary = summary
-
- if not user_summary:
- raise AssertionError('No summary info found for user: %s' % user)
-
- return user_summary
-
-def get_user_successful_ops(out, user):
- summary = out['summary']
- if len(summary) == 0:
- return 0
- return get_user_summary(out, user)['total']['successful_ops']
-
-def get_zone_host_and_port(ctx, client, zone):
- _, region_map = rgwadmin(ctx, client, check_status=True,
- cmd=['-n', client, 'region-map', 'get'])
- regions = region_map['regions']
- for region in regions:
- for zone_info in region['val']['zones']:
- if zone_info['name'] == zone:
- endpoint = urlparse(zone_info['endpoints'][0])
- host, port = endpoint.hostname, endpoint.port
- if port is None:
- port = 80
- return host, port
- assert False, 'no endpoint for zone {zone} found'.format(zone=zone)
-
-def get_master_zone(ctx, client):
- _, region_map = rgwadmin(ctx, client, check_status=True,
- cmd=['-n', client, 'region-map', 'get'])
- regions = region_map['regions']
- for region in regions:
- is_master = (region['val']['is_master'] == "true")
- log.info('region={r} is_master={ism}'.format(r=region, ism=is_master))
- if not is_master:
- continue
- master_zone = region['val']['master_zone']
- log.info('master_zone=%s' % master_zone)
- for zone_info in region['val']['zones']:
- if zone_info['name'] == master_zone:
- return master_zone
- log.info('couldn\'t find master zone')
- return None
-
-def get_master_client(ctx, clients):
- master_zone = get_master_zone(ctx, clients[0]) # can use any client for this as long as system configured correctly
- if not master_zone:
- return None
-
- for client in clients:
- zone = zone_for_client(ctx, client)
- if zone == master_zone:
- return client
-
- return None
-
-def get_zone_system_keys(ctx, client, zone):
- _, zone_info = rgwadmin(ctx, client, check_status=True,
- cmd=['-n', client,
- 'zone', 'get', '--rgw-zone', zone])
- system_key = zone_info['system_key']
- return system_key['access_key'], system_key['secret_key']
-
-def zone_for_client(ctx, client):
- ceph_config = ctx.ceph.conf.get('global', {})
- ceph_config.update(ctx.ceph.conf.get('client', {}))
- ceph_config.update(ctx.ceph.conf.get(client, {}))
- return ceph_config.get('rgw zone')
-
-def region_for_client(ctx, client):
- ceph_config = ctx.ceph.conf.get('global', {})
- ceph_config.update(ctx.ceph.conf.get('client', {}))
- ceph_config.update(ctx.ceph.conf.get(client, {}))
- return ceph_config.get('rgw region')
-
-def radosgw_data_log_window(ctx, client):
- ceph_config = ctx.ceph.conf.get('global', {})
- ceph_config.update(ctx.ceph.conf.get('client', {}))
- ceph_config.update(ctx.ceph.conf.get(client, {}))
- return ceph_config.get('rgw data log window', 30)
-
-def radosgw_agent_sync_data(ctx, agent_host, agent_port, full=False):
- log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port))
- method = "full" if full else "incremental"
- return requests.post('http://{addr}:{port}/data/{method}'.format(addr = agent_host, port = agent_port, method = method))
-
-def radosgw_agent_sync_metadata(ctx, agent_host, agent_port, full=False):
- log.info('sync agent {h}:{p}'.format(h=agent_host, p=agent_port))
- method = "full" if full else "incremental"
- return requests.post('http://{addr}:{port}/metadata/{method}'.format(addr = agent_host, port = agent_port, method = method))
-
-def radosgw_agent_sync_all(ctx, full=False, data=False):
- if ctx.radosgw_agent.procs:
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- zone_for_client(ctx, agent_client)
- sync_host, sync_port = get_sync_agent(ctx, agent_client)
- log.debug('doing a sync via {host1}'.format(host1=sync_host))
- radosgw_agent_sync_metadata(ctx, sync_host, sync_port, full)
- if (data):
- radosgw_agent_sync_data(ctx, sync_host, sync_port, full)
-
-def host_for_role(ctx, role):
- for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
- if role in roles:
- _, host = split_user(target)
- return host
-
-def get_sync_agent(ctx, source):
- for task in ctx.config['tasks']:
- if 'radosgw-agent' not in task:
- continue
- for client, conf in task['radosgw-agent'].iteritems():
- if conf['src'] == source:
- return host_for_role(ctx, source), conf.get('port', 8000)
- return None, None
+++ /dev/null
-"""
-test_stress_watch task
-"""
-import contextlib
-import logging
-import proc_thrasher
-
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run test_stress_watch
-
- The config should be as follows:
-
- test_stress_watch:
- clients: [client list]
-
- example:
-
- tasks:
- - ceph:
- - test_stress_watch:
- clients: [client.0]
- - interactive:
- """
- log.info('Beginning test_stress_watch...')
- assert isinstance(config, dict), \
- "please list clients to run on"
- testwatch = {}
-
- remotes = []
-
- for role in config.get('clients', ['client.0']):
- assert isinstance(role, basestring)
- PREFIX = 'client.'
- assert role.startswith(PREFIX)
- id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- remotes.append(remote)
-
- args =['CEPH_CLIENT_ID={id_}'.format(id_=id_),
- 'CEPH_ARGS="{flags}"'.format(flags=config.get('flags', '')),
- 'daemon-helper',
- 'kill',
- 'multi_stress_watch foo foo'
- ]
-
- log.info("args are %s" % (args,))
-
- proc = proc_thrasher.ProcThrasher({}, remote,
- args=[run.Raw(i) for i in args],
- logger=log.getChild('testwatch.{id}'.format(id=id_)),
- stdin=run.PIPE,
- wait=False
- )
- proc.start()
- testwatch[id_] = proc
-
- try:
- yield
- finally:
- log.info('joining watch_notify_stress')
- for i in testwatch.itervalues():
- i.join()
+++ /dev/null
-"""
-Workunit task -- Run ceph on sets of specific clients
-"""
-import logging
-import pipes
-import os
-
-from teuthology import misc
-from teuthology.orchestra.run import CommandFailedError
-from teuthology.parallel import parallel
-from teuthology.orchestra import run
-
-log = logging.getLogger(__name__)
-
-CLIENT_PREFIX = 'client.'
-
-
-def task(ctx, config):
- """
- Run ceph on all workunits found under the specified path.
-
- For example::
-
- tasks:
- - ceph:
- - ceph-fuse: [client.0]
- - workunit:
- clients:
- client.0: [direct_io, xattrs.sh]
- client.1: [snaps]
- branch: foo
-
- You can also run a list of workunits on all clients:
- tasks:
- - ceph:
- - ceph-fuse:
- - workunit:
- tag: v0.47
- clients:
- all: [direct_io, xattrs.sh, snaps]
-
- If you have an "all" section it will run all the workunits
- on each client simultaneously, AFTER running any workunits specified
- for individual clients. (This prevents unintended simultaneous runs.)
-
- To customize tests, you can specify environment variables as a dict. You
- can also specify a time limit for each work unit (defaults to 3h):
-
- tasks:
- - ceph:
- - ceph-fuse:
- - workunit:
- sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6
- clients:
- all: [snaps]
- env:
- FOO: bar
- BAZ: quux
- timeout: 3h
-
- :param ctx: Context
- :param config: Configuration
- """
- assert isinstance(config, dict)
- assert isinstance(config.get('clients'), dict), \
- 'configuration must contain a dictionary of clients'
-
- overrides = ctx.config.get('overrides', {})
- misc.deep_merge(config, overrides.get('workunit', {}))
-
- refspec = config.get('branch')
- if refspec is None:
- refspec = config.get('sha1')
- if refspec is None:
- refspec = config.get('tag')
- if refspec is None:
- refspec = 'HEAD'
-
- timeout = config.get('timeout', '3h')
-
- log.info('Pulling workunits from ref %s', refspec)
-
- created_mountpoint = {}
-
- if config.get('env') is not None:
- assert isinstance(config['env'], dict), 'env must be a dictionary'
- clients = config['clients']
-
- # Create scratch dirs for any non-all workunits
- log.info('Making a separate scratch dir for every client...')
- for role in clients.iterkeys():
- assert isinstance(role, basestring)
- if role == "all":
- continue
-
- assert role.startswith(CLIENT_PREFIX)
- created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir'))
- created_mountpoint[role] = created_mnt_dir
-
- # Execute any non-all workunits
- with parallel() as p:
- for role, tests in clients.iteritems():
- if role != "all":
- p.spawn(_run_tests, ctx, refspec, role, tests,
- config.get('env'), timeout=timeout)
-
- # Clean up dirs from any non-all workunits
- for role, created in created_mountpoint.items():
- _delete_dir(ctx, role, created)
-
- # Execute any 'all' workunits
- if 'all' in clients:
- all_tasks = clients["all"]
- _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'),
- config.get('subdir'), timeout=timeout)
-
-
-def _delete_dir(ctx, role, created_mountpoint):
- """
- Delete file used by this role, and delete the directory that this
- role appeared in.
-
- :param ctx: Context
- :param role: "role.#" where # is used for the role id.
- """
- testdir = misc.get_testdir(ctx)
- id_ = role[len(CLIENT_PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- # Is there any reason why this is not: join(mnt, role) ?
- client = os.path.join(mnt, 'client.{id}'.format(id=id_))
-
- # Remove the directory inside the mount where the workunit ran
- remote.run(
- args=[
- 'sudo',
- 'rm',
- '-rf',
- '--',
- client,
- ],
- )
- log.info("Deleted dir {dir}".format(dir=client))
-
- # If the mount was an artificially created dir, delete that too
- if created_mountpoint:
- remote.run(
- args=[
- 'rmdir',
- '--',
- mnt,
- ],
- )
- log.info("Deleted artificial mount point {dir}".format(dir=mnt))
-
-
-def _make_scratch_dir(ctx, role, subdir):
- """
- Make scratch directories for this role. This also makes the mount
- point if that directory does not exist.
-
- :param ctx: Context
- :param role: "role.#" where # is used for the role id.
- :param subdir: use this subdir (False if not used)
- """
- created_mountpoint = False
- id_ = role[len(CLIENT_PREFIX):]
- log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role))
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- dir_owner = remote.user
- mnt = os.path.join(misc.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
- # if neither kclient nor ceph-fuse are required for a workunit,
- # mnt may not exist. Stat and create the directory if it doesn't.
- try:
- remote.run(
- args=[
- 'stat',
- '--',
- mnt,
- ],
- )
- log.info('Did not need to create dir {dir}'.format(dir=mnt))
- except CommandFailedError:
- remote.run(
- args=[
- 'mkdir',
- '--',
- mnt,
- ],
- )
- log.info('Created dir {dir}'.format(dir=mnt))
- created_mountpoint = True
-
- if not subdir:
- subdir = 'client.{id}'.format(id=id_)
-
- if created_mountpoint:
- remote.run(
- args=[
- 'cd',
- '--',
- mnt,
- run.Raw('&&'),
- 'mkdir',
- '--',
- subdir,
- ],
- )
- else:
- remote.run(
- args=[
- # cd first so this will fail if the mount point does
- # not exist; pure install -d will silently do the
- # wrong thing
- 'cd',
- '--',
- mnt,
- run.Raw('&&'),
- 'sudo',
- 'install',
- '-d',
- '-m', '0755',
- '--owner={user}'.format(user=dir_owner),
- '--',
- subdir,
- ],
- )
-
- return created_mountpoint
-
-
-def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None):
- """
- Make a scratch directory for each client in the cluster, and then for each
- test spawn _run_tests() for each role.
-
- See run_tests() for parameter documentation.
- """
- client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
- client_remotes = list()
-
- created_mountpoint = {}
- for client in client_generator:
- (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
- client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
- created_mountpoint[client] = _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)
-
- for unit in tests:
- with parallel() as p:
- for remote, role in client_remotes:
- p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir,
- timeout=timeout)
-
- # cleanup the generated client directories
- client_generator = misc.all_roles_of_type(ctx.cluster, 'client')
- for client in client_generator:
- _delete_dir(ctx, 'client.{id}'.format(id=client), created_mountpoint[client])
-
-
-def _run_tests(ctx, refspec, role, tests, env, subdir=None, timeout=None):
- """
- Run the individual test. Create a scratch directory and then extract the
- workunits from git. Make the executables, and then run the tests.
- Clean up (remove files created) after the tests are finished.
-
- :param ctx: Context
- :param refspec: branch, sha1, or version tag used to identify this
- build
- :param tests: specific tests specified.
- :param env: environment set in yaml file. Could be None.
- :param subdir: subdirectory set in yaml file. Could be None
- :param timeout: If present, use the 'timeout' command on the remote host
- to limit execution time. Must be specified by a number
- followed by 's' for seconds, 'm' for minutes, 'h' for
- hours, or 'd' for days. If '0' or anything that evaluates
- to False is passed, the 'timeout' command is not used.
- """
- testdir = misc.get_testdir(ctx)
- assert isinstance(role, basestring)
- assert role.startswith(CLIENT_PREFIX)
- id_ = role[len(CLIENT_PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- # subdir so we can remove and recreate this a lot without sudo
- if subdir is None:
- scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
- else:
- scratch_tmp = os.path.join(mnt, subdir)
- srcdir = '{tdir}/workunit.{role}'.format(tdir=testdir, role=role)
-
- remote.run(
- logger=log.getChild(role),
- args=[
- 'mkdir', '--', srcdir,
- run.Raw('&&'),
- 'git',
- 'archive',
- '--remote=git://git.ceph.com/ceph.git',
- '%s:qa/workunits' % refspec,
- run.Raw('|'),
- 'tar',
- '-C', srcdir,
- '-x',
- '-f-',
- run.Raw('&&'),
- 'cd', '--', srcdir,
- run.Raw('&&'),
- 'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
- run.Raw('&&'),
- 'find', '-executable', '-type', 'f', '-printf', r'%P\0',
- run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)),
- ],
- )
-
- workunits = sorted(misc.get_file(
- remote,
- '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)).split('\0'))
- assert workunits
-
- try:
- assert isinstance(tests, list)
- for spec in tests:
- log.info('Running workunits matching %s on %s...', spec, role)
- prefix = '{spec}/'.format(spec=spec)
- to_run = [w for w in workunits if w == spec or w.startswith(prefix)]
- if not to_run:
- raise RuntimeError('Spec did not match any workunits: {spec!r}'.format(spec=spec))
- for workunit in to_run:
- log.info('Running workunit %s...', workunit)
- args = [
- 'mkdir', '-p', '--', scratch_tmp,
- run.Raw('&&'),
- 'cd', '--', scratch_tmp,
- run.Raw('&&'),
- run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'),
- run.Raw('CEPH_REF={ref}'.format(ref=refspec)),
- run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)),
- run.Raw('CEPH_ID="{id}"'.format(id=id_)),
- run.Raw('PATH=$PATH:/usr/sbin')
- ]
- if env is not None:
- for var, val in env.iteritems():
- quoted_val = pipes.quote(val)
- env_arg = '{var}={val}'.format(var=var, val=quoted_val)
- args.append(run.Raw(env_arg))
- args.extend([
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir)])
- if timeout and timeout != '0':
- args.extend(['timeout', timeout])
- args.extend([
- '{srcdir}/{workunit}'.format(
- srcdir=srcdir,
- workunit=workunit,
- ),
- ])
- remote.run(
- logger=log.getChild(role),
- args=args,
- label="workunit test {workunit}".format(workunit=workunit)
- )
- remote.run(
- logger=log.getChild(role),
- args=['sudo', 'rm', '-rf', '--', scratch_tmp],
- )
- finally:
- log.info('Stopping %s on %s...', tests, role)
- remote.run(
- logger=log.getChild(role),
- args=[
- 'rm', '-rf', '--', '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role), srcdir,
- ],
- )
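-
-# For reference (not part of the original file), the per-workunit command
-# assembled above looks roughly like this; paths and names are hypothetical:
-#
-#   mkdir -p -- <scratch_tmp> && cd -- <scratch_tmp> && \
-#   CEPH_CLI_TEST_DUP_COMMAND=1 CEPH_REF=<refspec> TESTDIR="<testdir>" \
-#   CEPH_ID="<id>" PATH=$PATH:/usr/sbin \
-#   adjust-ulimits ceph-coverage <testdir>/archive/coverage \
-#   timeout 3h <testdir>/workunit.<role>/<workunit>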
+++ /dev/null
-[tox]
-envlist = flake8
-skipsdist = True
-
-[testenv:flake8]
-deps=
- flake8
-commands=flake8 --select=F ceph-qa-suite