+++ /dev/null
-meta:
-- desc: "3-node cluster"
-roles:
-- [mon.a, mds.a, osd.0, osd.1, client.0]
-- [mon.b, mds.b, osd.2, osd.3]
-- [mon.c, mds.c, osd.4, osd.5]
+++ /dev/null
-meta:
-- desc: "1-node cluster"
-roles:
- - [mon.a, osd.0, client.0]
+++ /dev/null
-../../../distros/supported
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: "Build the cluster using ceph-ansible; then check health and make the keyring readable"
-tasks:
-- ceph_ansible:
-- exec:
- mon.a:
- - "sudo ceph health"
-- exec:
- all:
- - "KEYRING=/etc/ceph/ceph.client.admin.keyring; test -f $KEYRING && sudo chmod o+r $KEYRING"
-- install.ship_utilities:
-overrides:
- ceph_ansible:
- vars:
- ceph_test: true
-openstack:
- - volumes:
- count: 3
- size: 20 # GB
+++ /dev/null
-meta:
-- desc: "Set os_tuning_params to values that are safe for VMs"
-overrides:
- ceph_ansible:
- vars:
- os_tuning_params: '[{"name": "kernel.pid_max", "value": 4194303},{"name": "fs.file-max", "value": 26234859}]'
+++ /dev/null
-meta:
-- desc: "Use a stable upstream Ceph release"
-overrides:
- ceph_ansible:
- vars:
- ceph_origin: upstream
- ceph_stable: true
+++ /dev/null
-meta:
-- desc: "Have teuthology tell ceph-ansible which OSD devices to use"
-overrides:
- ceph_ansible:
- vars:
- osd_auto_discovery: false
+++ /dev/null
-meta:
-- desc: "Tell ceph-ansible to discover OSD devices automatically"
-overrides:
- ceph_ansible:
- vars:
- osd_auto_discovery: true
+++ /dev/null
-meta:
-- desc: "Use a collocated journal"
-overrides:
- ceph_ansible:
- vars:
- journal_collocation: true
- journal_size: 1024
+++ /dev/null
-meta:
-- desc: "Run ceph-admin-commands.sh"
-tasks:
-- workunit:
- clients:
- client.0:
- - ceph-tests/ceph-admin-commands.sh
+++ /dev/null
-meta:
-- desc: "Run the rados cls tests"
-tasks:
-- workunit:
- clients:
- client.0:
- - cls
+++ /dev/null
-meta:
-- desc: "Run the rbd cli tests"
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/run_cli_tests.sh
-
+++ /dev/null
-meta:
-- desc: "Run the rbd import/export tests"
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/import_export.sh
--- /dev/null
+meta:
+- desc: "3-node cluster"
+roles:
+- [mon.a, mds.a, osd.0, osd.1, osd.2]
+- [mon.b, osd.3, osd.4, osd.5]
+- [mon.c, osd.6, osd.7, osd.8, client.0]
--- /dev/null
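+# Distro facet: each generated job runs on exactly one of these os_type/os_version fragments.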
+os_type: centos
+os_version: "7.2"
--- /dev/null
+os_type: ubuntu
+os_version: "14.04"
--- /dev/null
+os_type: ubuntu
+os_version: "16.04"
--- /dev/null
+meta:
+- desc: "Build the ceph cluster using ceph-ansible"
+
+overrides:
+ ceph_ansible:
+ vars:
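+      # ceph.conf [global] overrides: 2x replication and a relaxed PG-per-OSD warning threshold suit this small test cluster.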
+ ceph_conf_overrides:
+ global:
+          osd pool default size: 2
+ mon pg warn min per osd: 2
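+      # Install development (ceph_dev) packages built from the jewel branch; autobuild.asc is the signing key for the dev repositories.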
+ ceph_dev: true
+ ceph_dev_branch: jewel
+ ceph_dev_key: https://download.ceph.com/keys/autobuild.asc
+ ceph_origin: upstream
+ ceph_test: true
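+      # Co-locate a 1024 MB journal on each OSD device; auto-discovery is off, so teuthology passes the device list to ceph-ansible.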
+ journal_collocation: true
+ journal_size: 1024
+ osd_auto_discovery: false
+
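+# ssh-keys sets up passwordless SSH between the test nodes for ceph-ansible; install.ship_utilities then ships the helper binaries that later workunits rely on.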
+tasks:
+- ssh-keys:
+- ceph_ansible:
+- install.ship_utilities:
--- /dev/null
+meta:
+- desc: "Run ceph-admin-commands.sh"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - ceph-tests/ceph-admin-commands.sh
--- /dev/null
+meta:
+- desc: "Run the cephfs blogbench tests"
+tasks:
+- ceph-fuse:
+- workunit:
+ clients:
+ all:
+ - suites/blogbench.sh
--- /dev/null
+meta:
+- desc: "Run the rados cls tests"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - cls
--- /dev/null
+meta:
+- desc: "Run the rbd cli tests"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/run_cli_tests.sh
+
--- /dev/null
+meta:
+- desc: "Run the rbd import/export tests"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/import_export.sh