Must be run on bare-metal machines.
On VMs, performance results will be inconsistent
and can't be compared across runs.
- Run ceph on a single node.
+ Run ceph on multiple nodes using ceph-ansible.
Use xfs beneath the osds.
- Setup rgw on client.0
+ Set up rgw via ceph-ansible (rgw.0 on the mon.c node).
roles:
-- [mon.a, mgr.x]
-- [mon.b, mgr.y]
-- [mon.c, mgr.z]
+- [mon.a, mgr.x, osd.9, osd.10, osd.11]
+- [mon.b, mgr.y, osd.12, osd.13, osd.14]
+- [mon.c, mgr.z, osd.15, osd.16, osd.17, rgw.0]
- [osd.0, osd.1, osd.2]
- [osd.3, osd.4, osd.5]
- [osd.6, osd.7, osd.8]
+- [osd.18, osd.19, osd.20]
- [client.0]
tasks:
- install:
-- ceph:
-    fs: xfs
-    wait-for-scrub: false
-    log-whitelist:
-      - \(PG_
-      - \(OSD_
-      - \(OBJECT_
-      - overall HEALTH
-- rgw: [client.0]
+- ssh_keys:
+- ceph_ansible:
overrides:
+  ceph_ansible:
+    vars:
+      ceph_stable: true
+      ceph_origin: distro
+      ceph_stable_rh_storage: true
+      copy_admin_key: true
+      ceph_repository: rhcs
+      ceph_stable_release: luminous
+      osd_scenario: collocated
+      osd_auto_discovery: true
+      ceph_test: true
+      ceph_conf_overrides:
+        global:
+          osd objectstore: bluestore
+          bluestore block size: 96636764160
+      ceph_mgr_modules:
+        - status
+        - restful
+      cephfs_pools:
+        - name: "cephfs_data"
+          pgs: "64"
+        - name: "cephfs_metadata"
+          pgs: "64"
+
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: bluestore
        bluestore block size: 96636764160
-
overrides:
+  ceph_ansible:
+    vars:
+      ceph_stable: true
+      ceph_origin: distro
+      ceph_stable_rh_storage: true
+      ceph_repository: rhcs
+      copy_admin_key: true
+      ceph_stable_release: luminous
+      osd_scenario: collocated
+      osd_auto_discovery: true
+      ceph_test: true
+      ceph_conf_overrides:
+        global:
+          osd objectstore: filestore
+          osd sloppy crc: true
+      ceph_mgr_modules:
+        - status
+        - restful
+      cephfs_pools:
+        - name: "cephfs_data"
+          pgs: "64"
+        - name: "cephfs_metadata"
+          pgs: "64"
  ceph:
    fs: xfs
    conf:
      osd:
        osd objectstore: filestore
        osd sloppy crc: true
-
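
Note: the ceph_ansible entry under tasks: carries no inline config; the assumption here is that teuthology deep-merges the matching overrides: ceph_ansible: block into the task at run time, which is why all deployment variables live under overrides. A rough, illustrative sketch of the effective task configuration for the bluestore variant under that assumption (not part of the suite files themselves):

# Effective ceph_ansible task config (bluestore variant) after overrides are merged -- illustrative only.
- ceph_ansible:
    vars:
      ceph_stable: true
      ceph_origin: distro
      ceph_stable_rh_storage: true
      copy_admin_key: true
      ceph_repository: rhcs
      ceph_stable_release: luminous
      osd_scenario: collocated      # data and journal/DB collocated on the same device
      osd_auto_discovery: true      # ceph-ansible picks up unused devices on each OSD host
      ceph_test: true
      ceph_conf_overrides:          # injected into ceph.conf under [global]
        global:
          osd objectstore: bluestore
          bluestore block size: 96636764160
      ceph_mgr_modules:
        - status
        - restful
      cephfs_pools:                 # CephFS data/metadata pools created by ceph-ansible
        - name: "cephfs_data"
          pgs: "64"
        - name: "cephfs_metadata"
          pgs: "64"

The filestore variant is identical except for the ceph_conf_overrides global section, which sets osd objectstore: filestore and osd sloppy crc: true instead of the bluestore settings.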