[tox]
-envlist = {centos7,xenial}-{bluestore,filestore}-{single_type,single_type_dmcrypt},centos7-{bluestore,filestore}-{mixed_type,mixed_type_dmcrypt,mixed_type_explicit, mixed_type_dmcrypt_explicit}
+envlist = centos7-{bluestore,filestore}-{single_type,single_type_dmcrypt},centos7-{bluestore,filestore}-{mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
skipsdist = True
[testenv]
centos7-bluestore-mixed_type_dmcrypt: {toxinidir}/centos7/bluestore/mixed-type-dmcrypt
centos7-bluestore-mixed_type_explicit: {toxinidir}/centos7/bluestore/mixed-type-explicit
centos7-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos7/bluestore/mixed-type-dmcrypt-explicit
- xenial-filestore-single_type: {toxinidir}/xenial/filestore/single-type
- xenial-filestore-single_type_dmcrypt: {toxinidir}/xenial/filestore/single-type-dmcrypt
- xenial-bluestore-single_type: {toxinidir}/xenial/bluestore/single-type
- xenial-bluestore-single_type_dmcrypt: {toxinidir}/xenial/bluestore/single-type-dmcrypt
commands=
git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
python -m pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
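With the xenial factors dropped, the envlist above expands to centos7-only batch environments. As a minimal usage sketch (environment names follow from the brace expansion above; CEPH_ANSIBLE_BRANCH defaults to master per the clone command in this hunk):

    tox -e centos7-bluestore-single_type
    CEPH_ANSIBLE_BRANCH=master tox -e centos7-filestore-mixed_type_dmcrypt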
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-osd_objectstore: "bluestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-devices:
- - /dev/sdb
- - /dev/sdc
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
-
-# The following is only needed for testing purposes and is not part of
-# ceph-ansible supported variables
-
-osd_ids:
- - 0
- - 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/noop.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_zap.yml
\ No newline at end of file
+++ /dev/null
----
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-osd_objectstore: "bluestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-devices:
- - /dev/sdb
- - /dev/sdc
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
-
-# The following is only needed for testing purposes and is not part of
-# ceph-ansible supported variables
-
-osd_ids:
- - 0
- - 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/noop.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_zap.yml
\ No newline at end of file
+++ /dev/null
----
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-osd_objectstore: "filestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-devices:
- - /dev/sdb
- - /dev/sdc
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
- osd:
- osd_journal_size: 2048
-
-# The following is only needed for testing purposes and is not part of
-# ceph-ansible supported variables
-
-osd_ids:
- - 0
- - 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/noop.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_zap.yml
\ No newline at end of file
+++ /dev/null
----
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-osd_objectstore: "filestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-devices:
- - /dev/sdb
- - /dev/sdc
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
-
-# The following is only needed for testing purposes and is not part of
-# ceph-ansible supported variables
-
-osd_ids:
- - 0
- - 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/noop.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_zap.yml
\ No newline at end of file
+++ /dev/null
----
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
[tox]
-envlist = {centos7,xenial}-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
+envlist = centos7-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
skipsdist = True
[testenv]
# plain/unencrypted
centos7-filestore-create: {toxinidir}/centos7/filestore/create
centos7-bluestore-create: {toxinidir}/centos7/bluestore/create
- xenial-filestore-create: {toxinidir}/xenial/filestore/create
- xenial-bluestore-create: {toxinidir}/xenial/bluestore/create
# dmcrypt
centos7-filestore-dmcrypt: {toxinidir}/centos7/filestore/dmcrypt
centos7-bluestore-dmcrypt: {toxinidir}/centos7/bluestore/dmcrypt
- xenial-filestore-dmcrypt: {toxinidir}/xenial/filestore/dmcrypt
- xenial-bluestore-dmcrypt: {toxinidir}/xenial/bluestore/dmcrypt
# TODO: these are placeholders for now, eventually we want to
# test the prepare/activate workflow of ceph-volume as well
- xenial-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
- xenial-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
-centos7-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
-centos7-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
+centos7-filestore-prepare_activate: {toxinidir}/centos7/filestore/prepare_activate
+centos7-bluestore-prepare_activate: {toxinidir}/centos7/bluestore/prepare_activate
commands=
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-lvm_volumes:
- - data: data-lv1
- data_vg: test_group
- crush_device_class: test
- - data: data-lv2
- data_vg: test_group
- db: journal1
- db_vg: journals
- - data: /dev/sdd1
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_bluestore.yml
\ No newline at end of file
+++ /dev/null
----
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-lvm_volumes:
- - data: data-lv1
- data_vg: test_group
- crush_device_class: test
- - data: data-lv2
- data_vg: test_group
- db: journal1
- db_vg: journals
- - data: /dev/sdd1
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-- hosts: osds
- become: yes
- tasks:
-
- - name: stop ceph-osd@2 daemon
- service:
- name: ceph-osd@2
- state: stopped
-
- - name: stop ceph-osd@0 daemon
- service:
- name: ceph-osd@0
- state: stopped
-
-- hosts: mons
- become: yes
- tasks:
-
- - name: destroy osd.2
- command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
-
- - name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
-
-
-- hosts: osds
- become: yes
- tasks:
-
- # osd.2 device
- - name: zap /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # partitions have been completely removed, so re-create them again
- - name: re-create partition /dev/sdd for lvm data usage
- parted:
- device: /dev/sdd
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- label: gpt
- state: present
-
- - name: redeploy osd.2 using /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # osd.0 lv
- - name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: redeploy osd.0 using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: stop ceph-osd@0 daemon
- service:
- name: ceph-osd@0
- state: stopped
-
-
-- hosts: mons
- become: yes
- tasks:
-
- - name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
-
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: prepare osd.0 using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: activate all to start the previously prepared osd.0
- command: "ceph-volume lvm activate --all"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: node inventory
- command: "ceph-volume inventory"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: list all OSDs
- command: "ceph-volume lvm list"
- environment:
- CEPH_VOLUME_DEBUG: 1
+++ /dev/null
----
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "filestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
-lvm_volumes:
- - data: data-lv1
- journal: /dev/sdc1
- data_vg: test_group
- crush_device_class: test
- - data: data-lv2
- journal: journal1
- data_vg: test_group
- journal_vg: journals
- - data: /dev/sdd1
- journal: /dev/sdd2
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_filestore.yml
\ No newline at end of file
+++ /dev/null
----
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "filestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
-lvm_volumes:
- - data: data-lv1
- journal: /dev/sdc1
- data_vg: test_group
- crush_device_class: test
- - data: data-lv2
- journal: journal1
- data_vg: test_group
- journal_vg: journals
- - data: /dev/sdd1
- journal: /dev/sdd2
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: stop ceph-osd@2 daemon
- service:
- name: ceph-osd@2
- state: stopped
-
- - name: stop ceph-osd@0 daemon
- service:
- name: ceph-osd@0
- state: stopped
-
-
-- hosts: mons
- become: yes
- tasks:
-
- - name: destroy osd.2
- command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
-
- - name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
-
-
-- hosts: osds
- become: yes
- tasks:
-
- # osd.2 device
- - name: zap /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: zap /dev/sdd2
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # partitions have been completely removed, so re-create them again
- - name: re-create partition /dev/sdd for lvm data usage
- parted:
- device: /dev/sdd
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- label: gpt
- state: present
-
- - name: re-create partition /dev/sdd lvm journals
- parted:
- device: /dev/sdd
- number: 2
- part_start: 50%
- part_end: 100%
- unit: '%'
- state: present
- label: gpt
-
- - name: redeploy osd.2 using /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # osd.0 lv
- - name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: zap /dev/sdc1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: re-create partition /dev/sdc1
- parted:
- device: /dev/sdc
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- state: present
- label: gpt
-
- - name: prepare osd.0 again using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: activate all to start the previously prepared osd.0
- command: "ceph-volume lvm activate --filestore --all"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: node inventory
- command: "ceph-volume inventory"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: list all OSDs
- command: "ceph-volume lvm list"
- environment:
- CEPH_VOLUME_DEBUG: 1
+++ /dev/null
----
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
[tox]
-envlist = {centos7,xenial}-{filestore,bluestore}-{activate,dmcrypt_plain,dmcrypt_luks}
+envlist = centos7-{filestore,bluestore}-{activate,dmcrypt_plain,dmcrypt_luks}
skipsdist = True
[testenv]
changedir=
centos7-filestore-activate: {toxinidir}/centos7/filestore/activate
centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
- xenial-filestore-activate: {toxinidir}/xenial/filestore/activate
- xenial-bluestore-activate: {toxinidir}/xenial/bluestore/activate
- xenial-bluestore-dmcrypt_plain: {toxinidir}/xenial/bluestore/dmcrypt-plain
- xenial-bluestore-dmcrypt_luks: {toxinidir}/xenial/bluestore/dmcrypt-luks
- xenial-filestore-dmcrypt_plain: {toxinidir}/xenial/filestore/dmcrypt-plain
- xenial-filestore-dmcrypt_luks: {toxinidir}/xenial/filestore/dmcrypt-luks
centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain
centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks
centos7-filestore-dmcrypt_plain: {toxinidir}/centos7/filestore/dmcrypt-plain
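The centos7 simple scenarios retained above exercise the ceph-volume simple scan/activate flow; as a sketch of the equivalent manual steps, using the cluster name these configs set (cluster: test):

    ceph-volume --cluster=test simple scan
    ceph-volume --cluster=test simple activate --all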
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
+++ /dev/null
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
+++ /dev/null
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: list all OSD directories
- find:
- paths: /var/lib/ceph/osd
- file_type: directory
- register: osd_paths
-
- - name: scan all OSD directories
- command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_paths.files }}"
-
- - name: list all OSD JSON files
- find:
- paths: /etc/ceph/osd
- file_type: file
- register: osd_configs
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_configs.files }}"
+++ /dev/null
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
- osd:
- osd_dmcrypt_type: luks
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
+++ /dev/null
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
+++ /dev/null
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: list all OSD directories
- find:
- paths: /var/lib/ceph/osd
- file_type: directory
- register: osd_paths
-
- - name: scan all OSD directories
- command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_paths.files }}"
-
- - name: list all OSD JSON files
- find:
- paths: /etc/ceph/osd
- file_type: file
- register: osd_configs
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_configs.files }}"
+++ /dev/null
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
- osd:
- osd_dmcrypt_type: plain
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
+++ /dev/null
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
+++ /dev/null
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: list all OSD directories
- find:
- paths: /var/lib/ceph/osd
- file_type: directory
- register: osd_paths
-
- - name: scan all OSD directories
- command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_paths.files }}"
-
- - name: list all OSD JSON files
- find:
- paths: /etc/ceph/osd
- file_type: file
- register: osd_configs
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_configs.files }}"
+++ /dev/null
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "filestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
+++ /dev/null
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
+++ /dev/null
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: scan all running OSDs
- command: "ceph-volume --cluster={{ cluster }} simple scan"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --all"
- environment:
- CEPH_VOLUME_DEBUG: 1
+++ /dev/null
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "filestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
- osd:
- osd_dmcrypt_type: luks
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
+++ /dev/null
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
+++ /dev/null
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: list all OSD directories
- find:
- paths: /var/lib/ceph/osd
- file_type: directory
- register: osd_paths
-
- - name: scan all OSD directories
- command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_paths.files }}"
-
- - name: list all OSD JSON files
- find:
- paths: /etc/ceph/osd
- file_type: file
- register: osd_configs
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_configs.files }}"
+++ /dev/null
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "filestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
- osd:
- osd_dmcrypt_type: plain
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
+++ /dev/null
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
+++ /dev/null
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: list all OSD directories
- find:
- paths: /var/lib/ceph/osd
- file_type: directory
- register: osd_paths
-
- - name: scan all OSD directories
- command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_paths.files }}"
-
- - name: list all OSD JSON files
- find:
- paths: /etc/ceph/osd
- file_type: file
- register: osd_configs
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_configs.files }}"
+++ /dev/null
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: ceph/ubuntu-xenial
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-