--- /dev/null
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
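+# The entries below cover three data layouts: a bare LV, an LV with a
+# separate block.db LV, and a raw partition.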
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+ - data: /dev/vdd1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
--- /dev/null
+---
+
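+# Same OSD layout as the plain bluestore lvm scenario, with dmcrypt-encrypted OSDs.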
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+ - data: /dev/vdd1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
--- /dev/null
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
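+# The entries below cover three journal layouts: a raw partition, an LV in a
+# dedicated VG, and a data/journal pair of partitions on the same device.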
+lvm_volumes:
+ - data: data-lv1
+ journal: /dev/vdc1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ journal: journal1
+ data_vg: test_group
+ journal_vg: journals
+ - data: /dev/vdd1
+ journal: /dev/vdd2
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
--- /dev/null
+---
+
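+# Same OSD layout as the plain filestore lvm scenario, with dmcrypt-encrypted OSDs.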
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
+lvm_volumes:
+ - data: data-lv1
+ journal: /dev/vdc1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ journal: journal1
+ data_vg: test_group
+ journal_vg: journals
+ - data: /dev/vdd1
+ journal: /dev/vdd2
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-lvm_volumes:
- - data: data-lv1
- data_vg: test_group
- crush_device_class: test
- - data: data-lv2
- data_vg: test_group
- db: journal1
- db_vg: journals
- - data: /dev/sdd1
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_bluestore.yml
\ No newline at end of file
+++ /dev/null
----
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-lvm_volumes:
- - data: data-lv1
- data_vg: test_group
- crush_device_class: test
- - data: data-lv2
- data_vg: test_group
- db: journal1
- db_vg: journals
- - data: /dev/sdd1
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-- hosts: osds
- become: yes
- tasks:
-
- - name: stop ceph-osd@2 daemon
- service:
- name: ceph-osd@2
- state: stopped
-
- - name: stop ceph-osd@0 daemon
- service:
- name: ceph-osd@0
- state: stopped
-
-- hosts: mons
- become: yes
- tasks:
-
- - name: destroy osd.2
- command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
-
- - name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
-
-- hosts: osds
- become: yes
- tasks:
-
- # osd.2 device
- - name: zap /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # partitions have been completely removed, so re-create them again
- - name: re-create partition /dev/sdd for lvm data usage
- parted:
- device: /dev/sdd
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- label: gpt
- state: present
-
- - name: redeploy osd.2 using /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # osd.0 lv
- - name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: redeploy osd.0 using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: stop ceph-osd@0 daemon
- service:
- name: ceph-osd@0
- state: stopped
-
-
-- hosts: mons
- become: yes
- tasks:
-
- - name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
-
-
-- hosts: osds
- become: yes
- tasks:
-
-
- - name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: prepare osd.0 using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: activate all to start the previously prepared osd.0
- command: "ceph-volume lvm activate --all"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: node inventory
- command: "ceph-volume inventory"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: list all OSDs
- command: "ceph-volume lvm list"
- environment:
- CEPH_VOLUME_DEBUG: 1
+++ /dev/null
----
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "filestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
-lvm_volumes:
- - data: data-lv1
- journal: /dev/sdc1
- data_vg: test_group
- crush_device_class: test
- - data: data-lv2
- journal: journal1
- data_vg: test_group
- journal_vg: journals
- - data: /dev/sdd1
- journal: /dev/sdd2
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_filestore.yml
\ No newline at end of file
+++ /dev/null
----
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.3.0/24"
-cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "filestore"
-osd_scenario: lvm
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
-lvm_volumes:
- - data: data-lv1
- journal: /dev/sdc1
- data_vg: test_group
- crush_device_class: test
- - data: data-lv2
- journal: journal1
- data_vg: test_group
- journal_vg: journals
- - data: /dev/sdd1
- journal: /dev/sdd2
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: stop ceph-osd@2 daemon
- service:
- name: ceph-osd@2
- state: stopped
-
- - name: stop ceph-osd@0 daemon
- service:
- name: ceph-osd@0
- state: stopped
-
-
-- hosts: mons
- become: yes
- tasks:
-
- - name: destroy osd.2
- command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
-
- - name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
-
-
-- hosts: osds
- become: yes
- tasks:
-
- # osd.2 device
- - name: zap /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: zap /dev/sdd2
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # partitions have been completely removed, so re-create them again
- - name: re-create partition /dev/sdd for lvm data usage
- parted:
- device: /dev/sdd
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- label: gpt
- state: present
-
- - name: re-create partition /dev/sdd lvm journals
- parted:
- device: /dev/sdd
- number: 2
- part_start: 50%
- part_end: 100%
- unit: '%'
- state: present
- label: gpt
-
- - name: redeploy osd.2 using /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # osd.0 lv
- - name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: zap /dev/sdc1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: re-create partition /dev/sdc1
- parted:
- device: /dev/sdc
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- state: present
- label: gpt
-
- - name: prepare osd.0 again using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: activate all to start the previously prepared osd.0
- command: "ceph-volume lvm activate --filestore --all"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: node inventory
- command: "ceph-volume inventory"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: list all OSDs
- command: "ceph-volume lvm list"
- environment:
- CEPH_VOLUME_DEBUG: 1
+++ /dev/null
----
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 1
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.3
-cluster_subnet: 192.168.4
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/bluestore_lvm
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/setup_partitions.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_bluestore.yml
\ No newline at end of file
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/bluestore_lvm_dmcrypt
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/setup_partitions.yml
\ No newline at end of file
--- /dev/null
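+# Functional test: tear down osd.2 (partition-backed) and osd.0 (LV-backed),
+# zap their devices and redeploy them with ceph-volume lvm create, then
+# re-deploy osd.0 once more through the split prepare/activate workflow.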
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/vdd for lvm data usage
+ parted:
+ device: /dev/vdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: redeploy osd.2 using /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 lv
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: redeploy osd.0 using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
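+  # second pass: re-deploy osd.0 through prepare + activate instead of create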
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: prepare osd.0 using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/filestore_lvm
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/setup_partitions.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_filestore.yml
\ No newline at end of file
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/filestore_lvm_dmcrypt
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/setup_partitions.yml
\ No newline at end of file
--- /dev/null
+
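+# Functional test: tear down osd.2 (partition-backed) and osd.0 (LV-backed),
+# zap their data and journal devices, then redeploy osd.2 with
+# ceph-volume lvm create and osd.0 through the split prepare/activate workflow.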
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap /dev/vdd2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/vdd for lvm data usage
+ parted:
+ device: /dev/vdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: re-create partition /dev/vdd lvm journals
+ parted:
+ device: /dev/vdd
+ number: 2
+ part_start: 50%
+ part_end: 100%
+ unit: '%'
+ state: present
+ label: gpt
+
+ - name: redeploy osd.2 using /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 lv
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap /dev/vdc1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: re-create partition /dev/vdc1
+ parted:
+ device: /dev/vdc
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ state: present
+ label: gpt
+
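+  # osd.0 comes back through the split prepare + activate workflow instead of create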
+ - name: prepare osd.0 again using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --filestore --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
become: yes
tasks:
- - name: partition /dev/sdd for lvm data usage
+ - name: partition /dev/vdd for lvm data usage
parted:
- device: /dev/sdd
+ device: /dev/vdd
number: 1
part_start: 0%
part_end: 50%
label: gpt
state: present
- - name: partition /dev/sdd lvm journals
+ - name: partition /dev/vdd lvm journals
parted:
- device: /dev/sdd
+ device: /dev/vdd
number: 2
part_start: 50%
part_end: 100%
tasks:
# osd.2 device
- - name: zap /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+ - name: zap /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1
# partitions have been completely removed, so re-create them again
- - name: re-create partition /dev/sdd for lvm data usage
+ - name: re-create partition /dev/vdd for lvm data usage
parted:
- device: /dev/sdd
+ device: /dev/vdd
number: 1
part_start: 0%
part_end: 50%
label: gpt
state: present
- - name: redeploy osd.2 using /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
+ - name: redeploy osd.2 using /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1
tasks:
# osd.2 device
- - name: zap /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+ - name: zap /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1
# osd.2 journal
- - name: zap /dev/sdd2
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
+ - name: zap /dev/vdd2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1
# partitions have been completely removed, so re-create them again
- - name: re-create partition /dev/sdd for lvm data usage
+ - name: re-create partition /dev/vdd for lvm data usage
parted:
- device: /dev/sdd
+ device: /dev/vdd
number: 1
part_start: 0%
part_end: 50%
label: gpt
state: present
- - name: re-create partition /dev/sdd lvm journals
+ - name: re-create partition /dev/vdd lvm journals
parted:
- device: /dev/sdd
+ device: /dev/vdd
number: 2
part_start: 50%
part_end: 100%
state: present
label: gpt
- - name: redeploy osd.2 using /dev/sdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+ - name: redeploy osd.2 using /dev/vdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_VOLUME_DEBUG: 1
# osd.0 journal device (zap without --destroy that removes the LV)
- - name: zap /dev/sdc1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1"
+ - name: zap /dev/vdc1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: prepare osd.0 again using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1
[tox]
-envlist = centos7-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
+envlist = centos8-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
skipsdist = True
[testenv]
DEBIAN_FRONTEND=noninteractive
changedir=
# plain/unencrypted
- centos7-filestore-create: {toxinidir}/centos7/filestore/create
- centos7-bluestore-create: {toxinidir}/centos7/bluestore/create
+ centos8-filestore-create: {toxinidir}/centos8/filestore/create
+ centos8-bluestore-create: {toxinidir}/centos8/bluestore/create
# dmcrypt
- centos7-filestore-dmcrypt: {toxinidir}/centos7/filestore/dmcrypt
- centos7-bluestore-dmcrypt: {toxinidir}/centos7/bluestore/dmcrypt
+ centos8-filestore-dmcrypt: {toxinidir}/centos8/filestore/dmcrypt
+ centos8-bluestore-dmcrypt: {toxinidir}/centos8/bluestore/dmcrypt
# TODO: these are placeholders for now, eventually we want to
# test the prepare/activate workflow of ceph-volume as well
- centos7-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
- centos7-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
+ centos8-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
+ centos8-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
commands=
git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt