--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/bluestore
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/setup_mixed_type.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_explicit.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_zap.yml
\ No newline at end of file
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/bluestore
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/setup_mixed_type.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_zap.yml
\ No newline at end of file
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/bluestore
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/setup_mixed_type.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_explicit.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_zap.yml
\ No newline at end of file
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/bluestore
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/setup_mixed_type.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_zap.yml
\ No newline at end of file
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/bluestore_single
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/noop.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_zap.yml
\ No newline at end of file
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/bluestore_single
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/noop.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_zap.yml
\ No newline at end of file
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
-../../../../../group_vars/bluestore
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_mixed_type.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_explicit.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_zap.yml
\ No newline at end of file
+++ /dev/null
-../../../../vagrant_variables.yml
\ No newline at end of file
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
-../../../../../group_vars/bluestore
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_mixed_type.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_zap.yml
\ No newline at end of file
+++ /dev/null
-../../../../vagrant_variables.yml
\ No newline at end of file
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
-../../../../../group_vars/bluestore
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_mixed_type.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_explicit.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_zap.yml
\ No newline at end of file
+++ /dev/null
-../../../../vagrant_variables.yml
\ No newline at end of file
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
-../../../../../group_vars/bluestore
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_mixed_type.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_zap.yml
\ No newline at end of file
+++ /dev/null
-../../../../vagrant_variables.yml
\ No newline at end of file
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
-../../../../../group_vars/bluestore_single
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/noop.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_zap.yml
\ No newline at end of file
+++ /dev/null
-../../../../vagrant_variables.yml
\ No newline at end of file
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
-../../../../../group_vars/bluestore_single
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/noop.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_zap.yml
\ No newline at end of file
+++ /dev/null
-../../../../vagrant_variables.yml
\ No newline at end of file
tasks:
- name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ command: "ceph osd down osd.{{ item }}"
with_items: "{{ osd_ids }}"
- name: purge osds
- command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ command: "ceph osd purge osd.{{ item }} --yes-i-really-mean-it"
with_items: "{{ osd_ids }}"
- hosts: osds
tasks:
- name: zap devices used for OSDs
- command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy"
+ command: "ceph-volume lvm zap {{ item }} --destroy"
with_items: "{{ devices }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: batch create devices again
- command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ command: "ceph-volume lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: ensure batch create is idempotent
- command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ command: "ceph-volume lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
register: batch_cmd
failed_when: false
environment:
- "'strategy changed' not in batch_cmd.stderr"
- name: run batch --report to see if devices get filtered
- command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ command: "ceph-volume lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
register: report_cmd
failed_when: false
environment:
tasks:
- name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ command: "ceph osd down osd.{{ item }}"
with_items: "{{ osd_ids }}"
- name: purge osds
- command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ command: "ceph osd purge osd.{{ item }} --yes-i-really-mean-it"
with_items: "{{ osd_ids }}"
- hosts: osds
tasks:
- name: zap devices used for OSDs
- command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy"
+ command: "ceph-volume lvm zap {{ item }} --destroy"
with_items: "{{ devices }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: batch create devices again
- command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ command: "ceph-volume lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: ensure batch create is idempotent when all data devices are filtered
- command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ command: "ceph-volume lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
register: batch_cmd
failed_when: false
environment:
- batch_cmd.rc != 0
- name: run batch --report to see if devices get filtered
- command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ command: "ceph-volume lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
register: report_cmd
failed_when: false
environment:
tasks:
- name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ command: "ceph osd down osd.{{ item }}"
with_items: "{{ osd_ids }}"
- name: purge osds
- command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ command: "ceph osd purge osd.{{ item }} --yes-i-really-mean-it"
with_items: "{{ osd_ids }}"
tasks:
- name: zap devices used for OSDs
- command: "ceph-volume --cluster {{ cluster }} lvm zap --osd-id {{ item }} --destroy"
+ command: "ceph-volume lvm zap --osd-id {{ item }} --destroy"
with_items: "{{ osd_ids }}"
environment:
CEPH_VOLUME_DEBUG: 1
[tox]
-envlist = centos8-bluestore-{single_type,single_type_dmcrypt,mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
+envlist = centos-bluestore-{single_type,single_type_dmcrypt,mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
skipsdist = True
[testenv]
VAGRANT_CWD = {changedir}
CEPH_VOLUME_DEBUG = 1
DEBIAN_FRONTEND=noninteractive
+ ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections
+ CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
changedir=
- centos8-bluestore-single_type: {toxinidir}/centos8/bluestore/single-type
- centos8-bluestore-single_type_dmcrypt: {toxinidir}/centos8/bluestore/single-type-dmcrypt
- centos8-bluestore-mixed_type: {toxinidir}/centos8/bluestore/mixed-type
- centos8-bluestore-mixed_type_dmcrypt: {toxinidir}/centos8/bluestore/mixed-type-dmcrypt
- centos8-bluestore-mixed_type_explicit: {toxinidir}/centos8/bluestore/mixed-type-explicit
- centos8-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos8/bluestore/mixed-type-dmcrypt-explicit
+ centos-bluestore-single_type: {toxinidir}/centos/bluestore/single-type
+ centos-bluestore-single_type_dmcrypt: {toxinidir}/centos/bluestore/single-type-dmcrypt
+ centos-bluestore-mixed_type: {toxinidir}/centos/bluestore/mixed-type
+ centos-bluestore-mixed_type_dmcrypt: {toxinidir}/centos/bluestore/mixed-type-dmcrypt
+ centos-bluestore-mixed_type_explicit: {toxinidir}/centos/bluestore/mixed-type-explicit
+ centos-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos/bluestore/mixed-type-dmcrypt-explicit
commands=
git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
python -m pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
- ansible-galaxy install -r {envdir}/tmp/ceph-ansible/requirements.yml -v
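+ # install the collections ceph-ansible requires into {envdir}/ansible_collections so ANSIBLE_COLLECTIONS_PATH picks them up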
+ ansible-galaxy collection install -r {envdir}/tmp/ceph-ansible/requirements.yml -v -p {envdir}/ansible_collections
# bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:""} {posargs:--provider=virtualbox}
bash {toxinidir}/../scripts/vagrant_up.sh {posargs:--provider=virtualbox}
# use ceph-ansible to deploy a ceph cluster on the vms
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
- # prepare nodes for testing with testinfra
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
-
# test cluster state using testinfra
py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
---
ceph_dev: True
-cluster: test
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
osd_objectstore: "bluestore"
osd_scenario: lvm
num_osds: 2
---
ceph_dev: True
-cluster: test
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
dmcrypt: True
ceph_dev: True
-cluster: test
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
---
ceph_dev: True
-cluster: test
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
osd_objectstore: "bluestore"
osd_scenario: lvm
ceph_origin: 'repository'
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/bluestore_lvm
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/setup_partitions.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_bluestore.yml
\ No newline at end of file
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
--- /dev/null
+../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../../../group_vars/bluestore_lvm_dmcrypt
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
--- /dev/null
+../../../playbooks/setup_partitions.yml
\ No newline at end of file
--- /dev/null
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+- hosts: mons
+ become: yes
+ tasks:
+ - name: mark osds down
+ command: "ceph osd down osd.{{ item }}"
+ with_items:
+ - 0
+ - 2
+
+ - name: destroy osd.2
+ command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+ - name: destroy osd.0
+ command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/vdd1
+ command: "ceph-volume lvm zap /dev/vdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+  # the partition was completely removed, so re-create it
+ - name: re-create partition /dev/vdd for lvm data usage
+ parted:
+ device: /dev/vdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: redeploy osd.2 using /dev/vdd1
+ command: "ceph-volume lvm create --bluestore --data /dev/vdd1 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 lv
+ - name: zap test_group/data-lv1
+ command: "ceph-volume lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: redeploy osd.0 using test_group/data-lv1
+ command: "ceph-volume lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+ - name: mark osds down
+ command: "ceph osd down osd.0"
+
+ - name: destroy osd.0
+ command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
+ register: result
+ retries: 30
+ delay: 1
+ until: result is succeeded
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+
+ - name: zap test_group/data-lv1
+ command: "ceph-volume lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: prepare osd.0 using test_group/data-lv1
+ command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
--- /dev/null
+../../../../vagrant_variables.yml
\ No newline at end of file
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
-../../../../../group_vars/bluestore_lvm
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_bluestore.yml
\ No newline at end of file
+++ /dev/null
-../../../../vagrant_variables.yml
\ No newline at end of file
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
-../../../../../group_vars/bluestore_lvm_dmcrypt
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-- hosts: osds
- become: yes
- tasks:
-
- - name: stop ceph-osd@2 daemon
- service:
- name: ceph-osd@2
- state: stopped
-
- - name: stop ceph-osd@0 daemon
- service:
- name: ceph-osd@0
- state: stopped
-
-- hosts: mons
- become: yes
- tasks:
- - name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
- with_items:
- - 0
- - 2
-
- - name: destroy osd.2
- command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
- register: result
- retries: 30
- delay: 1
- until: result is succeeded
-
- - name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
- register: result
- retries: 30
- delay: 1
- until: result is succeeded
-
-- hosts: osds
- become: yes
- tasks:
-
- # osd.2 device
- - name: zap /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # partitions have been completely removed, so re-create them again
- - name: re-create partition /dev/vdd for lvm data usage
- parted:
- device: /dev/vdd
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- label: gpt
- state: present
-
- - name: redeploy osd.2 using /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # osd.0 lv
- - name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: redeploy osd.0 using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: stop ceph-osd@0 daemon
- service:
- name: ceph-osd@0
- state: stopped
-
-
-- hosts: mons
- become: yes
- tasks:
- - name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.0"
-
- - name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
- register: result
- retries: 30
- delay: 1
- until: result is succeeded
-
-
-- hosts: osds
- become: yes
- tasks:
-
-
- - name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: prepare osd.0 using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: activate all to start the previously prepared osd.0
- command: "ceph-volume lvm activate --all"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: node inventory
- command: "ceph-volume inventory"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: list all OSDs
- command: "ceph-volume lvm list"
- environment:
- CEPH_VOLUME_DEBUG: 1
+++ /dev/null
-../../../../vagrant_variables.yml
\ No newline at end of file
become: yes
tasks:
- name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ command: "ceph osd down osd.{{ item }}"
with_items:
- 0
- 2
- name: destroy osd.2
- command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+ command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
until: result is succeeded
- name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+ command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
# osd.2 device
- name: zap /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+ command: "ceph-volume lvm zap /dev/vdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1
state: present
- name: redeploy osd.2 using /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
+ command: "ceph-volume lvm create --bluestore --data /dev/vdd1 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1
# osd.0 device (zap without --destroy that removes the LV)
- name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ command: "ceph-volume lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: prepare osd.0 again using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+ command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1
# zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
- name: zap test_zap/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
+ command: "ceph-volume lvm zap --destroy test_zap/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: zap test_zap/data-lv2
- command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
+ command: "ceph-volume lvm zap --destroy test_zap/data-lv2"
environment:
CEPH_VOLUME_DEBUG: 1
[tox]
-envlist = centos8-bluestore-{create,prepare_activate,dmcrypt}
+envlist = centos-bluestore-{create,prepare_activate,dmcrypt}
skipsdist = True
[testenv]
VAGRANT_CWD = {changedir}
CEPH_VOLUME_DEBUG = 1
DEBIAN_FRONTEND=noninteractive
+ ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections
+ CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
changedir=
# plain/unencrypted
- centos8-bluestore-create: {toxinidir}/centos8/bluestore/create
+ centos-bluestore-create: {toxinidir}/centos/bluestore/create
# dmcrypt
- centos8-bluestore-dmcrypt: {toxinidir}/centos8/bluestore/dmcrypt
+ centos-bluestore-dmcrypt: {toxinidir}/centos/bluestore/dmcrypt
# TODO: these are placeholders for now, eventually we want to
# test the prepare/activate workflow of ceph-volume as well
- centos8-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
+ centos-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
commands=
- git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
+ git clone -b {env:CEPH_ANSIBLE_BRANCH:main} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
- ansible-galaxy install -r {envdir}/tmp/ceph-ansible/requirements.yml -v
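+ # install the collections ceph-ansible requires into {envdir}/ansible_collections so ANSIBLE_COLLECTIONS_PATH picks them up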
+ ansible-galaxy collection install -r {envdir}/tmp/ceph-ansible/requirements.yml -v -p {envdir}/ansible_collections
bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
# use ceph-ansible to deploy a ceph cluster on the vms
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
-
- # prepare nodes for testing with testinfra
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
# test cluster state using testinfra
py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
DEBIAN_FRONTEND: noninteractive
pre_tasks:
- # If we can't get python2 installed before any module is used we will fail
- # so just try what we can to get it installed
- - name: check for python2
- stat:
- path: /usr/bin/python
- ignore_errors: yes
- register: systempython2
-
- - name: install python2 for debian based systems
- raw: sudo apt-get -y install python-simplejson
- ignore_errors: yes
- when:
- - systempython2.stat is undefined or systempython2.stat.exists == false
-
# Ansible will try to auto-install python-apt, in some systems this might be
# python3-apt, or python-apt, and it has caused whole runs to fail because
# it is trying to do an interactive prompt
- python-apt
- aptitude
- - name: install python2 for fedora
- raw: sudo dnf -y install python creates=/usr/bin/python
- ignore_errors: yes
- when:
- - systempython2.stat is undefined or systempython2.stat.exists == false
-
- - name: install python2 for opensuse
- raw: sudo zypper -n install python-base creates=/usr/bin/python2.7
- ignore_errors: yes
- when:
- - systempython2.stat is undefined or systempython2.stat.exists == false
-
- name: gather facts
setup:
when:
state: latest
when: not is_atomic | bool
+ - name: install net-tools
+ package:
+ name: net-tools
+ state: present
+ when: not is_atomic | bool
+
- name: update the system
command: dnf update -y
changed_when: false
set -e
+CEPH_ANSIBLE_VAGRANT_BOX="${CEPH_ANSIBLE_VAGRANT_BOX:-centos/stream9}"
+
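+# refresh the CentOS Stream box: pull the newest vagrant-libvirt image from cloud.centos.org and re-register it under the configured box name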
+if [[ "${CEPH_ANSIBLE_VAGRANT_BOX}" =~ "centos/stream" ]]; then
+ EL_VERSION="${CEPH_ANSIBLE_VAGRANT_BOX: -1}"
+ LATEST_IMAGE="$(curl -s https://cloud.centos.org/centos/${EL_VERSION}-stream/x86_64/images/CHECKSUM | sed -nE 's/^SHA256.*\((.*-([0-9]+).*vagrant-libvirt.box)\).*$/\1/p' | sort -u | tail -n1)"
+ vagrant box remove "${CEPH_ANSIBLE_VAGRANT_BOX}" --all --force || true
+ vagrant box add --force --provider libvirt --name "${CEPH_ANSIBLE_VAGRANT_BOX}" "https://cloud.centos.org/centos/${EL_VERSION}-stream/x86_64/images/${LATEST_IMAGE}"
+fi
+
retries=0
until [ $retries -ge 5 ]
do
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
+++ /dev/null
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
+++ /dev/null
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: list all OSD directories
- find:
- paths: /var/lib/ceph/osd
- file_type: directory
- register: osd_paths
-
- - name: scan all OSD directories
- command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_paths.files }}"
-
- - name: list all OSD JSON files
- find:
- paths: /etc/ceph/osd
- file_type: file
- register: osd_configs
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_configs.files }}"
+++ /dev/null
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
- osd:
- osd_dmcrypt_type: luks
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
+++ /dev/null
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
+++ /dev/null
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: scan all running OSDs
- command: "ceph-volume --cluster={{ cluster }} simple scan"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --all"
- environment:
- CEPH_VOLUME_DEBUG: 1
+++ /dev/null
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
- osd:
- osd_dmcrypt_type: plain
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
+++ /dev/null
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
+++ /dev/null
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
+++ /dev/null
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: list all OSD directories
- find:
- paths: /var/lib/ceph/osd
- file_type: directory
- register: osd_paths
-
- - name: scan all OSD directories
- command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_paths.files }}"
-
- - name: list all OSD JSON files
- find:
- paths: /etc/ceph/osd
- file_type: file
- register: osd_configs
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_configs.files }}"
+++ /dev/null
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
+++ /dev/null
-[tox]
-envlist = centos7-bluestore-{activate,dmcrypt_plain,dmcrypt_luks}
-skipsdist = True
-
-[testenv]
-deps = mock
-allowlist_externals =
- vagrant
- bash
- git
- sleep
- cp
-passenv=*
-setenv=
- ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg
- ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
- ANSIBLE_STDOUT_CALLBACK = debug
- VAGRANT_CWD = {changedir}
- CEPH_VOLUME_DEBUG = 1
- DEBIAN_FRONTEND=noninteractive
-changedir=
- centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
- centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain
- centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks
-commands=
- git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
- pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
- ansible-galaxy install -r {envdir}/tmp/ceph-ansible/requirements.yml -v
-
- bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
- bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
-
- cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
-
- # use ceph-ansible to deploy a ceph cluster on the vms
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
-
- # prepare nodes for testing with testinfra
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
-
- # test cluster state testinfra
- py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
-
- # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
- ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
-
- # reboot all vms
- bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
-
- # wait 2 minutes for services to be ready
- sleep 120
-
- # retest to ensure cluster came back up correctly after rebooting
- py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
-
- vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}