]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph-volume tests.functional add bluestore,filestore playbooks
authorAlfredo Deza <adeza@redhat.com>
Thu, 9 Aug 2018 20:49:43 +0000 (16:49 -0400)
committerAlfredo Deza <adeza@redhat.com>
Sat, 11 Aug 2018 22:48:43 +0000 (18:48 -0400)
Signed-off-by: Alfredo Deza <adeza@redhat.com>
src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml [new file with mode: 0644]
src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml [new file with mode: 0644]
src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml [new file with mode: 0644]
src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml [new file with mode: 0644]
src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini [new file with mode: 0644]

diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore.yml
new file mode 100644 (file)
index 0000000..85c702e
--- /dev/null
@@ -0,0 +1,46 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@1 daemon
+      service:
+        name: ceph-osd@1
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.1
+      command: "ceph osd purge osd.1 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph osd purge osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: zap /dev/sdb
+      command: "ceph-volume lvm zap /dev/sdb --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+
+    - name: zap /dev/sdc
+      command: "ceph-volume lvm zap /dev/sdc --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: batch create /dev/sdb and /dev/sdc again
+      command: "ceph-volume lvm batch --yes --bluestore /dev/sdb /dev/sdc"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_bluestore_dmcrypt.yml
new file mode 100644 (file)
index 0000000..9e1a73f
--- /dev/null
@@ -0,0 +1,46 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@1 daemon
+      service:
+        name: ceph-osd@1
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.1
+      command: "ceph osd purge osd.1 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph osd purge osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: zap /dev/sdb
+      command: "ceph-volume lvm zap /dev/sdb --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+
+    - name: zap /dev/sdc
+      command: "ceph-volume lvm zap /dev/sdc --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: batch create /dev/sdb and /dev/sdc again
+      command: "ceph-volume lvm batch --yes --bluestore --dmcrypt /dev/sdb /dev/sdc"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore.yml
new file mode 100644 (file)
index 0000000..95909f9
--- /dev/null
@@ -0,0 +1,46 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@1 daemon
+      service:
+        name: ceph-osd@1
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.1
+      command: "ceph osd purge osd.1 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph osd purge osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: zap /dev/sdb
+      command: "ceph-volume lvm zap /dev/sdb --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+
+    - name: zap /dev/sdc
+      command: "ceph-volume lvm zap /dev/sdc --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: batch create /dev/sdb and /dev/sdc again
+      command: "ceph-volume lvm batch --yes --filestore /dev/sdb /dev/sdc"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_filestore_dmcrypt.yml
new file mode 100644 (file)
index 0000000..81f84e9
--- /dev/null
@@ -0,0 +1,46 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@1 daemon
+      service:
+        name: ceph-osd@1
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.1
+      command: "ceph osd purge osd.1 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph osd purge osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: zap /dev/sdb
+      command: "ceph-volume lvm zap /dev/sdb --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+
+    - name: zap /dev/sdc
+      command: "ceph-volume lvm zap /dev/sdc --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: batch create /dev/sdb and /dev/sdc again
+      command: "ceph-volume lvm batch --yes --filestore --dmcrypt /dev/sdb /dev/sdc"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
new file mode 100644 (file)
index 0000000..f3924f5
--- /dev/null
@@ -0,0 +1,56 @@
+[tox]
+envlist = {centos7,xenial}-{filestore-single-type,bluestore-single-type-dmcrypt}
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+    vagrant
+    bash
+    git
+passenv=*
+setenv=
+  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
+  ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
+  ANSIBLE_STDOUT_CALLBACK = debug
+  ANSIBLE_RETRY_FILES_ENABLED = False
+  ANSIBLE_SSH_RETRIES = 5
+  VAGRANT_CWD = {changedir}
+  CEPH_VOLUME_DEBUG = 1
+deps=
+  ansible~=2.6,<2.7
+  testinfra
+  pytest-xdist
+  notario>=0.0.13
+changedir=
+  centos7-filestore-single-type: {toxinidir}/centos7/filestore/single-type
+  centos7-bluestore-single-type-dmcrypt: {toxinidir}/centos7/bluestore/single-type-dmcrypt
+  xenial-filestore-single-type: {toxinidir}/xenial/filestore/single-type
+  xenial-bluestore-single-type-dmcrypt: {toxinidir}/xenial/bluestore/single-type-dmcrypt
+commands=
+  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+
+  vagrant up {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+  bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
+
+  # use ceph-ansible to deploy a ceph cluster on the vms
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/site.yml.sample --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}"
+
+  # prepare nodes for testing with testinfra
+  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+  # test cluster state using ceph-ansible tests
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
+  # reboot all vms - attempt
+  bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+
+  # retest to ensure cluster came back up correctly after rebooting
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
+  # destroy the OSDs, zap their devices, and batch-recreate them with ceph-volume
+  ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+
+  # retest to ensure cluster came back up correctly
+  testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
+  vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}