From: James Saint-Rossy
Date: Tue, 9 Aug 2016 22:57:41 +0000 (-0400)
Subject: ceph_devices.yml cleanup and optimization
X-Git-Tag: v1.0.6~18^2~2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=5f61ff786719f45711a98bc1522a17b42fb33198;p=ceph-ansible.git

ceph_devices.yml cleanup and optimization
---

diff --git a/roles/ceph-osd/tasks/check_devices.yml b/roles/ceph-osd/tasks/check_devices.yml
index 8bb55f4ec..72dfa90e5 100644
--- a/roles/ceph-osd/tasks/check_devices.yml
+++ b/roles/ceph-osd/tasks/check_devices.yml
@@ -10,129 +10,26 @@
 # allow 2-digit partition numbers so fast SSDs can be shared by > 9 disks
 # for SSD journals.
 
-- name: check if the device is a partition
-  shell: "echo '{{ item }}' | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'"
-  with_items: devices
-  changed_when: false
-  failed_when: false
-  register: ispartition
+- include: ./check_devices_static.yml
   when: not osd_auto_discovery
 
-- name: check if the device is a partition (autodiscover disks)
-  shell: "echo '/dev/{{ item.key }}' | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'"
-  with_dict: ansible_devices
-  changed_when: false
-  failed_when: false
-  register: ispartition_autodiscover
-  when:
-    - ansible_devices is defined
-    - item.value.removable == "0"
-    - osd_auto_discovery
-
-# NOTE (leseb): we must do this because of
-# https://github.com/ansible/ansible/issues/4297
-- name: combine ispartition results
-  set_fact:
-    combined_ispartition_results: "{{ ispartition if not osd_auto_discovery else ispartition_autodiscover }}"
-
-- name: check the partition status of the osd disks
-  shell: "parted --script {{ item }} print > /dev/null 2>&1"
-  with_items: devices
-  changed_when: false
-  failed_when: false
-  register: osd_partition_status
-  when:
-    - journal_collocation or raw_multi_journal
-    - not osd_auto_discovery
-
-- name: check the partition status of the osd disks (autodiscover disks)
-  shell: "parted --script /dev/{{ item.key }} print > /dev/null 2>&1"
-  with_dict: ansible_devices
-  changed_when: false
-  failed_when: false
-  register: osd_partition_status_autodiscover
-  when:
-    - journal_collocation or raw_multi_journal
-    - ansible_devices is defined
-    - item.value.removable == "0"
-    - item.value.partitions|count == 0
-    - osd_auto_discovery
-
-# NOTE (leseb): we must do this because of
-# https://github.com/ansible/ansible/issues/4297
-- name: combine osd_partition_status results
-  set_fact:
-    combined_osd_partition_status_results: "{{ osd_partition_status if not osd_auto_discovery else osd_partition_status_autodiscover }}"
+- include: ./check_devices_auto.yml
+  when: osd_auto_discovery
 
 - name: check the partition status of the journal devices
   shell: "parted --script {{ item }} print > /dev/null 2>&1"
-  with_items: raw_journal_devices
+  with_items: '{{ raw_journal_devices }}'
   changed_when: false
   failed_when: false
   register: journal_partition_status
   when: raw_multi_journal
 
-# NOTE: The following calls to sgdisk are retried because sgdisk is known to
-# fully wipe a device the first time around. There is no need to halt execution
-# of zapping the whole device so these try again. It is easier to use `||` to
-# keep the current flow of the task.
-# See: https://github.com/ceph/ceph-ansible/issues/759
-- name: fix partitions gpt header or labels of the osd disks
-  shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
-  with_together:
-    - combined_osd_partition_status_results.results
-    - devices
-  changed_when: false
-  when:
-    - journal_collocation or raw_multi_journal
-    - not osd_auto_discovery
-    - item.0.rc != 0
-
-- name: fix partitions gpt header or labels of the osd disks (autodiscover disks)
-  shell: "sgdisk --zap-all --clear --mbrtogpt -- '/dev/{{ item.0.item.key }}' || sgdisk --zap-all --clear --mbrtogpt -- '/dev/{{ item.0.item.key }}'"
-  with_together:
-    - combined_osd_partition_status_results.results
-    - ansible_devices
-  changed_when: false
-  when:
-    - journal_collocation
-    - osd_auto_discovery
-    - ansible_devices is defined
-    - item.0.item.value.removable == "0"
-    - item.0.item.value.partitions|count == 0
-    - item.0.rc != 0
-
 - name: fix partitions gpt header or labels of the journal devices
   shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
   with_together:
-    - journal_partition_status.results
-    - raw_journal_devices
+    - '{{ journal_partition_status.results }}'
+    - '{{ raw_journal_devices }}'
   changed_when: false
   when:
     - raw_multi_journal
     - item.0.rc != 0
-
-- name: check if a partition named 'ceph' exists
-  shell: "parted --script {{ item }} print | egrep -sq '^ 1.*ceph'"
-  with_items: devices
-  changed_when: false
-  failed_when: false
-  register: parted
-  when: not osd_auto_discovery
-
-- name: check if a partition named 'ceph' exists (autodiscover disks)
-  shell: "parted --script /dev/{{ item.key }} print | egrep -sq '^ 1.*ceph'"
-  with_dict: ansible_devices
-  changed_when: false
-  failed_when: false
-  register: parted_autodiscover
-  when:
-    - ansible_devices is defined
-    - item.value.removable == "0"
-    - osd_auto_discovery
-
-# NOTE (leseb): we must do this because of
-# https://github.com/ansible/ansible/issues/4297
-- name: combine parted results
-  set_fact:
-    combined_parted_results: "{{ parted if not osd_auto_discovery else parted_autodiscover }}"
diff --git a/roles/ceph-osd/tasks/check_devices_auto.yml b/roles/ceph-osd/tasks/check_devices_auto.yml
new file mode 100644
index 000000000..d44ae5e60
--- /dev/null
+++ b/roles/ceph-osd/tasks/check_devices_auto.yml
@@ -0,0 +1,45 @@
+---
+- name: check if the device is a partition (autodiscover disks)
+  shell: "echo '/dev/{{ item.key }}' | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'"
+  with_dict: '{{ ansible_devices }}'
+  changed_when: false
+  failed_when: false
+  register: combined_ispartition_results
+  when:
+    - ansible_devices is defined
+    - item.value.removable == "0"
+
+- name: check the partition status of the osd disks (autodiscover disks)
+  shell: "parted --script /dev/{{ item.key }} print > /dev/null 2>&1"
+  with_dict: '{{ ansible_devices }}'
+  changed_when: false
+  failed_when: false
+  register: combined_osd_partition_status_results
+  when:
+    - journal_collocation or raw_multi_journal
+    - ansible_devices is defined
+    - item.value.removable == "0"
+    - item.value.partitions|count == 0
+
+- name: fix partitions gpt header or labels of the osd disks (autodiscover disks)
+  shell: "sgdisk --zap-all --clear --mbrtogpt -- '/dev/{{ item.0.item.key }}' || sgdisk --zap-all --clear --mbrtogpt -- '/dev/{{ item.0.item.key }}'"
+  with_together:
+    - '{{ combined_osd_partition_status_results.results }}'
+    - '{{ ansible_devices }}'
+  changed_when: false
+  when:
+    - journal_collocation
+    - ansible_devices is defined
+    - item.0.item.value.removable == "0"
+    - item.0.item.value.partitions|count == 0
+    - item.0.rc != 0
+
+- name: check if a partition named 'ceph' exists (autodiscover disks)
+  shell: "parted --script /dev/{{ item.key }} print | egrep -sq '^ 1.*ceph'"
+  with_dict: '{{ ansible_devices }}'
+  changed_when: false
+  failed_when: false
+  register: combined_parted_results
+  when:
+    - ansible_devices is defined
+    - item.value.removable == "0"
diff --git a/roles/ceph-osd/tasks/check_devices_static.yml b/roles/ceph-osd/tasks/check_devices_static.yml
new file mode 100644
index 000000000..1de6067d9
--- /dev/null
+++ b/roles/ceph-osd/tasks/check_devices_static.yml
@@ -0,0 +1,38 @@
+---
+- name: check if the device is a partition
+  shell: "echo '{{ item }}' | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'"
+  with_items: '{{ devices }}'
+  changed_when: false
+  failed_when: false
+  register: combined_ispartition_results
+
+- name: check the partition status of the osd disks
+  shell: "parted --script {{ item }} print > /dev/null 2>&1"
+  with_items: '{{ devices }}'
+  changed_when: false
+  failed_when: false
+  register: combined_osd_partition_status_results
+  when:
+    - journal_collocation or raw_multi_journal
+
+# NOTE: The following calls to sgdisk are retried because sgdisk is known to
+# fully wipe a device the first time around. There is no need to halt execution
+# of zapping the whole device so these try again. It is easier to use `||` to
+# keep the current flow of the task.
+# See: https://github.com/ceph/ceph-ansible/issues/759
+- name: fix partitions gpt header or labels of the osd disks
+  shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
+  with_together:
+    - '{{ combined_osd_partition_status_results.results }}'
+    - '{{ devices }}'
+  changed_when: false
+  when:
+    - journal_collocation or raw_multi_journal
+    - item.0.rc != 0
+
+- name: check if a partition named 'ceph' exists
+  shell: "parted --script {{ item }} print | egrep -sq '^ 1.*ceph'"
+  with_items: '{{ devices }}'
+  changed_when: false
+  failed_when: false
+  register: combined_parted_results
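
For reference, both the static and autodiscover "is a partition" checks rely on the same egrep pattern, which only accepts paths that end in a one- or two-digit partition number (the two-digit allowance is what lets a fast SSD journal be shared by more than 9 disks, per the comment at the top of the hunk). A quick shell sanity check of that pattern (illustrative only, not part of the commit):

    # Partition paths (trailing 1-2 digit number) match and exit 0;
    # whole-device paths do not match and exit 1.
    for dev in /dev/sda1 /dev/sdaa12 /dev/cciss/c0d0p2 /dev/nvme0n1p1 /dev/sda /dev/nvme0n1; do
        if echo "$dev" | egrep -q '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'; then
            echo "$dev: partition"
        else
            echo "$dev: whole device"
        fi
    done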
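
The sgdisk zap tasks in both files lean on the shell `||` idiom that the NOTE describes: if the first sgdisk pass exits non-zero, the identical command is simply run a second time instead of interrupting the flow (see ceph/ceph-ansible#759), and only a second failure makes the task fail. A minimal standalone sketch of that idiom, with a hypothetical target device:

    # Attempt the zap; on a non-zero exit, re-run the identical command once.
    disk=/dev/sdb   # hypothetical example device, not taken from the role
    sgdisk --zap-all --clear --mbrtogpt -- "$disk" || \
        sgdisk --zap-all --clear --mbrtogpt -- "$disk"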