Add more clarity to the role's task structure.
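
Tasks are now grouped into sub-directories by purpose, e.g.:

    checks/         check_system.yml, check_mandatory_vars.yml
    installs/       install_on_redhat.yml, install_on_debian.yml
    misc/           system_tuning.yml
    pre_requisites/ prerequisite_ice.yml, prerequisite_rh_storage_iso_install.yml,
                    prerequisite_rh_storage_cdn_install.yml
    scenarios/      journal_collocation.yml, raw_multi_journal.yml,
                    osd_directory.yml, docker.yml

main.yml now mostly carries the includes and their run conditions.
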
Signed-off-by: leseb <seb@redhat.com>
+++ /dev/null
----
-- name: make sure an installation source was chosen
- fail: msg="choose an installation source or read https://github.com/ceph/ceph-ansible/wiki"
- when:
- not ceph_stable and
- not ceph_dev and
- not ceph_stable_ice and
- not ceph_stable_rh_storage
-
-- name: verify that a method was chosen for red hat storage
- fail: msg="choose between ceph_stable_rh_storage_cdn_install and ceph_stable_rh_storage_iso_install"
- when:
- ceph_stable_rh_storage and
- not ceph_stable_rh_storage_cdn_install and
- not ceph_stable_rh_storage_iso_install
-
-- name: make sure journal_size configured
- fail: msg="journal_size must be configured. See http://ceph.com/docs/master/rados/configuration/osd-config-ref/"
- when: journal_size|int == 0
-
-- name: make sure monitor_interface configured
- fail: msg="monitor_interface must be configured. Interface for the monitor to listen on"
- when: monitor_interface == 'interface'
-
-- name: make sure cluster_network configured
- fail: msg="cluster_network must be configured. Ceph replication network"
- when: cluster_network == '0.0.0.0/0'
-
-- name: make sure public_network configured
- fail: msg="public_network must be configured. Ceph public network"
- when: public_network == '0.0.0.0/0'
-
-- name: make sure an osd scenario was chosen
- fail: msg="please choose an osd scenario"
- when:
- osd_group_name is defined and
- not journal_collocation and
- not raw_multi_journal and
- not osd_directory
-
-- name: verify devices have been provided
- fail: msg="please provide devices to your osd scenario"
- when:
- osd_group_name is defined and
- (journal_collocation or raw_multi_journal) and
- devices is not defined
-
-- name: verify journal devices have been provided
- fail: msg="please provide devices to your osd scenario"
- when:
- osd_group_name is defined and
- raw_multi_journal and
- raw_journal_devices is not defined
-
-- name: verify directories have been provided
- fail: msg="please provide directories to your osd scenario"
- when:
- osd_group_name is defined and
- osd_group_name in group_names and
- osd_directory and
- osd_directories is not defined
--- /dev/null
+---
+- name: make sure an installation source was chosen
+ fail: msg="choose an installation source or read https://github.com/ceph/ceph-ansible/wiki"
+ when:
+ not ceph_stable and
+ not ceph_dev and
+ not ceph_stable_ice and
+ not ceph_stable_rh_storage
+
+- name: verify that a method was chosen for red hat storage
+ fail: msg="choose between ceph_stable_rh_storage_cdn_install and ceph_stable_rh_storage_iso_install"
+ when:
+ ceph_stable_rh_storage and
+ not ceph_stable_rh_storage_cdn_install and
+ not ceph_stable_rh_storage_iso_install
+
+- name: make sure journal_size is configured
+ fail: msg="journal_size must be configured. See http://ceph.com/docs/master/rados/configuration/osd-config-ref/"
+ when: journal_size|int == 0
+
+- name: make sure monitor_interface is configured
+ fail: msg="monitor_interface must be configured. Interface for the monitor to listen on"
+ when: monitor_interface == 'interface'
+
+- name: make sure cluster_network is configured
+ fail: msg="cluster_network must be configured. Ceph replication network"
+ when: cluster_network == '0.0.0.0/0'
+
+- name: make sure public_network is configured
+ fail: msg="public_network must be configured. Ceph public network"
+ when: public_network == '0.0.0.0/0'
+
+- name: make sure an osd scenario was chosen
+ fail: msg="please choose an osd scenario"
+ when:
+ osd_group_name is defined and
+ not journal_collocation and
+ not raw_multi_journal and
+ not osd_directory
+
+- name: verify devices have been provided
+ fail: msg="please provide devices to your osd scenario"
+ when:
+ osd_group_name is defined and
+ (journal_collocation or raw_multi_journal) and
+ devices is not defined
+
+- name: verify journal devices have been provided
+ fail: msg="please provide devices to your osd scenario"
+ when:
+ osd_group_name is defined and
+ raw_multi_journal and
+ raw_journal_devices is not defined
+
+- name: verify directories have been provided
+ fail: msg="please provide directories to your osd scenario"
+ when:
+ osd_group_name is defined and
+ osd_group_name in group_names and
+ osd_directory and
+ osd_directories is not defined
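+
+# For reference, a minimal configuration satisfying the checks above could
+# look like this (illustrative values):
+#
+# ceph_stable: true
+# journal_size: 5120
+# monitor_interface: eth1
+# public_network: 192.168.0.0/24
+# cluster_network: 192.168.1.0/24
+# journal_collocation: true
+# devices:
+# - /dev/sdb
+# - /dev/sdc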
--- /dev/null
+---
+- name: fail on unsupported system
+ fail: msg="System {{ ansible_system }} is not supported"
+ when: "ansible_system not in ['Linux']"
+
+- name: fail on unsupported architecture
+ fail: msg="Architecture {{ ansible_architecture }} is not supported"
+ when: "ansible_architecture not in ['x86_64']"
+
+- name: fail on unsupported distribution
+ fail: msg="Distribution {{ ansible_os_family }} is not supported"
+ when: "ansible_os_family not in ['Debian', 'RedHat']"
+
+- name: fail on unsupported distribution for red hat storage
+ fail: msg="Distribution {{ ansible_distribution_version }} is not supported by Red Hat Storage, only RHEL 7.1 is"
+ when:
+ ceph_stable_rh_storage and
+ ansible_distribution_version | version_compare('7.1', '<')
+++ /dev/null
----
-- name: install dependencies
- apt: >
- pkg={{ item }}
- state=present
- update_cache=yes
- cache_valid_time=3600
- with_items:
- - python-pycurl
- - ntp
- - hdparm
-
-- name: install the Ceph repository stable key
- apt_key: >
- data="{{ lookup('file', 'cephstable.asc') }}"
- state=present
- when: ceph_stable
-
-- name: install the Ceph development repository key
- apt_key: >
- data="{{ lookup('file', 'cephdev.asc') }}"
- state=present
- when: ceph_dev
-
-- name: install Intank Ceph Enterprise repository key
- apt_key: >
- data="{{ lookup('file', 'cephstableice.asc') }}"
- state=present
- when: ceph_stable_ice
-
-- name: add Ceph stable repository
- apt_repository: >
- repo="deb http://ceph.com/debian-{{ ceph_stable_release }}/ {{ ceph_stable_distro_source | default(ansible_lsb.codename) }} main"
- state=present
- changed_when: false
- when: ceph_stable
-
-- name: add Ceph development repository
- apt_repository: >
- repo="deb http://gitbuilder.ceph.com/ceph-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/{{ ceph_dev_branch }} {{ ansible_lsb.codename }} main"
- state=present
- changed_when: false
- when: ceph_dev
-
-- name: add Inktank Ceph Enterprise repository
- apt_repository: >
- repo="deb file://{{ ceph_stable_ice_temp_path }} {{ ansible_lsb.codename }} main"
- state=present
- changed_when: false
- when: ceph_stable_ice
-
-- name: install Ceph
- apt: >
- pkg={{ item }}
- state=present
- with_items:
- - ceph
- - ceph-common #|
- - ceph-fs-common #|--> yes, they are already all dependencies from 'ceph'
- - ceph-fuse #|--> however while proceding to rolling upgrades and the 'ceph' package upgrade
- - ceph-mds #|--> they don't get update so we need to force them
- - libcephfs1 #|
-
-- name: configure rbd clients directories
- file: >
- path={{ item }}
- state=directory
- owner=libvirt-qemu
- group=kvm
- mode=0755
- with_items:
- - rbd_client_log_path
- - rbd_client_admin_socket_path
- when: rbd_client_directories
+++ /dev/null
----
-- name: install dependencies
- yum: >
- name={{ item }}
- state=present
- with_items:
- - python-pycurl
- - ntp
- - hdparm
- - yum-plugin-priorities.noarch
- - epel-release
-
-- name: install the Ceph stable repository key
- rpm_key: >
- key={{ ceph_stable_key }}
- state=present
- when: ceph_stable
-
-- name: install the Ceph development repository key
- rpm_key: >
- key={{ ceph_dev_key }}
- state=present
- when: ceph_dev
-
-- name: install Inktank Ceph Enterprise repository key
- rpm_key: >
- key={{ ceph_stable_ice_temp_path }}/release.asc
- state=present
- when: ceph_stable_ice
-
-- name: install red hat storage repository key
- rpm_key: >
- key={{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release
- state=present
- when:
- ceph_stable_rh_storage and
- ceph_stable_rh_storage_iso_install
-
-- name: add Ceph stable repository
- yum: name=http://ceph.com/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro|replace('rhel', 'el') }}.noarch.rpm
- changed_when: false
- when: ceph_stable
-
-- name: add Ceph development repository
- yum: name=http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm
- changed_when: false
- when: ceph_dev
-
-- name: add Inktank Ceph Enterprise repository
- template: >
- src=redhat_ice_repo.j2
- dest=/etc/yum.repos.d/ice.repo
- owner=root
- group=root
- mode=0644
- when: ceph_stable_ice
-
-- name: add red hat storage repository
- template: >
- src=redhat_storage_repo.j2
- dest=/etc/yum.repos.d/rh_storage.repo
- owner=root
- group=root
- mode=0644
- when:
- ceph_stable_rh_storage and
- ceph_stable_rh_storage_iso_install
-
-- name: install Ceph
- yum: >
- name=ceph
- state=present
- when: not ceph_stable_rh_storage
-
-- name: install red hat storage ceph mon
- yum: >
- name={{ item }}
- state=present
- with_items:
- - ceph
- - ceph-mon
- when:
- ceph_stable_rh_storage and
- mon_group_name in group_names
-
-- name: install red hat storage ceph osd
- yum: >
- name={{ item }}
- state=present
- with_items:
- - ceph
- - ceph-osd
- when:
- ceph_stable_rh_storage and
- osd_group_name in group_names
-
-- name: install Inktank Ceph Enterprise RBD Kernel modules
- yum: >
- name={{ item }}
- with_items:
- - "{{ ceph_stable_ice_temp_path }}/kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm"
- - "{{ ceph_stable_ice_temp_path }}/kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm"
- when: ceph_stable_ice
-
-- name: configure rbd clients directories
- file: >
- path={{ item }}
- state=directory
- owner=qemu
- group=libvirtd
- mode=0755
- with_items:
- - rbd_client_log_path
- - rbd_client_admin_socket_path
- when: rbd_client_directories
--- /dev/null
+---
+- name: install dependencies
+ apt: >
+ pkg={{ item }}
+ state=present
+ update_cache=yes
+ cache_valid_time=3600
+ with_items:
+ - python-pycurl
+ - ntp
+ - hdparm
+
+- name: install the ceph stable repository key
+ apt_key: >
+ data="{{ lookup('file', '../../files/cephstable.asc') }}"
+ state=present
+ when: ceph_stable
+
+- name: install the ceph development repository key
+ apt_key: >
+ data="{{ lookup('file', '../../files/cephdev.asc') }}"
+ state=present
+ when: ceph_dev
+
+- name: install inktank ceph enterprise repository key
+ apt_key: >
+ data="{{ lookup('file', '../../files/cephstableice.asc') }}"
+ state=present
+ when: ceph_stable_ice
+
+- name: add ceph stable repository
+ apt_repository: >
+ repo="deb http://ceph.com/debian-{{ ceph_stable_release }}/ {{ ceph_stable_distro_source | default(ansible_lsb.codename) }} main"
+ state=present
+ changed_when: false
+ when: ceph_stable
+
+- name: add ceph development repository
+ apt_repository: >
+ repo="deb http://gitbuilder.ceph.com/ceph-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/{{ ceph_dev_branch }} {{ ansible_lsb.codename }} main"
+ state=present
+ changed_when: false
+ when: ceph_dev
+
+- name: add inktank ceph enterprise repository
+ apt_repository: >
+ repo="deb file://{{ ceph_stable_ice_temp_path }} {{ ansible_lsb.codename }} main"
+ state=present
+ changed_when: false
+ when: ceph_stable_ice
+
+- name: install ceph
+ apt: >
+ pkg={{ item }}
+ state=present
+ with_items:
+ - ceph
+ - ceph-common #|
+ - ceph-fs-common #|--> yes, they are all already dependencies of 'ceph'
+ - ceph-fuse #|--> however, during rolling upgrades of the 'ceph' package
+ - ceph-mds #|--> they don't get updated, so we need to force them
+ - libcephfs1 #|
+
+- name: configure rbd clients directories
+ file: >
+ path={{ item }}
+ state=directory
+ owner=libvirt-qemu
+ group=kvm
+ mode=0755
+ with_items:
+ - rbd_client_log_path
+ - rbd_client_admin_socket_path
+ when: rbd_client_directories
--- /dev/null
+---
+- name: install dependencies
+ yum: >
+ name={{ item }}
+ state=present
+ with_items:
+ - python-pycurl
+ - ntp
+ - hdparm
+ - yum-plugin-priorities.noarch
+ - epel-release
+
+- name: install the ceph stable repository key
+ rpm_key: >
+ key={{ ceph_stable_key }}
+ state=present
+ when: ceph_stable
+
+- name: install the ceph development repository key
+ rpm_key: >
+ key={{ ceph_dev_key }}
+ state=present
+ when: ceph_dev
+
+- name: install inktank ceph enterprise repository key
+ rpm_key: >
+ key={{ ceph_stable_ice_temp_path }}/release.asc
+ state=present
+ when: ceph_stable_ice
+
+- name: install red hat storage repository key
+ rpm_key: >
+ key={{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release
+ state=present
+ when:
+ ceph_stable_rh_storage and
+ ceph_stable_rh_storage_iso_install
+
+- name: add ceph stable repository
+ yum: name=http://ceph.com/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro|replace('rhel', 'el') }}.noarch.rpm
+ changed_when: false
+ when: ceph_stable
+
+- name: add ceph development repository
+ yum: name=http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm
+ changed_when: false
+ when: ceph_dev
+
+- name: add inktank ceph enterprise repository
+ template: >
+ src=redhat_ice_repo.j2
+ dest=/etc/yum.repos.d/ice.repo
+ owner=root
+ group=root
+ mode=0644
+ when: ceph_stable_ice
+
+- name: add red hat storage repository
+ template: >
+ src=redhat_storage_repo.j2
+ dest=/etc/yum.repos.d/rh_storage.repo
+ owner=root
+ group=root
+ mode=0644
+ when:
+ ceph_stable_rh_storage and
+ ceph_stable_rh_storage_iso_install
+
+- name: install ceph
+ yum: >
+ name=ceph
+ state=present
+ when: not ceph_stable_rh_storage
+
+- name: install red hat storage ceph mon
+ yum: >
+ name={{ item }}
+ state=present
+ with_items:
+ - ceph
+ - ceph-mon
+ when:
+ ceph_stable_rh_storage and
+ mon_group_name in group_names
+
+- name: install red hat storage ceph osd
+ yum: >
+ name={{ item }}
+ state=present
+ with_items:
+ - ceph
+ - ceph-osd
+ when:
+ ceph_stable_rh_storage and
+ osd_group_name in group_names
+
+- name: install inktank ceph enterprise rbd kernel modules
+ yum: >
+ name={{ item }}
+ with_items:
+ - "{{ ceph_stable_ice_temp_path }}/kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm"
+ - "{{ ceph_stable_ice_temp_path }}/kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm"
+ when: ceph_stable_ice
+
+- name: configure rbd clients directories
+ file: >
+ path={{ item }}
+ state=directory
+ owner=qemu
+ group=libvirtd
+ mode=0755
+ with_items:
+ - rbd_client_log_path
+ - rbd_client_admin_socket_path
+ when: rbd_client_directories
---
-- include: os_check.yml
+- include: ./checks/check_system.yml
-- include: check_mandatory_vars.yml
+- include: ./checks/check_mandatory_vars.yml
-- include: os_tuning.yml
+- include: ./misc/system_tuning.yml
when: osd_group_name in group_names
-- include: prerequisite_ice.yml
+- include: ./pre_requisites/prerequisite_ice.yml
when: ceph_stable_ice
-- include: prerequisite_rh_storage_iso_install.yml
+- include: ./pre_requisites/prerequisite_rh_storage_iso_install.yml
when:
ceph_stable_rh_storage and
ceph_stable_rh_storage_iso_install
-- include: prerequisite_rh_storage_cdn_install.yml
+- include: ./pre_requisites/prerequisite_rh_storage_cdn_install.yml
when:
ceph_stable_rh_storage and
ceph_stable_rh_storage_cdn_install
-- include: install_on_redhat.yml
+- include: ./installs/install_on_redhat.yml
when: ansible_os_family == 'RedHat'
-- include: install_on_debian.yml
+- include: ./installs/install_on_debian.yml
when: ansible_os_family == 'Debian'
-- name: check for a Ceph socket
+- name: check for a ceph socket
shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
changed_when: false
ignore_errors: true
register: socketrgw
-- name: generate cluster UUID
+- name: generate cluster uuid
local_action: shell uuidgen | tee fetch/ceph_cluster_uuid.conf
creates=fetch/ceph_cluster_uuid.conf
register: cluster_uuid
sudo: false
when: fsid != '4a158d27-f750-41d5-9e7f-26ce4c9d2d45'
-- name: read cluster UUID if it already exists
+- name: read cluster uuid if it already exists
local_action: command cat fetch/ceph_cluster_uuid.conf
removes=fetch/ceph_cluster_uuid.conf
changed_when: false
sudo: false
when: fsid != '4a158d27-f750-41d5-9e7f-26ce4c9d2d45'
-- name: generate Ceph configuration file
+- name: generate ceph configuration file
template: >
src=ceph.conf.j2
dest=/etc/ceph/ceph.conf
--- /dev/null
+---
+- name: disable osd directory parsing by updatedb
+ command: updatedb -e /var/lib/ceph
+ changed_when: false
+ ignore_errors: true
+
+- name: disable transparent hugepage
+ command: "echo never > /sys/kernel/mm/transparent_hugepage/enabled"
+ changed_when: false
+ ignore_errors: true
+ when: disable_transparent_hugepage
+
+- name: disable swap
+ command: swapoff -a
+ changed_when: false
+ ignore_errors: true
+ when: disable_swap
+
+- name: apply operating system tuning
+ sysctl: >
+ name={{ item.name }}
+ value={{ item.value }}
+ state=present
+ sysctl_file=/etc/sysctl.conf
+ ignoreerrors=yes
+ with_items: os_tuning_params
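+
+# os_tuning_params is expected to be a list of sysctl name/value pairs, e.g.
+# (illustrative values):
+#
+# os_tuning_params:
+# - { name: kernel.pid_max, value: 4194303 }
+# - { name: fs.file-max, value: 26234859 }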
+++ /dev/null
----
-- name: fail on unsupported system
- fail: "msg=System not supported {{ ansible_system }}"
- when: "ansible_system not in ['Linux']"
-
-- name: fail on unsupported architecture
- fail: "msg=Architecture not supported {{ ansible_architecture }}"
- when: "ansible_architecture not in ['x86_64']"
-
-- name: fail on unsupported distribution
- fail: "msg=Distribution not supported {{ ansible_os_family }}"
- when: "ansible_os_family not in ['Debian', 'RedHat']"
-
-- name: fail on unsupported distribution for red hat storage
- fail: "msg=Distribution not supported {{ ansible_distribution_version }} by Red Hat Storage, only RHEL 7.1"
- when:
- ceph_stable_rh_storage and
- {{ ansible_distribution_version | version_compare('7.1', '<') }}
+++ /dev/null
----
-- name: disable OSD directory parsing by updatedb
- command: updatedb -e /var/lib/ceph
- changed_when: false
- ignore_errors: true
-
-- name: disable transparent hugepage
- command: "echo never > /sys/kernel/mm/transparent_hugepage/enabled"
- changed_when: false
- ignore_errors: true
- when: disable_transparent_hugepage
-
-- name: disable swap
- command: swapoff -a
- changed_when: false
- ignore_errors: true
- when: disable_swap
-
-- name: apply OS tuning
- sysctl: >
- name={{ item.name }}
- value={{ item.value }}
- state=present
- sysctl_file=/etc/sysctl.conf
- ignoreerrors=yes
- with_items: os_tuning_params
--- /dev/null
+---
+- name: create ice package directory
+ file: >
+ path={{ ceph_stable_ice_temp_path }}
+ state=directory
+ owner=root
+ group=root
+ mode=0644
+ when: ceph_stable_ice
+
+- name: get ice packages
+ get_url: >
+ url_username={{ ceph_stable_ice_user }}
+ url_password={{ ceph_stable_ice_password }}
+ url={{ ceph_stable_ice_url }}/{{ ceph_stable_ice_version }}/ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
+ dest={{ ceph_stable_ice_temp_path }}/ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
+ when: ceph_stable_ice
+
+- name: get ice kernel modules
+ get_url: >
+ url_username={{ ceph_stable_ice_user }}
+ url_password={{ ceph_stable_ice_password }}
+ url={{ ceph_stable_ice_url }}/{{ ceph_stable_ice_kmod_version }}/{{ item }}
+ dest={{ ceph_stable_ice_temp_path }}
+ with_items:
+ - kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm
+ - kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm
+ when:
+ ceph_stable_ice and
+ ansible_os_family == 'RedHat'
+
+- name: stat extracted ice repo files
+ stat: >
+ path={{ ceph_stable_ice_temp_path }}/ice_setup.py
+ register: repo_exist
+ when: ceph_stable_ice
+
+- name: extract ice packages
+ shell: >
+ tar -xzf ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
+ chdir={{ ceph_stable_ice_temp_path }}
+ changed_when: false
+ when:
+ ceph_stable_ice and
+ repo_exist.stat.exists == False
+
+- name: move ice extracted packages
+ shell: "mv {{ ceph_stable_ice_temp_path }}/ceph/*/* {{ ceph_stable_ice_temp_path }}"
+ changed_when: false
+ when:
+ ceph_stable_ice and
+ repo_exist.stat.exists == False
--- /dev/null
+---
+- name: determine if node is registered with subscription-manager.
+ command: subscription-manager identity
+ register: subscription
+ changed_when: false
+
+- name: check if the red hat storage monitor repo is already present
+ shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-mon-rpms
+ changed_when: false
+ ignore_errors: true
+ register: rh_storage_mon_repo
+ when: mon_group_name in group_names
+
+- name: enable red hat storage monitor repository
+ command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-mon-rpms
+ changed_when: false
+ when:
+ mon_group_name in group_names and
+ rh_storage_mon_repo.rc != 0
+
+- name: check if the red hat storage osd repo is already present
+ shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-osd-rpms
+ changed_when: false
+ ignore_errors: true
+ register: rh_storage_osd_repo
+ when: osd_group_name in group_names
+
+- name: enable red hat storage osd repository
+ command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-osd-rpms
+ changed_when: false
+ when:
+ osd_group_name in group_names and
+ rh_storage_osd_repo.rc != 0
+
+- name: check if the red hat storage rados gateway repo is already present
+ shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-tools-rpms
+ changed_when: false
+ ignore_errors: true
+ register: rh_storage_rgw_repo
+ when: rgw_group_name in group_names
+
+- name: enable red hat storage rados gateway repository
+ command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-tools-rpms
+ changed_when: false
+ when:
+ rgw_group_name in group_names and
+ rh_storage_rgw_repo.rc != 0
--- /dev/null
+---
+- name: create red hat storage package directories
+ file: >
+ path={{ item }}
+ state=directory
+ with_items:
+ - "{{ ceph_stable_rh_storage_mount_path }}"
+ - "{{ ceph_stable_rh_storage_repository_path }}"
+
+- name: fetch the red hat storage iso from the ansible server
+ copy: >
+ src={{ ceph_stable_rh_storage_iso_path }}
+ dest={{ ceph_stable_rh_storage_iso_path }}
+
+- name: mount red hat storage iso file
+ mount: >
+ name={{ ceph_stable_rh_storage_mount_path }}
+ src={{ ceph_stable_rh_storage_iso_path }}
+ fstype=iso9660
+ state=mounted
+
+- name: copy red hat storage iso content
+ shell:
+ cp -r {{ ceph_stable_rh_storage_mount_path }}/* {{ ceph_stable_rh_storage_repository_path }}
+ creates={{ ceph_stable_rh_storage_repository_path }}/README
+
+- name: unmount red hat storage iso file
+ mount: >
+ name={{ ceph_stable_rh_storage_mount_path }}
+ src={{ ceph_stable_rh_storage_iso_path }}
+ fstype=iso9660
+ state=unmounted
+++ /dev/null
----
-- name: create ICE package directory
- file: >
- path={{ ceph_stable_ice_temp_path }}
- state=directory
- owner=root
- group=root
- mode=0644
- when: ceph_stable_ice
-
-- name: get ICE packages
- get_url: >
- url_username={{ ceph_stable_ice_user }}
- url_password={{ ceph_stable_ice_password }}
- url={{ ceph_stable_ice_url }}/{{ ceph_stable_ice_version }}/ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
- dest={{ ceph_stable_ice_temp_path }}/ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
- when: ceph_stable_ice
-
-- name: get ICE Kernel Modules
- get_url: >
- url_username={{ ceph_stable_ice_user }}
- url_password={{ ceph_stable_ice_password }}
- url={{ ceph_stable_ice_url }}/{{ ceph_stable_ice_kmod_version }}/{{ item }}
- dest={{ ceph_stable_ice_temp_path }}
- with_items:
- - kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm
- - kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm
- when:
- ceph_stable_ice and
- ansible_os_family == 'RedHat'
-
-- name: stat extracted ICE repo files
- stat: >
- path={{ ceph_stable_ice_temp_path }}/ice_setup.py
- register: repo_exist
- when: ceph_stable_ice
-
-- name: extract ICE packages
- shell: >
- tar -xzf ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
- chdir={{ ceph_stable_ice_temp_path }}
- changed_when: false
- when:
- ceph_stable_ice and
- repo_exist.stat.exists == False
-
-- name: move ICE extracted packages
- shell: "mv {{ ceph_stable_ice_temp_path }}/ceph/*/* {{ ceph_stable_ice_temp_path }}"
- changed_when: false
- when:
- ceph_stable_ice and
- repo_exist.stat.exists == False
+++ /dev/null
----
-- name: determine if node is registered with subscription-manager.
- command: subscription-manager identity
- register: subscription
- changed_when: false
-
-- name: check if the red hat storage monitor repo is already present
- shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-mon-rpms
- changed_when: false
- ignore_errors: true
- register: rh_storage_mon_repo
- when: mon_group_name in group_names
-
-- name: enable red hat storage monitor repository
- command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-mon-rpms
- changed_when: false
- when:
- mon_group_name in group_names and
- rh_storage_mon_repo.rc != 0
-
-- name: check if the red hat storage osd repo is already present
- shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-osd-rpms
- changed_when: false
- ignore_errors: true
- register: rh_storage_osd_repo
- when: osd_group_name in group_names
-
-- name: enable red hat storage osd repository
- command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-osd-rpms
- changed_when: false
- when:
- osd_group_name in group_names and
- rh_storage_osd_repo.rc != 0
-
-- name: check if the red hat storage rados gateway repo is already present
- shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-tools-rpms
- changed_when: false
- ignore_errors: true
- register: rh_storage_rgw_repo
- when: rgw_group_name in group_names
-
-- name: enable red hat storage rados gateway repository
- command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-tools-rpms
- changed_when: false
- when:
- rgw_group_name in group_names and
- rh_storage_rgw_repo.rc != 0
+++ /dev/null
----
-- name: create red hat storage package directories
- file: >
- path={{ item }}
- state=directory
- with_items:
- - "{{ ceph_stable_rh_storage_mount_path }}"
- - "{{ ceph_stable_rh_storage_repository_path }}"
-
-- name: fetch the red hat storage iso from the ansible server
- copy: >
- src={{ ceph_stable_rh_storage_iso_path }}
- dest={{ ceph_stable_rh_storage_iso_path }}
-
-- name: mount red hat storage iso file
- mount: >
- name={{ ceph_stable_rh_storage_mount_path }}
- src={{ ceph_stable_rh_storage_iso_path }}
- fstype=iso9660
- state=mounted
-
-- name: copy red hat storage iso content
- shell:
- cp -r {{ ceph_stable_rh_storage_mount_path }}/* {{ ceph_stable_rh_storage_repository_path }}
- creates={{ ceph_stable_rh_storage_repository_path }}/README
-
-- name: mount red hat storage iso file
- mount: >
- name={{ ceph_stable_rh_storage_mount_path }}
- src={{ ceph_stable_rh_storage_iso_path }}
- fstype=iso9660
- state=unmounted
name=docker-py
version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
-- name: stat for Ceph config and keys
+- name: stat for ceph config and keys
stat: >
path={{ item }}
with_items: ceph_config_keys
ignore_errors: true
register: statconfig
-- name: try to fetch Ceph config and keys
+- name: try to fetch ceph config and keys
copy: >
src=fetch/docker_mon_files/"{{ item }}"
dest=/etc/ceph/
---
# Deploy Ceph metadata server(s)
-- name: copy MDS bootstrap key
+- name: copy mds bootstrap key
copy: >
src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring
dest=/var/lib/ceph/bootstrap-mds/ceph.keyring
mode=600
when: cephx
-- name: create MDS directory
+- name: create mds directory
file: >
path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}
state=directory
mode=0644
when: cephx
-- name: create MDS keyring
+- name: create mds keyring
command: >
ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.{{ ansible_hostname }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
creates=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
changed_when: false
when: cephx
-- name: set MDS key permissions
+- name: set mds key permissions
file: >
path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
mode=0600
changed_when: false
when: ansible_distribution != "Ubuntu"
-- name: start and add that the metadata service to the init sequence (Ubuntu)
+- name: start and add the metadata service to the init sequence (ubuntu)
service: >
name=ceph-mds
state=started
- name: wait for client.admin key exists
wait_for: path=/etc/ceph/ceph.client.admin.keyring
-- name: create Ceph REST API keyring
+- name: create ceph rest api keyring
command: >
ceph auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.restapi.keyring
creates=/etc/ceph/ceph.client.restapi.keyring
openstack_config and
cephx
-- name: find Ceph keys
+- name: find ceph keys
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
register: ceph_keys
changed_when: false
when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
-- name: create Ceph Filesystem
+- name: create ceph filesystem
command: ceph fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
changed_when: false
when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
name=docker-py
version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
-- name: stat for Ceph config and keys
+- name: stat for ceph config and keys
stat: >
path={{ item }}
with_items: ceph_config_keys
ignore_errors: true
register: statconfig
-- name: try to fetch Ceph config and keys
+- name: try to fetch ceph config and keys
copy: >
src=fetch/docker_mon_files/"{{ item }}"
dest=/etc/ceph/
- statconfig.results
when: item.1.stat.exists == False
-- name: run the Ceph Monitor docker image
+- name: run the ceph monitor docker image
docker: >
image="{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}"
name=ceph-{{ ansible_hostname }}
detach=yes
state=running
-- name: collect Ceph files to the Ansible server
+- name: collect ceph files to the ansible server
fetch: >
src={{ item }}
dest=fetch/docker_mon_files/{{ item }}
---
-- name: create OpenStack pool
+- name: create openstack pool
command: rados mkpool {{ item }}
with_items:
- "{{ openstack_glance_pool }}"
changed_when: false
ignore_errors: true
-- name: create OpenStack keys
+- name: create openstack keys
command: >
ceph auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/ceph.{{ item.name }}.keyring
creates=/etc/ceph/ceph.{{ item.name }}.keyring
---
-- name: collect all the pool
+- name: collect all the pools
command: rados lspools
register: ceph_pools
when: "{{ ceph_version.stdout | version_compare('0.94', '>=') }}"
when: ansible_distribution == "Ubuntu"
changed_when: false
-- name: start and add that the monitor service to the init sequence (Ubuntu)
+- name: start and add the monitor service to the init sequence (ubuntu)
service: >
name=ceph-mon
state=started
failed_when: false
when: ansible_os_family == 'RedHat'
-- name: get Ceph monitor version
+- name: get ceph monitor version
shell: ceph daemon mon."{{ ansible_hostname }}" version | cut -d '"' -f 4 | cut -f 1,2 -d '.'
changed_when: false
register: ceph_version
---
# NOTE (leseb) : this task is for disk devices only because of the explicit use of the first
# partition.
-- name: activate OSD(s) when device is a disk
+- name: activate osd(s) when device is a disk
command: |
ceph-disk activate {{ item.2 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
with_together:
item.1.rc != 0
# NOTE (leseb): this task is for partitions because we don't explicitly use a partition.
-- name: activate OSD(s) when device is a partition
+- name: activate osd(s) when device is a partition
command: "ceph-disk activate {{ item.1 }}"
with_together:
- ispartition.results
- include: osd_fragment.yml
when: crush_location
-- name: start and add that the OSD service to the init sequence
+- name: start and add the osd service(s) to the init sequence
service: >
name=ceph
state=started
+++ /dev/null
----
-- name: set config and keys paths
- set_fact:
- ceph_config_keys:
- - /etc/ceph/ceph.client.admin.keyring
- - /etc/ceph/ceph.conf
- - /etc/ceph/monmap
- - /etc/ceph/ceph.mon.keyring
- - /var/lib/ceph/bootstrap-osd/ceph.keyring
-
-- name: install docker-py
- pip: >
- name=docker-py
- version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
-
-- name: stat for Ceph config and keys
- stat: >
- path={{ item }}
- with_items: ceph_config_keys
- ignore_errors: true
- register: statconfig
-
-- name: try to fetch Ceph config and keys
- copy: >
- src=fetch/docker_mon_files/"{{ item }}"
- dest=/etc/ceph/
- owner=root
- group=root
- mode=600
- with_together:
- - ceph_config_keys
- - statconfig.results
- when: item.1.stat.exists == False
-
-- name: run the Ceph OSD docker image
- docker: >
- image="{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"
- name={{ ansible_hostname }}-osd-{{ item | regex_replace('/', '') }}
- net=host
- state=running
- privileged=yes
- env="CEPH_DAEMON=OSD_CEPH_DISK,OSD_DEVICE={{ item }}"
- volumes="/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev/:/dev/"
- with_items: ceph_osd_docker_devices
-
-- name: ensure ceph_osd service is running
- docker: >
- image="{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"
- name={{ ansible_hostname }}-osd-{{ item | regex_replace('/', '') }}
- state=started
- with_items: ceph_osd_docker_devices
+++ /dev/null
----
-## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
-
-- include: check_devices.yml
-- include: zap_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-# NOTE (alahouze): if the device is a partition, the parted command below has
-# failed, this is why we check if the device is a partition too.
-- name: automatic prepare OSD disk(s) without partitions
- command: ceph-disk prepare "/dev/{{ item.key }}"
- ignore_errors: true
- register: prepared_osds
- with_dict: ansible_devices
- when:
- ansible_devices is defined and
- item.value.removable == "0" and
- item.value.partitions|count == 0 and
- journal_collocation and
- osd_auto_discovery
-
-- name: manually Prepare OSD disk(s)
- command: "ceph-disk prepare {{ item.2 }}"
- ignore_errors: true
- with_together:
- - parted.results
- - ispartition.results
- - devices
- when:
- item.0.rc != 0 and
- item.1.rc != 0 and
- journal_collocation and not
- osd_auto_discovery
-
-- include: activate_osds.yml
- include: pre_requisite.yml
when: not ceph_containerized_deployment
-- include: journal_collocation.yml
+- include: ./scenarios/journal_collocation.yml
when: journal_collocation and not ceph_containerized_deployment
-- include: raw_multi_journal.yml
+- include: ./scenarios/raw_multi_journal.yml
when: raw_multi_journal and not ceph_containerized_deployment
-- include: osd_directory.yml
+- include: ./scenarios/osd_directory.yml
when: osd_directory and not ceph_containerized_deployment
-- include: docker.yml
+- include: ./scenarios/docker.yml
when: ceph_containerized_deployment
+++ /dev/null
----
-## SCENARIO 4: USE A DIRECTORY INSTEAD OF A DISK FOR OSD
-
-# NOTE (leseb): we do not check the filesystem underneath the directory
-# so it is really up to you to configure this properly.
-# Declaring more than one directory on the same filesystem will confuse Ceph.
-- name: create OSD directories
- file: >
- path={{ item }}
- state=directory
- owner=root
- group=root
- with_items: osd_directories
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-- name: prepare OSD disk(s)
- command: "ceph-disk prepare {{ item }}"
- with_items: osd_directories
- changed_when: false
- when: osd_directory
-
-- name: activate OSD(s)
- command: "ceph-disk activate {{ item }}"
- with_items: osd_directories
- changed_when: false
-
-- name: start and add that the OSD service to the init sequence
- service: >
- name=ceph
- state=started
- enabled=yes
---
-- name: get OSD path
+- name: get osd path
shell: "df | grep {{ item }} | awk '{print $6}'"
with_items: devices
changed_when: false
ignore_errors: true
register: osd_path
-- name: get OSD id
+- name: get osd id
command: cat {{ item.stdout }}/whoami
with_items: osd_path.results
changed_when: false
ignore_errors: true
register: osd_id
-- name: create a Ceph fragment and assemble directory
+- name: create a ceph fragment and assemble directory
file: >
path={{ item }}
state=directory
- /etc/ceph/ceph.d/
- /etc/ceph/ceph.d/osd_fragments
-- name: create the OSD fragment
+- name: create the osd fragment
template: >
src=osd.conf.j2
dest=/etc/ceph/ceph.d/osd_fragments/osd.{{ item.stdout }}.conf
command: cp /etc/ceph/ceph.conf /etc/ceph/ceph.d/
changed_when: false
-- name: assemble OSD sections
+- name: assemble osd sections
assemble: >
src=/etc/ceph/ceph.d/osd_fragments/
dest=/etc/ceph/ceph.d/osd.conf
group=root
mode=0644
-- name: assemble Ceph conf and OSD fragments
+- name: assemble ceph conf and osd fragments
assemble: >
src=/etc/ceph/ceph.d/
dest=/etc/ceph/ceph.conf
state=present
when: ansible_os_family == 'RedHat'
-- name: copy OSD bootstrap key
+- name: copy osd bootstrap key
copy: >
src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring
dest=/var/lib/ceph/bootstrap-osd/ceph.keyring
+++ /dev/null
----
-## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS
-
-- include: check_devices.yml
-- include: zap_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-# NOTE (alahouze): if the device is a partition, the parted command below has
-# failed, this is why we check if the device is a partition too.
-- name: prepare OSD disk(s)
- command: "ceph-disk prepare {{ item.2 }} {{ item.3 }}"
- with_together:
- - parted.results
- - ispartition.results
- - devices
- - raw_journal_devices
- changed_when: false
- ignore_errors: true
- when:
- item.0.rc != 0 and
- item.1.rc != 0 and
- raw_multi_journal
-
-- include: activate_osds.yml
--- /dev/null
+---
+- name: set config and keys paths
+ set_fact:
+ ceph_config_keys:
+ - /etc/ceph/ceph.client.admin.keyring
+ - /etc/ceph/ceph.conf
+ - /etc/ceph/monmap
+ - /etc/ceph/ceph.mon.keyring
+ - /var/lib/ceph/bootstrap-osd/ceph.keyring
+
+- name: install docker-py
+ pip: >
+ name=docker-py
+ version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
+
+- name: stat for ceph config and keys
+ stat: >
+ path={{ item }}
+ with_items: ceph_config_keys
+ ignore_errors: true
+ register: statconfig
+
+- name: try to fetch ceph config and keys
+ copy: >
+ src=fetch/docker_mon_files/"{{ item }}"
+ dest=/etc/ceph/
+ owner=root
+ group=root
+ mode=600
+ with_together:
+ - ceph_config_keys
+ - statconfig.results
+ when: item.1.stat.exists == False
+
+- name: run the ceph osd docker image
+ docker: >
+ image="{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"
+ name={{ ansible_hostname }}-osd-{{ item | regex_replace('/', '') }}
+ net=host
+ state=running
+ privileged=yes
+ env="CEPH_DAEMON=OSD_CEPH_DISK,OSD_DEVICE={{ item }}"
+ volumes="/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev/:/dev/"
+ with_items: ceph_osd_docker_devices
+
+- name: ensure ceph_osd service is running
+ docker: >
+ image="{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"
+ name={{ ansible_hostname }}-osd-{{ item | regex_replace('/', '') }}
+ state=started
+ with_items: ceph_osd_docker_devices
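+
+# ceph_osd_docker_devices is expected to be a list of whole disk devices, one
+# containerized osd per device, e.g. (illustrative):
+#
+# ceph_osd_docker_devices:
+# - /dev/sdb
+# - /dev/sdc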
--- /dev/null
+---
+## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
+
+- include: ../check_devices.yml
+- include: ../zap_devices.yml
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will process the loop sequentially
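+# one possible (untested) way to speed this up would be ansible's async/poll
+# support, along these lines (register names are arbitrary):
+#
+# - name: prepare osd disk(s) in the background
+#   command: "ceph-disk prepare {{ item.2 }}"
+#   async: 3600
+#   poll: 0
+#   register: prepare_jobs
+#   with_together:
+#     - parted.results
+#     - ispartition.results
+#     - devices
+#
+# - name: wait for the osd disk(s) to be prepared
+#   async_status: jid={{ item.ansible_job_id }}
+#   register: job_result
+#   until: job_result.finished
+#   retries: 360
+#   with_items: prepare_jobs.results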
+
+# NOTE (alahouze): if the device is a partition, the parted command below has
+# failed, this is why we check if the device is a partition too.
+- name: automatically prepare osd disk(s) without partitions
+ command: ceph-disk prepare "/dev/{{ item.key }}"
+ ignore_errors: true
+ register: prepared_osds
+ with_dict: ansible_devices
+ when:
+ ansible_devices is defined and
+ item.value.removable == "0" and
+ item.value.partitions|count == 0 and
+ journal_collocation and
+ osd_auto_discovery
+
+- name: manually prepare osd disk(s)
+ command: "ceph-disk prepare {{ item.2 }}"
+ ignore_errors: true
+ with_together:
+ - parted.results
+ - ispartition.results
+ - devices
+ when:
+ item.0.rc != 0 and
+ item.1.rc != 0 and
+ journal_collocation and not
+ osd_auto_discovery
+
+- include: ../activate_osds.yml
--- /dev/null
+---
+## SCENARIO 4: USE A DIRECTORY INSTEAD OF A DISK FOR OSD
+
+# NOTE (leseb): we do not check the filesystem underneath the directory
+# so it is really up to you to configure this properly.
+# Declaring more than one directory on the same filesystem will confuse Ceph.
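+# for example (illustrative), each declared directory should sit on its own
+# dedicated filesystem:
+#
+# osd_directories:
+# - /srv/osd1 # e.g. /dev/sdb1 mounted on /srv/osd1
+# - /srv/osd2 # e.g. /dev/sdc1 mounted on /srv/osd2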
+- name: create osd directories
+ file: >
+ path={{ item }}
+ state=directory
+ owner=root
+ group=root
+ with_items: osd_directories
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will process the loop sequentially
+- name: prepare osd directories
+ command: "ceph-disk prepare {{ item }}"
+ with_items: osd_directories
+ changed_when: false
+ when: osd_directory
+
+- name: activate osd(s)
+ command: "ceph-disk activate {{ item }}"
+ with_items: osd_directories
+ changed_when: false
+
+- name: start and add the osd service to the init sequence
+ service: >
+ name=ceph
+ state=started
+ enabled=yes
--- /dev/null
+---
+## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS
+
+- include: ../check_devices.yml
+- include: ../zap_devices.yml
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will process the loop sequentially
+
+# NOTE (alahouze): if the device is a partition, the parted command below has
+# failed, this is why we check if the device is a partition too.
+- name: prepare osd disk(s)
+ command: "ceph-disk prepare {{ item.2 }} {{ item.3 }}"
+ with_together:
+ - parted.results
+ - ispartition.results
+ - devices
+ - raw_journal_devices
+ changed_when: false
+ ignore_errors: true
+ when:
+ item.0.rc != 0 and
+ item.1.rc != 0 and
+ raw_multi_journal
+
+- include: ../activate_osds.yml
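+
+# for reference, devices and raw_journal_devices are consumed pairwise by the
+# prepare task above, e.g. (illustrative) two data disks sharing one journal ssd:
+#
+# devices:
+# - /dev/sdb
+# - /dev/sdc
+# raw_journal_devices:
+# - /dev/sdf
+# - /dev/sdf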
# NOTE (leseb): some devices might be missing a partition label, which will result
# in ceph-disk failing to prepare the OSD. Thus zapping them prior to preparing the OSD
# ensures that the device will get successfully prepared.
-- name: erasing partitions and labels from OSD disk(s)
+- name: erase partitions and labels from osd disk(s)
command: ceph-disk zap {{ item.2 }}
changed_when: false
with_together:
name=docker-py
version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
-- name: stat for Ceph config and keys
+- name: stat for ceph config and keys
stat: >
path={{ item }}
with_items: ceph_config_keys
ignore_errors: true
register: statconfig
-- name: try to fetch Ceph config and keys
+- name: try to fetch ceph config and keys
copy: >
src=fetch/docker_mon_files/"{{ item }}"
dest=/etc/ceph/
+++ /dev/null
----
-- name: add ceph extra
- apt_repository: >
- repo="deb http://ceph.com/packages/ceph-extras/debian {{ ansible_lsb.codename }} main"
- state=present
- when: ansible_lsb.codename in ['natty', 'oneiric', 'precise', 'quantal', 'raring', 'sid', 'squeeze', 'wheezy']
-
-# NOTE (leseb): needed for Ubuntu 12.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
-- name: enable multiverse repo for precise
- apt_repository: >
- repo="{{ item }}"
- state=present
- with_items:
- - deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }} multiverse
- - deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-updates multiverse
- - deb http://security.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-security multiverse
- when:
- ansible_lsb.codename in ['precise'] and not
- http_100_continue
-
-# NOTE (leseb): disable the repo when we are using the Ceph repo for 100-continue packages
-- name: disable multiverse repo for precise
- apt_repository: >
- repo="{{ item }}"
- state=absent
- with_items:
- - deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }} multiverse
- - deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-updates multiverse
- - deb http://security.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-security multiverse
- when:
- ansible_lsb.codename in ['precise'] and
- http_100_continue
-
-# NOTE (leseb): needed for Ubuntu 14.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
-- name: enable multiverse repo for trusty
- command: "apt-add-repository multiverse"
- changed_when: false
- when:
- ansible_lsb.codename in ['trusty'] and not
- http_100_continue
-
-# NOTE (leseb): disable the repo when we are using the Ceph repo for 100-continue packages
-- name: disable multiverse repo for trusty
- command: "apt-add-repository -r multiverse"
- changed_when: false
- when:
- ansible_lsb.codename in ['trusty'] and
- http_100_continue
-
-# NOTE (leseb): if using 100-continue, add Ceph dev key
-- name: install the ceph development repository key
- apt_key: >
- data="{{ lookup('file', 'cephdev.asc') }}"
- state=present
- when: http_100_continue
-
-# NOTE (leseb): if using 100-continue, add Ceph sources and update
-- name: add ceph apache and fastcgi sources
- apt_repository: >
- repo="{{ item }}"
- state=present
- with_items:
- - deb http://gitbuilder.ceph.com/apache2-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
- - deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
- register: purge_default_apache
- when: http_100_continue
-
-# NOTE (leseb): else remove them to ensure you use the default packages
-- name: remove ceph apache and fastcgi sources
- apt_repository: >
- repo="{{ item }}"
- state=absent
- with_items:
- - deb http://gitbuilder.ceph.com/apache2-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
- - deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
- register: purge_ceph_apache
- when: not http_100_continue
-
-# NOTE (leseb): purge Ceph Apache and FastCGI packages if needed
-- name: purge ceph apache and fastcgi packages
- apt: >
- pkg="{{ item }}"
- state=absent
- purge=yes
- with_items:
- - apache2
- - apache2-bin
- - apache2-data
- - apache2-mpm-worker
- - apache2-utils
- - apache2.2-bin
- - apache2.2-common
- - libapache2-mod-fastcgi
- when:
- purge_default_apache.changed or
- purge_ceph_apache.changed
-
-- name: install apache and fastcgi
- apt: >
- pkg={{ item }}
- state=present
- update_cache=yes
- with_items:
- - apache2
- - libapache2-mod-fastcgi
-
-- name: install default httpd.conf
- template: >
- src=httpd.conf
- dest=/etc/apache2/httpd.conf
- owner=root
- group=root
-
-- name: enable some apache mod rewrite and fastcgi
- command: "{{ item }}"
- with_items:
- - a2enmod rewrite
- - a2enmod fastcgi
- changed_when: false
-
-- name: install rados gateway vhost
- template: >
- src=rgw.conf
- dest=/etc/apache2/sites-available/rgw.conf
- owner=root
- group=root
-
-- name: enable rados gateway vhost and disable default site
- command: "{{ item }}"
- with_items:
- - a2ensite rgw.conf
- - a2dissite *default
- changed_when: false
- ignore_errors: true
- notify:
- - restart apache2
-
-- name: install s3gw.fcgi script
- template: >
- src=s3gw.fcgi.j2
- dest=/var/www/s3gw.fcgi
- mode=0555
- owner=root
- group=root
+++ /dev/null
----
-- name: add ceph extra
- template: >
- src=ceph-extra.repo
- dest=/etc/yum.repos.d
- owner=root
- group=root
-
-- name: add special fastcgi repository key
- rpm_key: key=http://dag.wieers.com/rpm/packages/RPM-GPG-KEY.dag.txt
-
-- name: add special fastcgi repository
- command: rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
- changed_when: false
-
-- name: install apache and fastcgi
- yum: >
- name={{ item }}
- state=present
- with_items:
- - httpd
- - mod_fastcgi
- - mod_fcgid
-
-- name: install rados gateway vhost
- template: >
- src=rgw.conf
- dest=/etc/httpd/conf.d/rgw.conf
- owner=root
- group=root
-
-- name: install s3gw.fcgi script
- template: >
- src=s3gw.fcgi.j2
- dest=/var/www/s3gw.fcgi
- mode=0555
- owner=root
- group=root
-
-- name: disable default site
- shell: sed -i "s/^[^+#]/#/g" /etc/httpd/conf.d/welcome.conf
- changed_when: false
- notify:
- - restart apache2
--- /dev/null
+---
+- name: add ceph extra
+ apt_repository: >
+ repo="deb http://ceph.com/packages/ceph-extras/debian {{ ansible_lsb.codename }} main"
+ state=present
+ when: ansible_lsb.codename in ['natty', 'oneiric', 'precise', 'quantal', 'raring', 'sid', 'squeeze', 'wheezy']
+
+# NOTE (leseb): needed for Ubuntu 12.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
+- name: enable multiverse repo for precise
+ apt_repository: >
+ repo="{{ item }}"
+ state=present
+ with_items:
+ - deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }} multiverse
+ - deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-updates multiverse
+ - deb http://security.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-security multiverse
+ when:
+ ansible_lsb.codename in ['precise'] and not
+ http_100_continue
+
+# NOTE (leseb): disable the repo when we are using the Ceph repo for 100-continue packages
+- name: disable multiverse repo for precise
+ apt_repository: >
+ repo="{{ item }}"
+ state=absent
+ with_items:
+ - deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }} multiverse
+ - deb http://archive.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-updates multiverse
+ - deb http://security.ubuntu.com/ubuntu {{ ansible_lsb.codename }}-security multiverse
+ when:
+ ansible_lsb.codename in ['precise'] and
+ http_100_continue
+
+# NOTE (leseb): needed for Ubuntu 14.04 to have access to libapache2-mod-fastcgi if 100-continue isn't being used
+- name: enable multiverse repo for trusty
+ command: "apt-add-repository multiverse"
+ changed_when: false
+ when:
+ ansible_lsb.codename in ['trusty'] and not
+ http_100_continue
+
+# NOTE (leseb): disable the repo when we are using the Ceph repo for 100-continue packages
+- name: disable multiverse repo for trusty
+ command: "apt-add-repository -r multiverse"
+ changed_when: false
+ when:
+ ansible_lsb.codename in ['trusty'] and
+ http_100_continue
+
+# NOTE (leseb): if using 100-continue, add Ceph dev key
+- name: install the ceph development repository key
+ apt_key: >
+ data="{{ lookup('file', 'cephdev.asc') }}"
+ state=present
+ when: http_100_continue
+
+# NOTE (leseb): if using 100-continue, add Ceph sources and update
+- name: add ceph apache and fastcgi sources
+ apt_repository: >
+ repo="{{ item }}"
+ state=present
+ with_items:
+ - deb http://gitbuilder.ceph.com/apache2-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
+ - deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
+ register: purge_default_apache
+ when: http_100_continue
+
+# NOTE (leseb): else remove them to ensure you use the default packages
+- name: remove ceph apache and fastcgi sources
+ apt_repository: >
+ repo="{{ item }}"
+ state=absent
+ with_items:
+ - deb http://gitbuilder.ceph.com/apache2-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
+ - deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main
+ register: purge_ceph_apache
+ when: not http_100_continue
+
+# NOTE (leseb): purge Ceph Apache and FastCGI packages if needed
+- name: purge ceph apache and fastcgi packages
+ apt: >
+ pkg="{{ item }}"
+ state=absent
+ purge=yes
+ with_items:
+ - apache2
+ - apache2-bin
+ - apache2-data
+ - apache2-mpm-worker
+ - apache2-utils
+ - apache2.2-bin
+ - apache2.2-common
+ - libapache2-mod-fastcgi
+ when:
+ purge_default_apache.changed or
+ purge_ceph_apache.changed
+
+- name: install apache and fastcgi
+ apt: >
+ pkg={{ item }}
+ state=present
+ update_cache=yes
+ with_items:
+ - apache2
+ - libapache2-mod-fastcgi
+
+- name: install default httpd.conf
+ template: >
+ src=httpd.conf
+ dest=/etc/apache2/httpd.conf
+ owner=root
+ group=root
+
+- name: enable apache mod_rewrite and mod_fastcgi
+ command: "{{ item }}"
+ with_items:
+ - a2enmod rewrite
+ - a2enmod fastcgi
+ changed_when: false
+
+- name: install rados gateway vhost
+ template: >
+ src=rgw.conf
+ dest=/etc/apache2/sites-available/rgw.conf
+ owner=root
+ group=root
+
+- name: enable rados gateway vhost and disable default site
+ command: "{{ item }}"
+ with_items:
+ - a2ensite rgw.conf
+ - a2dissite *default
+ changed_when: false
+ ignore_errors: true
+ notify:
+ - restart apache2
+
+- name: install s3gw.fcgi script
+ template: >
+ src=s3gw.fcgi.j2
+ dest=/var/www/s3gw.fcgi
+ mode=0555
+ owner=root
+ group=root
--- /dev/null
+---
+- name: add ceph extra
+ template: >
+ src=ceph-extra.repo
+ dest=/etc/yum.repos.d
+ owner=root
+ group=root
+
+- name: add special fastcgi repository key
+ rpm_key: key=http://dag.wieers.com/rpm/packages/RPM-GPG-KEY.dag.txt
+
+- name: add special fastcgi repository
+ command: rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
+ changed_when: false
+
+- name: install apache and fastcgi
+ yum: >
+ name={{ item }}
+ state=present
+ with_items:
+ - httpd
+ - mod_fastcgi
+ - mod_fcgid
+
+- name: install rados gateway vhost
+ template: >
+ src=rgw.conf
+ dest=/etc/httpd/conf.d/rgw.conf
+ owner=root
+ group=root
+
+- name: install s3gw.fcgi script
+ template: >
+ src=s3gw.fcgi.j2
+ dest=/var/www/s3gw.fcgi
+ mode=0555
+ owner=root
+ group=root
+
+- name: disable default site
+ shell: sed -i "s/^[^+#]/#/g" /etc/httpd/conf.d/welcome.conf
+ changed_when: false
+ notify:
+ - restart apache2
- include: pre_requisite.yml
when: not ceph_containerized_deployment
-- include: install_redhat.yml
+- include: ./installs/install_redhat.yml
when:
ansible_os_family == 'RedHat' and
radosgw_frontend == 'apache' and not
ceph_containerized_deployment
-- include: install_debian.yml
+- include: ./installs/install_debian.yml
when:
ansible_os_family == 'Debian' and
radosgw_frontend == 'apache' and not
name=docker-py
version=1.1.0 # https://github.com/ansible/ansible-modules-core/issues/1227
-- name: stat for Ceph config and keys
+- name: stat for ceph config and keys
stat: >
path={{ item }}
with_items: ceph_config_keys
ignore_errors: true
register: statconfig
-- name: try to fetch Ceph config and keys
+- name: try to fetch ceph config and keys
copy: >
src=fetch/docker_mon_files/"{{ item }}"
dest=/etc/ceph/
- statconfig.results
when: item.1.stat.exists == False
-- name: run the Ceph REST API docker image
+- name: run the ceph rest api docker image
docker: >
image="{{ ceph_restapi_docker_username }}/{{ ceph_restapi_docker_imagename }}"
name={{ ansible_hostname }}-ceph-restapi
env="RESTAPI_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_restapi_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=RESTAPI"
volumes="/etc/ceph:/etc/ceph"
-- name: ensure Ceph REST API service is running
+- name: ensure ceph rest api service is running
docker: >
image="{{ ceph_restapi_docker_username }}/{{ ceph_restapi_docker_imagename }}"
name="ceph-{{ ansible_hostname }}"
---
-- name: create Ceph REST API directory
+- name: create ceph rest api directory
file: >
path=/var/lib/ceph/restapi/ceph-restapi
state=directory
group=root
mode=0755
-- name: copy Ceph REST API keyring
+- name: copy ceph rest api keyring
copy: >
src=fetch/{{ fsid }}/etc/ceph/ceph.client.restapi.keyring
dest=/var/lib/ceph/restapi/ceph-restapi/keyring
mode=600
when: cephx
-- name: activate Ceph REST API with upstart
+- name: activate ceph rest api with upstart
file: >
path=/var/lib/ceph/restapi/{{ item }}
state=touch
changed_when: false
when: ansible_distribution == "Ubuntu"
-- name: activate Ceph REST API with sysvinit
+- name: activate ceph rest api with sysvinit
file: >
path=/var/lib/ceph/restapi/{{ item }}
state=touch
---
-- name: check if Ceph REST API is already started
+- name: check if ceph rest api is already started
shell: "pgrep ceph-rest-api"
changed_when: false
ignore_errors: true
register: restapi_status
-- name: start Ceph REST API
+- name: start ceph rest api
shell: "nohup ceph-rest-api &"
changed_when: false
when: restapi_status.rc != 0