--- /dev/null
+---
+# Variables here are applicable to the current role
+
+## Setup options
+#
+distro_release: "{{ facter_lsbdistcodename }}" # Does not appear to be used.
+apt_key: http://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
+ceph_release: emperor
+redhat_distro: el6 # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11
+
+## Ceph options
+#
+cephx: true
+## Monitor options
+#
+monitor_interface: eth1
+mon_osd_down_out_interval: 600
+mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
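+# Example: with 6 OSDs per host, a value of 7 means the OSDs of a single
+# (possibly partitioned) host can never by themselves mark other OSDs down.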
+
+## MDS options
+#
+mds: true # set to false to disable MDS configuration in ceph.conf
+
+# Rados Gateway options
+#
+radosgw: true # referenced in mon role too.
+#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
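+# Hypothetical example: with radosgw_dns_name: objects.example.com, a bucket named
+# "foo" is addressed as foo.objects.example.com, which requires a wildcard DNS
+# record (*.objects.example.com) pointing at the gateway host.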
+
+## OSD options
+#
+journal_size: 100
+pool_default_pg_num: 128
+pool_default_pgp_num: 128
+pool_default_size: 2
+cluster_network: 192.168.0.0/24
+public_network: 192.168.0.0/24
+osd_mkfs_type: xfs
+osd_mon_heartbeat_interval: 30
+# Performance tuning
+filestore_merge_threshold: 40
+filestore_split_multiple: 8
+osd_op_threads: 8
+# Recovery tuning
+osd_recovery_max_active: 5
+osd_max_backfills: 2
+osd_recovery_op_priority: 2
+
+## Testing mode
+# enable this mode _only_ when you have a single node
+common_single_host_mode: true
--- /dev/null
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+mQINBE+5bugBEADP31ZaQNvhOOQxjDwL/VYDLhtaGq4Q74FCY23uSQAMboKwo4JB
+Te2JTSwBwU/RAPuWTrlKaQBPS30VF5SJN9t16llmoBWqhtBVf/lhQonC/28dTB6D
+KR7Ahiz4Nv2g9m1sLau86JblQuODo8vWHXxahYSLQSyyxIXnlE4K3c1k0S4feLqu
+ZxFtc2cFrQ/bUX9zXg6PXjDVAfY2R+x1JKGkVO/iwP+cjS1tCbvzdKcnQJEXpBwd
+yHvDBuF3IjuR9JgrBhb1ALqexhFKHzG1kHFfOZ3DLVohig68lfyjCepGgo0BPOyy
+S3Yk0QMumEaj9zRJurg49zWemX05XiBGt8SeCFxNUjXGYDIzSQ30K8fXmyjB74CW
+EUDUuTpTt7oZF9jKCjfKmQwvW4GgJ4J0FSwiorXPK27didjLJCnkTt43v0ZETMRW
+aADtiKFHl7lICuRmeXbd+6VkVqmoOz7ialMHnZ2KrHlqTcTPMd4llC4ayi2qS6Qb
+dIi1g9fa5YMS6I7yGxmW4AWwNy7SE8DsTja0aGFR9k432r+Vxtr52jrmP2vVexva
+CVaQkdk2/KEY3MjCPngiZwoTcOONYvNMvQaPrUtRuatcWJOgWsQVedY/UBxk968n
+JzfnNDngbcYDRnOD8wLWyBGyYbOdg1ucckLXFEtPVXoRER5JHMcYhyh+/QARAQAB
+tCRDZXBoIFJlbGVhc2UgS2V5IDxzYWdlQG5ld2RyZWFtLm5ldD6JAjgEEwECACIF
+Ak+5bugCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEH6/3V0X7TFtSjEP
+/A2pazEPwXrlQAHAjcXaFcPguKnXFrXRfbLpM9aZPR5gxH8mWl9RhEW/nL5pBf6A
+Tx7lQ4F/h9bDlf4/bejuxUflkrJEPVWkyPf3hvImjSBs+LBTk4OkpUJwYd9AynsG
+551Q0+6qxFfRVLCR6rLPHbMquXsKHROsSumEGUNrsMVC87gvtXEe/AOLUuRLEbjU
+QqGKP2+mvliizU844a11B/bXViXhkNZw66ESAuqOw0dVPTo6aPLhuSDDrGEHQNTz
+BsUseiUq795DqTE/5sL3lbTPrT1hKoIJFixYvaYBdygDgovsAi33nPn8UPitS5aD
+zGJ/ByDdnI4QW15NN1diMp+BuvOCWLpMaxVQNflARlxxtfIfnvaKjgccr1YOyT91
+5tlbdr0y05r1uYZjYU5/4llilypUgzzQB1jeetr06fOpVvswAAWQJiS5JJU+V84W
+r4sIBhZzGw1uvqNxIBWtk85W1ya7CmisRO7PZYW5lsLxZ48BxZhr45ar6/iDYreT
+OOeP1f9GoJW0X+FAocNc/pobY02MhB/BXV1LRM3lY+yOK3sskspnMihMqP7tSfop
+iJRtfXMLNdRRJFVZ5VSr1MCDK5RPQaqVsuvdtVqOJr1RwAQPjjzisOh+NYmvabkd
+cVxjSV5DX0fMODr2l7cAXxJjZsAs6AlnQOGPg/NXKdkZiEYEEBECAAYFAk+5cEAA
+CgkQ2kQg7SiJlcjJIACgsGpIw9ShLBciO3Y349ja7ILjC8cAnRrqoIpFxUrSIJF/
+8+w98auNwA18
+=uX7x
+-----END PGP PUBLIC KEY BLOCK-----
--- /dev/null
+---
+
+- name: "update apt cache"
+ apt: update_cache=yes
--- /dev/null
+---
+## Common to all the Ceph Debian nodes
+#
+
+- name: Fail on unsupported system
+ fail: msg="System not supported {{ ansible_system }}"
+ when: ansible_system not in ['Linux']
+
+- name: Fail on unsupported architecture
+ fail: msg="Architecture not supported {{ ansible_architecture }}"
+ when: ansible_architecture not in ['x86_64']
+
+- name: Fail on unsupported distribution
+ fail: msg="Distribution not supported {{ ansible_os_family }}"
+ when: ansible_os_family not in ['Debian', 'RedHat']
+
+- name: Install dependencies
+ apt: pkg={{ item }} state=present update_cache=yes cache_valid_time=3600 # we update the cache just in case...
+ with_items:
+ - python-pycurl
+ - ntp
+
+- name: Install the Ceph key
+ apt_key: data="{{ lookup('file', 'cephrelease.asc') }}" state=present
+
+- name: Add Ceph repository
+ apt_repository: repo='deb http://ceph.com/debian-{{ ceph_release }}/ {{ ansible_lsb.codename }} main' state=present
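+# Example: with the default ceph_release of 'emperor' on an Ubuntu precise host,
+# this resolves to: deb http://ceph.com/debian-emperor/ precise main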
+
+- name: Install Ceph
+ apt: pkg={{ item }} state=latest
+ with_items:
+ - ceph
+ - ceph-common #|
+ - ceph-fs-common #|--> yes, they are already all dependencies of 'ceph'
+ - ceph-fuse #|--> however, during rolling upgrades of the 'ceph' package
+ - ceph-mds #|--> they don't get updated, so we need to force them
+ - libcephfs1 #|
+
+- name: Generate Ceph configuration file
+ template: src=roles/common/templates/ceph.conf.j2 dest=/etc/ceph/ceph.conf owner=root group=root mode=0644
--- /dev/null
+---
+## Common to all the Ceph RedHat nodes
+#
+
+- name: Fail on unsupported system
+ fail: msg="System not supported {{ ansible_system }}"
+ when: ansible_system not in ['Linux']
+
+- name: Fail on unsupported architecture
+ fail: msg="Architecture not supported {{ ansible_architecture }}"
+ when: ansible_architecture not in ['x86_64']
+
+- name: Fail on unsupported distribution
+ fail: msg="Distribution not supported {{ ansible_os_family }}"
+ when: ansible_os_family not in ['Debian', 'RedHat']
+
+- name: Install dependencies
+ yum: name={{ item }} state=present
+ with_items:
+ - python-pycurl
+ - ntp
+
+- name: Install the Ceph key
+ rpm_key: key=cephrelease.asc state=present
+
+- name: Add Ceph repository
+ command: rpm -U http://ceph.com/rpm-{{ ceph_release }}/{{ redhat_distro }}/noarch/ceph-release-1-0.el6.noarch.rpm creates=/etc/yum.repos.d/ceph.repo
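+# Example: with the defaults (ceph_release: emperor, redhat_distro: el6) this fetches
+# http://ceph.com/rpm-emperor/el6/noarch/ceph-release-1-0.el6.noarch.rpm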
+
+- name: Install Ceph
+ yum: name=ceph state=latest
+
+- name: Generate Ceph configuration file
+ template: src=roles/common/templates/ceph.conf.j2 dest=/etc/ceph/ceph.conf owner=root group=root mode=0644
--- /dev/null
+---
+## Check OS family
+#
+
+- include: RedHat.yml
+ when: ansible_os_family == 'RedHat'
+- include: Debian.yml
+ when: ansible_os_family == 'Debian'
--- /dev/null
+# {{ ansible_managed }}
+
+[global]
+{% if cephx %}
+ auth cluster required = cephx
+ auth service required = cephx
+ auth client required = cephx
+{% else %}
+ auth cluster required = none
+ auth service required = none
+ auth client required = none
+ auth supported = none
+{% endif %}
+ fsid = {{ fsid }}
+{% if pool_default_pg_num is defined %}
+ osd pool default pg num = {{ pool_default_pg_num }}
+{% endif %}
+{% if pool_default_pgp_num is defined %}
+ osd pool default pgp num = {{ pool_default_pgp_num }}
+{% endif %}
+{% if pool_default_size is defined %}
+ osd pool default size = {{ pool_default_size }}
+{% endif %}
+{% if pool_default_min_size is defined %}
+ osd pool default min size = {{ pool_default_min_size }}
+{% endif %}
+{% if pool_default_crush_rule is defined %}
+ osd pool default crush rule = {{ pool_default_crush_rule }}
+{% endif %}
+{% if common_single_host_mode is defined %}
+ osd crush chooseleaf type = 0
+{% endif %}
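+{# chooseleaf type 0 tells CRUSH to pick individual OSDs instead of hosts as the
+   failure domain, so replicas can all land on a single node (testing only). #}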
+
+[mon]
+ mon osd down out interval = {{ mon_osd_down_out_interval }}
+ mon osd min down reporters = {{ mon_osd_min_down_reporters }}
+{% for host in groups['mons'] %}
+ {% if hostvars[host]['ansible_hostname'] is defined %}
+ [mon.{{ hostvars[host]['ansible_hostname'] }}]
+ host = {{ hostvars[host]['ansible_hostname'] }}
+ mon addr = {{ hostvars[host]['ansible_' + monitor_interface ]['ipv4']['address'] }}
+ {% endif %}
+{% endfor %}
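+{# Example rendered output, assuming a 'mons' host named mon1 whose eth1 address
+   is 192.168.0.10:
+   [mon.mon1]
+       host = mon1
+       mon addr = 192.168.0.10 #}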
+
+[osd]
+{% if osd_mkfs_type is defined %}
+ osd mkfs type = {{ osd_mkfs_type }}
+{% endif %}
+ osd journal size = {{ journal_size }}
+{% if cluster_network is defined %}
+ cluster network = {{ cluster_network }}
+{% endif %}
+{% if public_network is defined %}
+ public network = {{ public_network }}
+{% endif %}
+ osd mon heartbeat interval = {{ osd_mon_heartbeat_interval }}
+ # Performance tuning
+ filestore merge threshold = {{ filestore_merge_threshold }}
+ filestore split multiple = {{ filestore_split_multiple }}
+ osd op threads = {{ osd_op_threads }}
+ # Recovery tuning
+ osd recovery max active = {{ osd_recovery_max_active }}
+ osd max backfills = {{ osd_max_backfills }}
+ osd recovery op priority = {{ osd_recovery_op_priority }}
+{% if mds %}
+[mds]
+{% for host in groups['mdss'] %}
+ {% if hostvars[host]['ansible_hostname'] is defined %}
+ [mds.{{ hostvars[host]['ansible_hostname'] }}]
+ host = {{ hostvars[host]['ansible_hostname'] }}
+ {% endif %}
+{% endfor %}
+{% endif %}
+
+{% if radosgw %}
+{% for host in groups['rgws'] %}
+{% if hostvars[host]['ansible_hostname'] is defined %}
+[client.radosgw.gateway]
+ {% if radosgw_dns_name is defined %}
+ rgw dns name = {{ radosgw_dns_name }}
+ {% endif %}
+ host = {{ hostvars[host]['ansible_hostname'] }}
+ keyring = /etc/ceph/keyring.radosgw.gateway
+ rgw socket path = /tmp/radosgw.sock
+ log file = /var/log/ceph/radosgw.log
+ rgw data = /var/lib/ceph/radosgw/{{ hostvars[host]['ansible_hostname'] }}
+ rgw print continue = false
+{% endif %}
+{% endfor %}
+{% endif %}
--- /dev/null
+---
+# You can override default vars defined in defaults/main.yml here,
+# but I would advise using host or group vars instead.
--- /dev/null
+---
+# Variables here are applicable to the current role
+
+## Ceph options
+#
+cephx: true
+
--- /dev/null
+---
+## Deploy Ceph metadata server(s)
+#
+
+- name: Copy MDS bootstrap key
+ copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring dest=/var/lib/ceph/bootstrap-mds/ceph.keyring owner=root group=root mode=600
+ when: cephx
+
+- name: Create MDS directory
+ file: path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }} state=directory owner=root group=root mode=0644
+ when: cephx
+
+- name: Create MDS keyring
+ command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.{{ ansible_hostname }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring creates=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
+ when: cephx
+ changed_when: False
+
+- name: Set MDS key permissions
+ file: path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring mode=0600 owner=root group=root
+ when: cephx
+
+- name: Start the MDS service and add it to the init sequence
+ service: name=ceph state=started enabled=yes args=mds
--- /dev/null
+---
+# You can override default vars defined in defaults/main.yml here,
+# but I would advise using host or group vars instead.
--- /dev/null
+---
+# Variables here are applicable to the current role
+
+## Ceph options
+#
+cephx: true
+
+# Rados Gateway options
+# referenced in common role too.
+radosgw: true
--- /dev/null
+#!/bin/bash
+
+echo -n "Ceph state is: "
+/usr/bin/ceph health
+echo ""
--- /dev/null
+---
+## Deploy Ceph monitor(s)
+#
+
+- name: Create monitor initial keyring
+ command: ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} --create-keyring --name=mon. --add-key={{ monitor_secret }} --cap mon 'allow *' creates=/var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}
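+# NOTE: the monitor_secret used above is not defined in this role's defaults;
+# set it in your inventory, for example to a key generated with 'ceph-authtool --gen-print-key'.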
+
+- name: Set initial monitor key permissions
+ file: path=/var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} mode=0600 owner=root group=root
+
+- name: Create monitor directory
+ file: path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }} state=directory owner=root group=root mode=0644
+
+- name: Ceph monitor mkfs
+ command: ceph-mon --mkfs -i {{ ansible_hostname }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} creates=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/keyring
+
+- name: Start the monitor service and add it to the init sequence
+ service: name=ceph state=started enabled=yes args=mon
+
+# Wait for mon discovery and quorum resolution
+# the admin key is not created instantly, so we have to wait a bit
+#
+
+- name: Wait for the client.admin key to exist
+ command: stat /etc/ceph/ceph.client.admin.keyring
+ register: result
+ until: result.rc == 0
+ changed_when: False
+
+- name: Create RGW keyring
+ command: ceph auth get-or-create client.radosgw.gateway osd 'allow rwx' mon 'allow rw' -o /etc/ceph/keyring.radosgw.gateway creates=/etc/ceph/keyring.radosgw.gateway
+ when: cephx and radosgw
+ changed_when: False
+
+- name: Copy keys to the Ansible server
+ fetch: src={{ item }} dest=fetch/{{ fsid }}/{{ item }} flat=yes
+ when: cephx
+ with_items:
+ - /etc/ceph/ceph.client.admin.keyring # just in case another application needs it
+ - /var/lib/ceph/bootstrap-osd/ceph.keyring # this handles the non-colocation case
+ - /var/lib/ceph/bootstrap-mds/ceph.keyring
+ - /etc/ceph/keyring.radosgw.gateway
+
+- name: Drop in a motd script to report status when logging in
+ copy: src=precise/92-ceph dest=/etc/update-motd.d/92-ceph owner=root group=root mode=0755
+ when: ansible_distribution_release == 'precise'
--- /dev/null
+---
+# You can override default vars defined in defaults/main.yml here,
+# but I would advise using host or group vars instead.
--- /dev/null
+---
+# Variables here are applicable to the current role
+#
+
+## Ceph options
+#
+cephx: true
+
+# Devices to be used as OSDs
+# You can pre-provision disks that are not present yet.
+# Ansible will just skip them. Newly added disks will be
+# automatically configured during the next run.
+#
+
+
+# !! WARNING !!
+#
+# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
+#
+# !! WARNING !!
+
+
+# Declare devices
+# All the scenarios below build on the following device declaration
+#
+devices: [ '/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde']
+
+
+# I. First scenario: journal and osd_data on the same device
+# Use 'true' to enable this scenario
+# This will collocate both journal and data on the same disk
+# creating a partition at the beginning of the device
+#
+journal_collocation: true
+
+
+# II. Second scenario: single journal device for N OSDs
+# Use 'true' to enable this scenario
+#
+raw_journal: false
+raw_journal_device: /dev/sdb
+
+
+# III. Third scenario: N journal devices for N OSDs
+# Use 'true' to enable this scenario
+#
+# In the following example (which assumes a devices list of sdd, sde, sdf and sdg):
+# * sdd and sde will get sdb as a journal
+# * sdf and sdg will get sdc as a journal
+# When starting out you have 2 options:
+# 1. Pre-allocate all the devices
+# 2. Progressively add new devices
+
+raw_multi_journal: false
+raw_journal_devices: [ '/dev/sdb', '/dev/sdb', '/dev/sdc', '/dev/sdc' ]
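+# The mapping is positional: the Nth entry of 'devices' uses the Nth entry of
+# 'raw_journal_devices' as its journal (both lists are walked together).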
+
+
+# IV. Fourth scenario: use directory instead of disk for OSDs
+# Use 'true' to enable this scenario
+
+osd_directory: false
+osd_directories: [ '/var/lib/ceph/osd/mydir1', '/var/lib/ceph/osd/mydir2', '/var/lib/ceph/osd/mydir3', '/var/lib/ceph/osd/mydir4']
--- /dev/null
+---
+## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
+#
+
+- name: Install dependencies
+ apt: pkg=parted state=present
+ when: ansible_os_family == 'Debian'
+
+- name: Install dependencies
+ yum: name=parted state=present
+ when: ansible_os_family == 'RedHat'
+
+- name: Copy OSD bootstrap key
+ copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
+ when: cephx
+
+# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
+# The return code is 1, which makes sense. However, ideally, if ceph-disk detects a ceph partition
+# it should exit with rc=0 and do nothing unless asked to with something like --force.
+# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True";
+# I believe it's safer.
+#
+
+- name: Check if the device is a partition or a disk
+ shell: echo '{{ item }}' | egrep '/dev/[a-z]{3}[0-9]$'
+ ignore_errors: true
+ with_items: devices
+ register: ispartition
+ changed_when: False
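+# Example: '/dev/sdb1' matches the pattern above and is treated as a partition,
+# while '/dev/sdb' does not match and is treated as a whole disk.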
+
+- name: Check if a partition named 'ceph' exists
+ shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
+ ignore_errors: True
+ with_items: devices
+ register: parted
+ changed_when: False
+
+# Prepare means
+# - create GPT partition for a disk, or a loop label for a partition
+# - mark the partition with the ceph type uuid
+# - create a file system
+# - mark the fs as ready for ceph consumption
+# - entire data disk is used (one big partition)
+# - a new partition is added to the journal disk (so it can be easily shared)
+#
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will process the loop sequentially
+
+# NOTE (alahouze): if the device is a partition, the parted command above has
+# failed; this is why we also check whether the device is a partition.
+
+- name: Prepare OSD disk(s)
+ command: ceph-disk prepare {{ item.2 }}
+ when: (item.0.rc != 0 or item.1.rc != 0) and journal_collocation
+ ignore_errors: True
+ with_together:
+ - parted.results
+ - ispartition.results
+ - devices
+
+# Activate means:
+# - mount the volume in a temp location
+# - allocate an osd id (if needed)
+# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
+# - start ceph-osd
+#
+
+# This task is for disk devices only because of the explicit use of the first
+# partition.
+
+- name: Activate OSD(s) when device is a disk
+ command: ceph-disk activate {{ item.2 }}1
+ with_together:
+ - parted.results
+ - ispartition.results
+ - devices
+ when: item.0.rc == 0 and item.1.rc != 0
+ ignore_errors: True
+ changed_when: False
+
+# This task is for partitions because the device path is used as-is, without appending a partition number.
+
+- name: Activate OSD(s) when device is a partition
+ command: ceph-disk activate {{ item.1 }}
+ with_together:
+ - ispartition.results
+ - devices
+ when: item.0.rc == 0
+ ignore_errors: True
+ changed_when: False
+
+- name: Start the OSD service and add it to the init sequence
+ service: name=ceph state=started enabled=yes
--- /dev/null
+---
+## Deploy Ceph Object Storage Daemon(s)
+#
+
+- include: journal_collocation.yml
+ when: journal_collocation
+
+- include: raw_journal.yml
+ when: raw_journal
+
+- include: raw_multi_journal.yml
+ when: raw_multi_journal
+
+- include: osd_directory.yml
+ when: osd_directory
--- /dev/null
+---
+## SCENARIO 4: USE A DIRECTORY INSTEAD OF A DISK FOR OSD
+#
+
+- name: Install dependencies
+ apt: pkg=parted state=present
+ when: ansible_os_family == 'Debian'
+
+- name: Install dependencies
+ yum: name=parted state=present
+ when: ansible_os_family == 'RedHat'
+
+- name: Copy OSD bootstrap key
+ copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
+ when: cephx
+
+# NOTE (leseb): we do not check the filesystem underneath the directory
+# so it is really up to you to configure this properly.
+# Declaring more than one directory on the same filesystem will confuse Ceph.
+
+- name: Create OSD directories
+ file: path={{ item }} state=directory owner=root group=root
+ with_items: osd_directories
+
+# Prepare means
+# - populate the directory with the OSD data structures
+# - no partitioning or filesystem creation is done for directory-backed OSDs
+# - the journal is created as a file inside the directory
+#
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will process the loop sequentially
+
+- name: Prepare OSD directory(ies)
+ command: ceph-disk prepare {{ item }}
+ when: osd_directory
+ with_items: osd_directories
+
+# Activate means:
+# - mount the volume in a temp location
+# - allocate an osd id (if needed)
+# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
+# - start ceph-osd
+#
+
+- name: Activate OSD(s)
+ command: ceph-disk activate {{ item }}
+ with_items: osd_directories
+ changed_when: False
+
+- name: Start the OSD service and add it to the init sequence
+ service: name=ceph state=started enabled=yes
--- /dev/null
+---
+## SCENARIO 2: SINGLE JOURNAL DEVICE FOR N OSDS
+#
+
+- name: Install dependencies
+ apt: pkg=parted state=present
+ when: ansible_os_family == 'Debian'
+
+- name: Install dependencies
+ yum: name=parted state=present
+ when: ansible_os_family == 'RedHat'
+
+- name: Copy OSD bootstrap key
+ copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
+ when: cephx
+
+# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
+# The return code is 1, which makes sense. However, ideally, if ceph-disk detects a ceph partition
+# it should exit with rc=0 and do nothing unless asked to with something like --force.
+# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True";
+# I believe it's safer.
+#
+
+- name: Check if the device is a partition or a disk
+ shell: echo '{{ item }}' | egrep '/dev/[a-z]{3}[0-9]$'
+ ignore_errors: true
+ with_items: devices
+ register: ispartition
+ changed_when: False
+
+- name: Check if a partition named 'ceph' exists
+ shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
+ ignore_errors: True
+ with_items: devices
+ register: parted
+ changed_when: False
+
+# Prepare means
+# - create GPT partition for a disk, or a loop label for a partition
+# - mark the partition with the ceph type uuid
+# - create a file system
+# - mark the fs as ready for ceph consumption
+# - entire data disk is used (one big partition)
+# - a new partition is added to the journal disk (so it can be easily shared)
+#
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will process the loop sequentially
+
+# NOTE (alahouze): if the device is a partition, the parted command above has
+# failed; this is why we also check whether the device is a partition.
+
+- name: Prepare OSD disk(s)
+ command: ceph-disk prepare {{ item.2 }} {{ raw_journal_device }}
+ when: (item.0.rc != 0 or item.1.rc != 0) and raw_journal
+ ignore_errors: True
+ with_together:
+ - parted.results
+ - ispartition.results
+ - devices
+
+# Activate means:
+# - mount the volume in a temp location
+# - allocate an osd id (if needed)
+# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
+# - start ceph-osd
+#
+
+# This task is for disk devices only because of the explicit use of the first
+# partition.
+
+- name: Activate OSD(s) when device is a disk
+ command: ceph-disk activate {{ item.2 }}1
+ with_together:
+ - parted.results
+ - ispartition.results
+ - devices
+ when: item.0.rc == 0 and item.1.rc != 0
+ ignore_errors: True
+ changed_when: False
+
+# This task is for partitions because the device path is used as-is, without appending a partition number.
+
+- name: Activate OSD(s) when device is a partition
+ command: ceph-disk activate {{ item.1 }}
+ with_together:
+ - ispartition.results
+ - devices
+ when: item.0.rc == 0
+ ignore_errors: True
+ changed_when: False
+
+- name: Start the OSD service and add it to the init sequence
+ service: name=ceph state=started enabled=yes
--- /dev/null
+---
+## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS
+#
+
+- name: Install dependencies
+ apt: pkg=parted state=present
+ when: ansible_os_family == 'Debian'
+
+- name: Install dependencies
+ yum: name=parted state=present
+ when: ansible_os_family == 'RedHat'
+
+- name: Copy OSD bootstrap key
+ copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
+ when: cephx
+
+# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
+# The return code is 1, which makes sense. However, ideally, if ceph-disk detects a ceph partition
+# it should exit with rc=0 and do nothing unless asked to with something like --force.
+# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True";
+# I believe it's safer.
+#
+
+- name: Check if the device is a partition or a disk
+ shell: echo '{{ item }}' | egrep '/dev/[a-z]{3}[0-9]$'
+ ignore_errors: true
+ with_items: devices
+ register: ispartition
+ changed_when: False
+
+- name: Check if a partition named 'ceph' exists
+ shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
+ ignore_errors: True
+ with_items: devices
+ register: parted
+ changed_when: False
+
+# Prepare means
+# - create GPT partition for a disk, or a loop label for a partition
+# - mark the partition with the ceph type uuid
+# - create a file system
+# - mark the fs as ready for ceph consumption
+# - entire data disk is used (one big partition)
+# - a new partition is added to the journal disk (so it can be easily shared)
+#
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will process the loop sequentially
+
+# NOTE (alahouze): if the device is a partition, the parted command above has
+# failed; this is why we also check whether the device is a partition.
+
+- name: Prepare OSD disk(s)
+ command: ceph-disk prepare {{ item.2 }} {{ item.3 }}
+ when: (item.0.rc != 0 or item.1.rc != 0) and raw_multi_journal
+ ignore_errors: True
+ with_together:
+ - parted.results
+ - ispartition.results
+ - devices
+ - raw_journal_devices
+
+# Activate means:
+# - mount the volume in a temp location
+# - allocate an osd id (if needed)
+# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
+# - start ceph-osd
+#
+
+# This task is for disk devices only because of the explicit use of the first
+# partition.
+
+- name: Activate OSD(s) when device is a disk
+ command: ceph-disk activate {{ item.2 }}1
+ with_together:
+ - parted.results
+ - ispartition.results
+ - devices
+ when: item.0.rc == 0 and item.1.rc != 0
+ ignore_errors: True
+ changed_when: False
+
+# This task is for partitions because the device path is used as-is, without appending a partition number.
+
+- name: Activate OSD(s) when device is a partition
+ command: ceph-disk activate {{ item.1 }}
+ with_together:
+ - ispartition.results
+ - devices
+ when: item.0.rc == 0
+ ignore_errors: True
+ changed_when: False
+
+- name: Start the OSD service and add it to the init sequence
+ service: name=ceph state=started enabled=yes
--- /dev/null
+---
+# You can override default vars defined in defaults/main.yml here,
+# but I would advise using host or group vars instead.
--- /dev/null
+---
+# Variables here are applicable to the current role
+
+
+## Ceph options
+#
+cephx: true
+
+# Rados Gateway options
+redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
+email_address: foo@bar.com
+
--- /dev/null
+#!/bin/sh
+exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway
--- /dev/null
+---
+- name: restart apache2
+ service: name=apache2 state=restarted enabled=yes
+ when: ansible_os_family == 'Debian'
+
+- name: restart apache2
+ service: name=httpd state=restarted enabled=yes
+ when: ansible_os_family == 'RedHat'
--- /dev/null
+---
+## Deploy RADOS Gateway
+#
+
+- name: Copy RGW bootstrap key
+ copy: src=fetch/{{ fsid }}/etc/ceph/keyring.radosgw.gateway dest=/etc/ceph/keyring.radosgw.gateway owner=root group=root mode=600
+ when: cephx
+
+- name: Set RGW bootstrap key permissions
+ file: path=/etc/ceph/keyring.radosgw.gateway mode=0600 owner=root group=root
+ when: cephx
+
+#- name: Add optimized version of the apache2 package repository
+# apt_repository: repo='deb http://gitbuilder.ceph.com/apache2-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main' state=present
+#
+#- name: Add optimized version of the fastcgi package repository
+# apt_repository: repo='deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main' state=present
+#
+
+- name: Add Ceph extras repository
+ apt_repository: repo='deb http://ceph.com/packages/ceph-extras/debian {{ ansible_lsb.codename }} main' state=present
+
+- name: Install Apache, fastcgi and Rados Gateway
+ apt: pkg={{ item }} state=present
+ with_items:
+ - apache2
+ - libapache2-mod-fastcgi
+ - radosgw
+
+## Prepare Apache
+#
+
+- name: Install default httpd.conf
+ template: src=httpd.conf dest=/etc/apache2/httpd.conf owner=root group=root
+
+- name: Enable the Apache rewrite and fastcgi modules
+ command: "{{ item }}"
+ with_items:
+ - a2enmod rewrite
+ - a2enmod fastcgi
+
+- name: Install Rados Gateway vhost
+ template: src=rgw.conf dest=/etc/apache2/sites-available/rgw.conf owner=root group=root
+
+## Prepare RGW
+#
+
+- name: Create RGW directory
+ file: path=/var/lib/ceph/radosgw/{{ ansible_hostname }} state=directory owner=root group=root mode=0644
+
+- name: Enable Rados Gateway vhost and disable default site
+ command: "{{ item }}"
+ with_items:
+ - a2ensite rgw.conf
+ - a2dissite default
+ notify:
+ - restart apache2
+
+- name: Install s3gw.fcgi script
+ copy: src=s3gw.fcgi dest=/var/www/s3gw.fcgi mode=0555 owner=root group=root
+
+## If we don't perform this check Ansible will start multiple instances of radosgw
+- name: Check if RGW is started
+ command: /etc/init.d/radosgw status
+ register: rgwstatus
+ ignore_errors: True
+
+- name: Start RGW
+ command: /etc/init.d/radosgw start
+ when: rgwstatus.rc != 0
--- /dev/null
+---
+## Deploy RADOS Gateway
+#
+
+- name: Copy RGW bootstrap key
+ copy: src=fetch/{{ fsid }}/etc/ceph/keyring.radosgw.gateway dest=/etc/ceph/keyring.radosgw.gateway owner=root group=root mode=600
+ when: cephx
+
+- name: Set RGW bootstrap key permissions
+ file: path=/etc/ceph/keyring.radosgw.gateway mode=0644 owner=root group=root
+ when: cephx
+
+- name: Add Ceph extras repository
+ template: src=ceph-extra.repo dest=/etc/yum.repos.d/ceph-extra.repo owner=root group=root
+
+- name: Add special fastcgi repository key
+ rpm_key: key=http://dag.wieers.com/rpm/packages/RPM-GPG-KEY.dag.txt
+
+- name: Add special fastcgi repository
+ command: rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
+
+- name: Install Apache, fastcgi, and Rados Gateway
+ yum: name={{ item }} state=present
+ with_items:
+ - httpd
+ - mod_fastcgi
+ - mod_fcgid
+ - ceph-radosgw
+
+## Prepare Apache
+#
+
+- name: Install Rados Gateway vhost
+ template: src=rgw.conf dest=/etc/httpd/conf.d/rgw.conf owner=root group=root
+
+## Prepare RGW
+#
+
+- name: Create RGW directory
+ file: path=/var/lib/ceph/radosgw/{{ ansible_hostname }} state=directory owner=root group=root mode=0644
+
+- name: Install s3gw.fcgi script
+ copy: src=s3gw.fcgi dest=/var/www/s3gw.fcgi mode=0555 owner=root group=root
+
+- name: Disable default site
+ shell: sed -i "s/^[^+#]/#/g" /etc/httpd/conf.d/welcome.conf
+ changed_when: False
+ notify:
+ - restart apache2
+
+## If we don't perform this check Ansible will start multiple instances of radosgw
+- name: Check if RGW is started
+ command: /etc/init.d/ceph-radosgw status
+ register: rgwstatus
+ ignore_errors: True
+
+- name: Start RGW
+ command: /etc/init.d/ceph-radosgw start
+ when: rgwstatus.rc != 0
--- /dev/null
+---
+## Check OS family
+#
+
+- include: RedHat.yml
+ when: ansible_os_family == 'RedHat'
+
+- include: Debian.yml
+ when: ansible_os_family == 'Debian'
--- /dev/null
+# {{ ansible_managed }}
+
+[ceph-extras]
+name=Ceph Extras Packages
+baseurl=http://ceph.com/packages/ceph-extras/rpm/{{ redhat_distro_ceph_extra }}/$basearch
+enabled=1
+priority=2
+gpgcheck=1
+type=rpm-md
+gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
+
+{% if (redhat_distro_ceph_extra != "centos6.4" and redhat_distro_ceph_extra != "rhel6.4" and redhat_distro_ceph_extra != "rhel6.5") %}
+[ceph-extras-noarch]
+name=Ceph Extras noarch
+baseurl=http://ceph.com/packages/ceph-extras/rpm/{{ redhat_distro_ceph_extra }}/noarch
+enabled=1
+priority=2
+gpgcheck=1
+type=rpm-md
+gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
+{% endif %}
+
+[ceph-extras-source]
+name=Ceph Extras Sources
+baseurl=http://ceph.com/packages/ceph-extras/rpm/{{ redhat_distro_ceph_extra }}/SRPMS
+enabled=1
+priority=2
+gpgcheck=1
+type=rpm-md
+gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
--- /dev/null
+# {{ ansible_managed }}
+
+ServerName {{ ansible_hostname }}
--- /dev/null
+# {{ ansible_managed }}
+
+FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock
+<VirtualHost *:80>
+ ServerName {{ ansible_hostname }}
+ ServerAdmin {{ email_address }}
+ DocumentRoot /var/www
+
+ <IfModule mod_fastcgi.c>
+ <Directory /var/www>
+ Options +ExecCGI
+ AllowOverride All
+ SetHandler fastcgi-script
+ Order allow,deny
+ Allow from all
+ AuthBasicAuthoritative Off
+ </Directory>
+ </IfModule>
+
+ RewriteEngine On
+ RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /s3gw.fcgi?page=$1&params=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
+
+</VirtualHost>
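+# Quick smoke test (assuming anonymous access is allowed): 'curl -s http://<gateway-host>/'
+# should return an empty ListAllMyBucketsResult XML document from radosgw.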
--- /dev/null
+---
+# You can override default vars defined in defaults/main.yml here,
+# but I would advise using host or group vars instead.
+++ /dev/null
----
-# Variables here are applicable to the current role
-
-## Setup options
-#
-distro_release: "{{ facter_lsbdistcodename }}" # Seems to be not used.
-apt_key: http://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
-ceph_release: emperor
-redhat_distro: el6 # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11
-
-## Ceph options
-#
-cephx: true
-## Monitor options
-#
-monitor_interface: eth1
-mon_osd_down_out_interval: 600
-mon_osd_min_down_reporters: 7 # number of OSDs per host + 1
-
-## MDS options
-#
-mds: true # disable mds configuration in ceph.conf
-
-# Rados Gateway options
-#
-radosgw: true # referenced in mon role too.
-#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
-
-## OSD options
-#
-journal_size: 100
-pool_default_pg_num: 128
-pool_default_pgp_num: 128
-pool_default_size: 2
-cluster_network: 192.168.0.0/24
-public_network: 192.168.0.0/24
-osd_mkfs_type: xfs
-osd_mon_heartbeat_interval: 30
-# Performance tuning
-filestore_merge_threshold: 40
-filestore_split_multiple: 8
-osd_op_threads: 8
-# Recovery tuning
-osd_recovery_max_active: 5
-osd_max_backfills: 2
-osd_recovery_op_priority: 2
-
-## Testing mode
-# enable this mode _only_ when you have a single node
-common_single_host_mode: true
+++ /dev/null
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1.4.11 (GNU/Linux)
-
-mQINBE+5bugBEADP31ZaQNvhOOQxjDwL/VYDLhtaGq4Q74FCY23uSQAMboKwo4JB
-Te2JTSwBwU/RAPuWTrlKaQBPS30VF5SJN9t16llmoBWqhtBVf/lhQonC/28dTB6D
-KR7Ahiz4Nv2g9m1sLau86JblQuODo8vWHXxahYSLQSyyxIXnlE4K3c1k0S4feLqu
-ZxFtc2cFrQ/bUX9zXg6PXjDVAfY2R+x1JKGkVO/iwP+cjS1tCbvzdKcnQJEXpBwd
-yHvDBuF3IjuR9JgrBhb1ALqexhFKHzG1kHFfOZ3DLVohig68lfyjCepGgo0BPOyy
-S3Yk0QMumEaj9zRJurg49zWemX05XiBGt8SeCFxNUjXGYDIzSQ30K8fXmyjB74CW
-EUDUuTpTt7oZF9jKCjfKmQwvW4GgJ4J0FSwiorXPK27didjLJCnkTt43v0ZETMRW
-aADtiKFHl7lICuRmeXbd+6VkVqmoOz7ialMHnZ2KrHlqTcTPMd4llC4ayi2qS6Qb
-dIi1g9fa5YMS6I7yGxmW4AWwNy7SE8DsTja0aGFR9k432r+Vxtr52jrmP2vVexva
-CVaQkdk2/KEY3MjCPngiZwoTcOONYvNMvQaPrUtRuatcWJOgWsQVedY/UBxk968n
-JzfnNDngbcYDRnOD8wLWyBGyYbOdg1ucckLXFEtPVXoRER5JHMcYhyh+/QARAQAB
-tCRDZXBoIFJlbGVhc2UgS2V5IDxzYWdlQG5ld2RyZWFtLm5ldD6JAjgEEwECACIF
-Ak+5bugCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEH6/3V0X7TFtSjEP
-/A2pazEPwXrlQAHAjcXaFcPguKnXFrXRfbLpM9aZPR5gxH8mWl9RhEW/nL5pBf6A
-Tx7lQ4F/h9bDlf4/bejuxUflkrJEPVWkyPf3hvImjSBs+LBTk4OkpUJwYd9AynsG
-551Q0+6qxFfRVLCR6rLPHbMquXsKHROsSumEGUNrsMVC87gvtXEe/AOLUuRLEbjU
-QqGKP2+mvliizU844a11B/bXViXhkNZw66ESAuqOw0dVPTo6aPLhuSDDrGEHQNTz
-BsUseiUq795DqTE/5sL3lbTPrT1hKoIJFixYvaYBdygDgovsAi33nPn8UPitS5aD
-zGJ/ByDdnI4QW15NN1diMp+BuvOCWLpMaxVQNflARlxxtfIfnvaKjgccr1YOyT91
-5tlbdr0y05r1uYZjYU5/4llilypUgzzQB1jeetr06fOpVvswAAWQJiS5JJU+V84W
-r4sIBhZzGw1uvqNxIBWtk85W1ya7CmisRO7PZYW5lsLxZ48BxZhr45ar6/iDYreT
-OOeP1f9GoJW0X+FAocNc/pobY02MhB/BXV1LRM3lY+yOK3sskspnMihMqP7tSfop
-iJRtfXMLNdRRJFVZ5VSr1MCDK5RPQaqVsuvdtVqOJr1RwAQPjjzisOh+NYmvabkd
-cVxjSV5DX0fMODr2l7cAXxJjZsAs6AlnQOGPg/NXKdkZiEYEEBECAAYFAk+5cEAA
-CgkQ2kQg7SiJlcjJIACgsGpIw9ShLBciO3Y349ja7ILjC8cAnRrqoIpFxUrSIJF/
-8+w98auNwA18
-=uX7x
------END PGP PUBLIC KEY BLOCK-----
+++ /dev/null
----
-
-- name: "update apt cache"
- action: apt update-cache=yes
+++ /dev/null
----
-## Common to all the Ceph Debian nodes
-#
-
-- name: Fail on unsupported system
- fail: msg="System not supported {{ ansible_system }}"
- when: ansible_system not in ['Linux']
-
-- name: Fail on unsupported architecture
- fail: msg="Architecture not supported {{ ansible_architecture }}"
- when: ansible_architecture not in ['x86_64']
-
-- name: Fail on unsupported distribution
- fail: msg="Distribution not supported {{ ansible_os_family }}"
- when: ansible_os_family not in ['Debian', 'RedHat']
-
-- name: Install dependancies
- apt: pkg={{ item }} state=present update_cache=yes cache_valid_time=3600 # we update the cache just in case...
- with_items:
- - python-pycurl
- - ntp
-
-- name: Install the Ceph key
- apt_key: data="{{ lookup('file', 'cephrelease.asc') }}" state=present
-
-- name: Add Ceph repository
- apt_repository: repo='deb http://ceph.com/debian-{{ ceph_release }}/ {{ ansible_lsb.codename }} main' state=present
-
-- name: Install Ceph
- apt: pkg={{ item }} state=latest
- with_items:
- - ceph
- - ceph-common #|
- - ceph-fs-common #|--> yes, they are already all dependancies from 'ceph'
- - ceph-fuse #|--> however while proceding to rolling upgrades and the 'ceph' package upgrade
- - ceph-mds #|--> they don't get update so we need to force them
- - libcephfs1 #|
-
-- name: Generate ceph configuration file
- template: src=roles/common/templates/ceph.conf.j2 dest=/etc/ceph/ceph.conf owner=root group=root mode=0644
+++ /dev/null
----
-## Common to all the Ceph RedHat nodes
-#
-
-- name: Fail on unsupported system
- fail: msg="System not supported {{ ansible_system }}"
- when: ansible_system not in ['Linux']
-
-- name: Fail on unsupported architecture
- fail: msg="Architecture not supported {{ ansible_architecture }}"
- when: ansible_architecture not in ['x86_64']
-
-- name: Fail on unsupported distribution
- fail: msg="Distribution not supported {{ ansible_os_family }}"
- when: ansible_os_family not in ['Debian', 'RedHat']
-
-- name: Install dependancies
- yum: name={{ item }} state=present
- with_items:
- - python-pycurl
- - ntp
-
-- name: Install the Ceph key
- rpm_key: key=cephrelease.asc state=present
-
-- name: Add Ceph repository
- command: rpm -U http://ceph.com/rpm-{{ ceph_release }}/{{ redhat_distro }}/noarch/ceph-release-1-0.el6.noarch.rpm creates=/etc/yum.repos.d/ceph.repo
-
-- name: Install Ceph
- yum: name=ceph state=latest
-
-- name: Generate Ceph configuration file
- template: src=roles/common/templates/ceph.conf.j2 dest=/etc/ceph/ceph.conf owner=root group=root mode=0644
+++ /dev/null
----
-## Check OS family
-#
-
-- include: RedHat.yml
- when: ansible_os_family == 'RedHat'
-- include: Debian.yml
- when: ansible_os_family == 'Debian'
+++ /dev/null
-# {{ ansible_managed }}
-
-[global]
-{% if cephx %}
- auth cluster required = cephx
- auth service required = cephx
- auth client required = cephx
-{% else %}
- auth cluster required = none
- auth service required = none
- auth client required = none
- auth supported = none
-{% endif %}
- fsid = {{ fsid }}
-{% if pool_default_pg_num is defined %}
- osd pool default pg num = {{ pool_default_pg_num }}
-{% endif %}
-{% if pool_default_pgp_num is defined %}
- osd pool default pgp num = {{ pool_default_pgp_num }}
-{% endif %}
-{% if pool_default_size is defined %}
- osd pool default size = {{ pool_default_size }}
-{% endif %}
-{% if pool_default_min_size is defined %}
- osd pool default min size = {{ pool_default_min_size }}
-{% endif %}
-{% if pool_default_crush_rule is defined %}
- osd pool default crush rule = {{ pool_default_crush_rule }}
-{% endif %}
-{% if common_single_host_mode is defined %}
- osd crush chooseleaf type = 0
-{% endif %}
-
-[mon]
- mon osd down out interval = {{ mon_osd_down_out_interval }}
- mon osd min down reporters = {{ mon_osd_min_down_reporters }}
-{% for host in groups['mons'] %}
- {% if hostvars[host]['ansible_hostname'] is defined %}
- [mon.{{ hostvars[host]['ansible_hostname'] }}]
- host = {{ hostvars[host]['ansible_hostname'] }}
- mon addr = {{ hostvars[host]['ansible_' + monitor_interface ]['ipv4']['address'] }}
- {% endif %}
-{% endfor %}
-
-[osd]
-{% if osd_mkfs_type is defined %}
- osd mkfs type = {{ osd_mkfs_type }}
-{% endif %}
- osd journal size = {{ journal_size }}
-{% if cluster_network is defined %}
- cluster_network = {{ cluster_network }}
-{% endif %}
-{% if public_network is defined %}
- public_network = {{ public_network }}
-{% endif %}
- osd mon heartbeat interval = {{ osd_mon_heartbeat_interval }}
- # Performance tuning
- filestore merge threshold = {{ filestore_merge_threshold }}
- filestore split multiple = {{ filestore_split_multiple }}
- osd op threads = {{ osd_op_threads }}
- # Recovery tuning
- osd recovery max active = {{ osd_recovery_max_active }}
- osd max backfills = {{ osd_max_backfills }}
- osd recovery op priority = {{ osd_recovery_op_priority }}
-{% if mds %}
-[mds]
-{% for host in groups['mdss'] %}
- {% if hostvars[host]['ansible_hostname'] is defined %}
- [mds.{{ hostvars[host]['ansible_hostname'] }}]
- host = {{ hostvars[host]['ansible_hostname'] }}
- {% endif %}
-{% endfor %}
-{% endif %}
-
-{% if radosgw %}
-{% for host in groups['rgws'] %}
-{% if hostvars[host]['ansible_hostname'] is defined %}
-[client.radosgw.gateway]
- {% if radosgw_dns_name is defined %}
- rgw dns name = {{ radosgw_dns_name }}
- {% endif %}
- host = {{ hostvars[host]['ansible_hostname'] }}
- keyring = /etc/ceph/keyring.radosgw.gateway
- rgw socket path = /tmp/radosgw.sock
- log file = /var/log/ceph/radosgw.log
- rgw data = /var/lib/ceph/radosgw/{{ hostvars[host]['ansible_hostname'] }}
- rgw print continue = false
-{% endif %}
-{% endfor %}
-{% endif %}
+++ /dev/null
----\r
-# You can override default vars defined in defaults/main.yml here,\r
-# but I would advice to use host or group vars instead \r
+++ /dev/null
----
-# Variables here are applicable to the current role
-
-## Ceph options
-#
-cephx: true
-
+++ /dev/null
----
-## Deploy Ceph metadata server(s)
-#
-
-- name: Copy MDS bootstrap key
- copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring dest=/var/lib/ceph/bootstrap-mds/ceph.keyring owner=root group=root mode=600
- when: cephx
-
-- name: Create MDS directory
- action: file path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }} state=directory owner=root group=root mode=0644
- when: cephx
-
-- name: Create MDS keyring
- command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.{{ ansible_hostname }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring creates=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
- when: cephx
- changed_when: False
-
-- name: Set MDS key permissions
- file: path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring mode=0600 owner=root group=root
- when: cephx
-
-- name: Start and add that the MDS service to the init sequence
- service: name=ceph state=started enabled=yes args=mds
+++ /dev/null
----\r
-# You can override default vars defined in defaults/main.yml here,\r
-# but I would advice to use host or group vars instead \r
+++ /dev/null
----
-# Variables here are applicable to the current role
-
-## Ceph options
-#
-cephx: true
-
-# Rados Gateway options
-# referenced in common role too.
-radosgw: true
+++ /dev/null
-#!/bin/bash
-
-echo -n "Ceph state is: "
-/usr/bin/ceph health
-echo ""
+++ /dev/null
----
-## Deploy Ceph monitor(s)
-#
-
-- name: Create monitor initial keyring
- command: ceph-authtool /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} --create-keyring --name=mon. --add-key={{ monitor_secret }} --cap mon 'allow *' creates=/var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }}
-
-- name: Set initial monitor key permissions
- file: path=/var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} mode=0600 owner=root group=root
-
-- name: Create monitor directory
- file: path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }} state=directory owner=root group=root mode=0644
-
-- name: Ceph monitor mkfs
- command: ceph-mon --mkfs -i {{ ansible_hostname }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ ansible_hostname }} creates=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/keyring
-
-- name: Start and add that the monitor service to the init sequence
- service: name=ceph state=started enabled=yes args=mon
-
-# Wait for mon discovery and quorum resolution
-# the admin key is not instantanely created so we have to wait a bit
-#
-
-- name: If client.admin key exists
- command: stat /etc/ceph/ceph.client.admin.keyring
- register: result
- until: result.rc == 0
- changed_when: False
-
-- name: Create RGW keyring
- command: ceph auth get-or-create client.radosgw.gateway osd 'allow rwx' mon 'allow rw' -o /etc/ceph/keyring.radosgw.gateway creates=/etc/ceph/keyring.radosgw.gateway
- when: cephx and radosgw
- changed_when: False
-
-- name: Copy keys to the ansible server
- fetch: src={{ item }} dest=fetch/{{ fsid }}/{{ item }} flat=yes
- when: cephx
- with_items:
- - /etc/ceph/ceph.client.admin.keyring # just in case another application needs it
- - /var/lib/ceph/bootstrap-osd/ceph.keyring # this handles the non-colocation case
- - /var/lib/ceph/bootstrap-mds/ceph.keyring
- - /etc/ceph/keyring.radosgw.gateway
-
-- name: Drop in a motd script to report status when logging in
- copy: src=precise/92-ceph dest=/etc/update-motd.d/92-ceph owner=root group=root mode=0755
- when: ansible_distribution_release == 'precise'
+++ /dev/null
----\r
-# You can override default vars defined in defaults/main.yml here,\r
-# but I would advice to use host or group vars instead \r
+++ /dev/null
----
-# Variables here are applicable to the current role
-#
-
-## Ceph options
-#
-cephx: true
-
-# Devices to be used as OSDs
-# You can pre-provision disks that are not present yet.
-# Ansible will just skip them. Newly added disk will be
-# automatically configured during the next run.
-#
-
-
-# !! WARNING !!
-#
-# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
-#
-# !! WARNING !!
-
-
-# Declare devices
-# All the scenarii inherit from the following device declaration
-#
-devices: [ '/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde']
-
-
-# I. First scenario: journal and osd_data on the same device
-# Use 'true' to enable this scenario
-# This will collocate both journal and data on the same disk
-# creating a partition at the beginning of the device
-#
-journal_collocation: true
-
-
-# II. Second scenario: single journal device for N OSDs
-# Use 'true' to enable this scenario
-#
-raw_journal: false
-raw_journal_device: /dev/sdb
-
-
-# III. Third scenario: N journal devices for N OSDs
-# Use 'true' to enable this scenario
-#
-# In the following example:
-# * sdd and sde will get sdb as a journal
-# * sdf and sdg will get sdc as a journal
-# While starting you have 2 options:
-# 1. Pre-allocate all the devices
-# 2. Progressively add new devices
-
-raw_multi_journal: false
-raw_journal_devices: [ '/dev/sdb', '/dev/sdb', '/dev/sdc', '/dev/sdc' ]
-
-
-# IV. Fourth scenario: use directory instead of disk for OSDs
-# Use 'true' to enable this scenario
-
-osd_directory: false
-osd_directories: [ '/var/lib/ceph/osd/mydir1', '/var/lib/ceph/osd/mydir2', '/var/lib/ceph/osd/mydir3', '/var/lib/ceph/osd/mydir4']
+++ /dev/null
----
-## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
-#
-
-- name: Install dependancies
- apt: pkg=parted state=present
- when: ansible_os_family == 'Debian'
-
-- name: Install dependancies
- yum: name=parted state=present
- when: ansible_os_family == 'RedHat'
-
-- name: Copy OSD bootstrap key
- copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
- when: cephx
-
-# NOTE (leseb): current behavior of ceph-disk is to fail when the device is mounted "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
-# the return code is 1, which makes sense, however ideally if ceph-disk will detect a ceph partition
-# it should exist we rc=0 and don't do anything unless we do something like --force
-# As as a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
-# I believe it's safer
-#
-
-- name: Check if the device is a partition or a disk
- shell: echo '{{ item }}' | egrep '/dev/[a-z]{3}[0-9]$'
- ignore_errors: true
- with_items: devices
- register: ispartition
- changed_when: False
-
-- name: If partition named 'ceph' exists
- shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
- ignore_errors: True
- with_items: devices
- register: parted
- changed_when: False
-
-# Prepare means
-# - create GPT partition for a disk, or a loop label for a partition
-# - mark the partition with the ceph type uuid
-# - create a file system
-# - mark the fs as ready for ceph consumption
-# - entire data disk is used (one big partition)
-# - a new partition is added to the journal disk (so it can be easily shared)
-#
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-# NOTE (alahouze): if the device is a partition, the parted command below has
-# failed, this is why we check if the device is a partition too.
-
-- name: Prepare OSD disk(s)
- command: ceph-disk prepare {{ item.2 }}
- when: (item.0.rc != 0 or item.1.rc != 0) and journal_collocation
- ignore_errors: True
- with_together:
- - parted.results
- - ispartition.results
- - devices
-
-# Activate means:
-# - mount the volume in a temp location
-# - allocate an osd id (if needed)
-# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
-# - start ceph-osd
-#
-
-# This task is for disk devices only because of the explicit use of the first
-# partition.
-
-- name: Activate OSD(s) when device is a disk
- command: ceph-disk activate {{ item.2 }}1
- with_together:
- - parted.results
- - ispartition.results
- - devices
- when: item.0.rc == 0 and item.1.rc != 0
- ignore_errors: True
- changed_when: False
-
-# This task is for partitions because we don't explicitly use a partition.
-
-- name: Activate OSD(s) when device is a partition
- command: ceph-disk activate {{ item.1 }}
- with_together:
- - ispartition.results
- - devices
- when: item.0.rc == 0
- ignore_errors: True
- changed_when: False
-
-- name: Start and add that the OSD service to the init sequence
- service: name=ceph state=started enabled=yes
+++ /dev/null
----
-## Deploy Ceph Oject Storage Daemon(s)
-#
-
-- include: journal_collocation.yml
- when: journal_collocation
-
-- include: raw_journal.yml
- when: raw_journal
-
-- include: raw_multi_journal.yml
- when: raw_multi_journal
-
-- include: osd_directory.yml
- when: osd_directory
+++ /dev/null
----
-## SCENARIO 4: USE A DIRECTORY INSTEAD OF A DISK FOR OSD
-#
-
-- name: Install dependancies
- apt: pkg=parted state=present
- when: ansible_os_family == 'Debian'
-
-- name: Install dependancies
- yum: name=parted state=present
- when: ansible_os_family == 'RedHat'
-
-- name: Copy OSD bootstrap key
- copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
- when: cephx
-
-# NOTE (leseb): we do not check the filesystem underneath the directory
-# so it is really up to you to configure this properly.
-# Declaring more than one directory on the same filesystem will confuse Ceph.
-
-- name: Create OSD directories
- file: path={{ item }} state=directory owner=root group=root
- with_items: osd_directories
-
-# Prepare means
-# - create GPT partition
-# - mark the partition with the ceph type uuid
-# - create a file system
-# - mark the fs as ready for ceph consumption
-# - entire data disk is used (one big partition)
-# - a new partition is added to the journal disk (so it can be easily shared)
-#
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-- name: Prepare OSD disk(s)
- command: ceph-disk prepare {{ item }}
- when: osd_directory
- with_items: osd_directories
-
-# Activate means:
-# - mount the volume in a temp location
-# - allocate an osd id (if needed)
-# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
-# - start ceph-osd
-#
-
-- name: Activate OSD(s)
- command: ceph-disk activate {{ item }}
- with_items: osd_directories
- changed_when: False
-
-- name: Start and add that the OSD service to the init sequence
- service: name=ceph state=started enabled=yes
+++ /dev/null
----
-## SCENARIO 2: SINGLE JOURNAL DEVICE FOR N OSDS
-#
-
-- name: Install dependancies
- apt: pkg=parted state=present
- when: ansible_os_family == 'Debian'
-
-- name: Install dependancies
- yum: name=parted state=present
- when: ansible_os_family == 'RedHat'
-
-- name: Copy OSD bootstrap key
- copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
- when: cephx
-
-# NOTE (leseb): current behavior of ceph-disk is to fail when the device is mounted "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
-# the return code is 1, which makes sense, however ideally if ceph-disk will detect a ceph partition
-# it should exist we rc=0 and don't do anything unless we do something like --force
-# As as a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
-# I believe it's safer
-#
-
-- name: Check if the device is a partition or a disk
- shell: echo '{{ item }}' | egrep '/dev/[a-z]{3}[0-9]$'
- ignore_errors: true
- with_items: devices
- register: ispartition
- changed_when: False
-
-- name: If partition named 'ceph' exists
- shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
- ignore_errors: True
- with_items: devices
- register: parted
- changed_when: False
-
-# Prepare means
-# - create GPT partition for a disk, or a loop label for a partition
-# - mark the partition with the ceph type uuid
-# - create a file system
-# - mark the fs as ready for ceph consumption
-# - entire data disk is used (one big partition)
-# - a new partition is added to the journal disk (so it can be easily shared)
-#
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-# NOTE (alahouze): if the device is a partition, the parted command below has
-# failed, this is why we check if the device is a partition too.
-
-- name: Prepare OSD disk(s)
- command: ceph-disk prepare {{ item.2 }} {{ raw_journal_device }}
- when: (item.0.rc != 0 or item.1.rc != 0) and raw_journal
- ignore_errors: True
- with_together:
- - parted.results
- - ispartition.results
- - devices
-
-# Activate means:
-# - mount the volume in a temp location
-# - allocate an osd id (if needed)
-# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
-# - start ceph-osd
-#
-
-# This task is for disk devices only because of the explicit use of the first
-# partition.
-
-- name: Activate OSD(s) when device is a disk
- command: ceph-disk activate {{ item.2 }}1
- with_together:
- - parted.results
- - ispartition.results
- - devices
- when: item.0.rc == 0 and item.1.rc != 0
- ignore_errors: True
- changed_when: False
-
-# This task is for partitions because we don't explicitly use a partition.
-
-- name: Activate OSD(s) when device is a partition
- command: ceph-disk activate {{ item.1 }}
- with_together:
- - ispartition.results
- - devices
- when: item.0.rc == 0
- ignore_errors: True
- changed_when: False
-
-- name: Start and add that the OSD service to the init sequence
- service: name=ceph state=started enabled=yes
+++ /dev/null
----
-## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS
-#
-
-- name: Install dependancies
- apt: pkg=parted state=present
- when: ansible_os_family == 'Debian'
-
-- name: Install dependancies
- yum: name=parted state=present
- when: ansible_os_family == 'RedHat'
-
-- name: Copy OSD bootstrap key
- copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
- when: cephx
-
-# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted ("stderr: ceph-disk: Error: Device is mounted: /dev/sdb1").
-# The return code is 1, which makes sense; however, ideally, if ceph-disk detects a ceph partition
-# it should exit with rc=0 and do nothing unless something like --force is passed.
-# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True",
-# as I believe it's safer.
-#
-
-- name: Check if the device is a partition or a disk
- shell: echo '{{ item }}' | egrep '/dev/[a-z]{3}[0-9]$'
- ignore_errors: true
- with_items: devices
- register: ispartition
- changed_when: False
-
-- name: Check if a partition named 'ceph' exists
- shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
- ignore_errors: True
- with_items: devices
- register: parted
- changed_when: False
-
-# Prepare means
-# - create GPT partition for a disk, or a loop label for a partition
-# - mark the partition with the ceph type uuid
-# - create a file system
-# - mark the fs as ready for ceph consumption
-# - entire data disk is used (one big partition)
-# - a new partition is added to the journal disk (so it can be easily shared)
-#
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible processes the loop sequentially
-
-# NOTE (alahouze): if the device is a partition, the parted check has failed,
-# which is why we also check whether the device is a partition.
-
-- name: Prepare OSD disk(s)
- command: ceph-disk prepare {{ item.2 }} {{ item.3 }}
- when: (item.0.rc != 0 or item.1.rc != 0) and raw_multi_journal
- ignore_errors: True
- with_together:
- - parted.results
- - ispartition.results
- - devices
- - raw_journal_devices
-
-# Activate means:
-# - mount the volume in a temp location
-# - allocate an osd id (if needed)
-# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
-# - start ceph-osd
-#
-
-# This task is for disk devices only because of the explicit use of the first
-# partition.
-
-- name: Activate OSD(s) when device is a disk
- command: ceph-disk activate {{ item.2 }}1
- with_together:
- - parted.results
- - ispartition.results
- - devices
- when: item.0.rc == 0 and item.1.rc != 0
- ignore_errors: True
- changed_when: False
-
-# This task is for partitions because the device is already a partition, so we don't append a partition number.
-
-- name: Activate OSD(s) when device is a partition
- command: ceph-disk activate {{ item.1 }}
- with_together:
- - ispartition.results
- - devices
- when: item.0.rc == 0
- ignore_errors: True
- changed_when: False
-
- name: Start the OSD service and add it to the init sequence
- service: name=ceph state=started enabled=yes
+++ /dev/null
----
-# You can override default vars defined in defaults/main.yml here,
-# but I would advise using host or group vars instead
+++ /dev/null
----
-# Variables here are applicable to the current role
-
-
-## Ceph options
-#
-cephx: true
-
-# Rados Gateway options
-redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
-email_address: foo@bar.com
-
+++ /dev/null
-#!/bin/sh
-exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway
+++ /dev/null
----
-- name: restart apache2
- service: name=apache2 state=restarted enabled=yes
- when: ansible_os_family == 'Debian'
-
-- name: restart apache2
- service: name=httpd state=restarted enabled=yes
- when: ansible_os_family == 'RedHat'
+++ /dev/null
----
-## Deploy RADOS Gateway
-#
-
-- name: Copy RGW bootstrap key
- copy: src=fetch/{{ fsid }}/etc/ceph/keyring.radosgw.gateway dest=/etc/ceph/keyring.radosgw.gateway owner=root group=root mode=600
- when: cephx
-
-- name: Set RGW bootstrap key permissions
- file: path=/etc/ceph/keyring.radosgw.gateway mode=0600 owner=root group=root
- when: cephx
-
-#- name: Add optimized version of the apache2 package repository
-# apt_repository: repo='deb http://gitbuilder.ceph.com/apache2-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main' state=present
-#
-#- name: Add optimized version of the fastcgi package repository
-# apt_repository: repo='deb http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/master {{ ansible_lsb.codename }} main' state=present
-#
-
-- name: Add Ceph extras repository
- apt_repository: repo='deb http://ceph.com/packages/ceph-extras/debian {{ ansible_lsb.codename }} main' state=present
-
-- name: Install Apache, fastcgi and Rados Gateway
- apt: pkg={{ item }} state=present
- with_items:
- - apache2
- - libapache2-mod-fastcgi
- - radosgw
-
-## Prepare Apache
-#
-
-- name: Install default httpd.conf
- template: src=httpd.conf dest=/etc/apache2/httpd.conf owner=root group=root
-
-- name: Enable Apache mod_rewrite and mod_fastcgi
- command: "{{ item }}"
- with_items:
- - a2enmod rewrite
- - a2enmod fastcgi
-
-- name: Install Rados Gateway vhost
- template: src=rgw.conf dest=/etc/apache2/sites-available/rgw.conf owner=root group=root
-
-## Prepare RGW
-#
-
-- name: Create RGW directory
- file: path=/var/lib/ceph/radosgw/{{ ansible_fqdn }} state=directory owner=root group=root mode=0644
-
-- name: Enable Rados Gateway vhost and disable default site
- command: "{{ item }}"
- with_items:
- - a2ensite rgw.conf
- - a2dissite default
- notify:
- - restart apache2
-
-- name: Install s3gw.fcgi script
- copy: src=s3gw.fcgi dest=/var/www/s3gw.fcgi mode=0555 owner=root group=root
-
-## If we don't perform this check, Ansible will start multiple instances of radosgw (an idempotent single-task alternative is sketched below)
-- name: Check if RGW is started
- command: /etc/init.d/radosgw status
- register: rgwstatus
- ignore_errors: True
-
-- name: Start RGW
- command: /etc/init.d/radosgw start
- when: rgwstatus.rc != 0
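A hedged alternative that keeps the start idempotent within a single task is to chain the status check and the start in one shell command; this is only a sketch (the task name is an assumption) and behaves like the two tasks above. On the RedHat family the init script would be /etc/init.d/ceph-radosgw instead.

- name: Start RGW if it is not already running
  shell: /etc/init.d/radosgw status || /etc/init.d/radosgw start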
+++ /dev/null
----
-## Deploy RADOS Gateway
-#
-
-- name: Copy RGW bootstrap key
- copy: src=fetch/{{ fsid }}/etc/ceph/keyring.radosgw.gateway dest=/etc/ceph/keyring.radosgw.gateway owner=root group=root mode=600
- when: cephx
-
-- name: Set RGW bootstrap key permissions
- file: path=/etc/ceph/keyring.radosgw.gateway mode=0644 owner=root group=root
- when: cephx
-
-- name: Add Ceph extras repository
- template: src=ceph-extra.repo dest=/etc/yum.repos.d owner=root group=root
-
-- name: Add special fastcgi repository key
- rpm_key: key=http://dag.wieers.com/rpm/packages/RPM-GPG-KEY.dag.txt
-
-- name: Add special fastcgi repository
- command: rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
-
-- name: Install Apache, fastcgi, and Rados Gateway
- yum: name={{ item }} state=present
- with_items:
- - httpd
- - mod_fastcgi
- - mod_fcgid
- - ceph-radosgw
-
-## Prepare Apache
-#
-
-- name: Install Rados Gateway vhost
- template: src=rgw.conf dest=/etc/httpd/conf.d/rgw.conf owner=root group=root
-
-## Prepare RGW
-#
-
-- name: Create RGW directory
- file: path=/var/lib/ceph/radosgw/{{ ansible_fqdn }} state=directory owner=root group=root mode=0644
-
-- name: Install s3gw.fcgi script
- copy: src=s3gw.fcgi dest=/var/www/s3gw.fcgi mode=0555 owner=root group=root
-
-- name: Disable default site
- shell: sed -i "s/^[^+#]/#/g" /etc/httpd/conf.d/welcome.conf
- changed_when: False
- notify:
- - restart apache2
-
-## If we don't perform this check, Ansible will start multiple instances of radosgw
-- name: Check if RGW is started
- command: /etc/init.d/ceph-radosgw status
- register: rgwstatus
- ignore_errors: True
-
-- name: Start RGW
- command: /etc/init.d/ceph-radosgw start
- when: rgwstatus.rc != 0
+++ /dev/null
----
-## Check OS family
-#
-
-- include: RedHat.yml
- when: ansible_os_family == 'RedHat'
-
-- include: Debian.yml
- when: ansible_os_family == 'Debian'
+++ /dev/null
-# {{ ansible_managed }}
-
-[ceph-extras]
-name=Ceph Extras Packages
-baseurl=http://ceph.com/packages/ceph-extras/rpm/{{ redhat_distro_ceph_extra }}/$basearch
-enabled=1
-priority=2
-gpgcheck=1
-type=rpm-md
-gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
-
-{% if (redhat_distro_ceph_extra != "centos6.4" and redhat_distro_ceph_extra != "rhel6.4" and redhat_distro_ceph_extra != "rhel6.5") %}
-[ceph-extras-noarch]
-name=Ceph Extras noarch
-baseurl=http://ceph.com/packages/ceph-extras/rpm/{{ redhat_distro_ceph_extra }}/noarch
-enabled=1
-priority=2
-gpgcheck=1
-type=rpm-md
-gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
-{% endif %}
-
-[ceph-extras-source]
-name=Ceph Extras Sources
-baseurl=http://ceph.com/packages/ceph-extras/rpm/{{ redhat_distro_ceph_extra }}/SRPMS
-enabled=1
-priority=2
-gpgcheck=1
-type=rpm-md
-gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
+++ /dev/null
-# {{ ansible_managed }}
-
-ServerName {{ ansible_hostname }}
+++ /dev/null
-# {{ ansible_managed }}
-
-FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock
-<VirtualHost *:80>
- ServerName {{ ansible_hostname }}
- ServerAdmin {{ email_address }}@{{ ansible_fqdn }}
- DocumentRoot /var/www
-
- <IfModule mod_fastcgi.c>
- <Directory /var/www>
- Options +ExecCGI
- AllowOverride All
- SetHandler fastcgi-script
- Order allow,deny
- Allow from all
- AuthBasicAuthoritative Off
- </Directory>
- </IfModule>
-
- RewriteEngine On
- RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /s3gw.fcgi?page=$1&params=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
-
-</VirtualHost>
+++ /dev/null
----
-# You can override default vars defined in defaults/main.yml here,
-# but I would advise using host or group vars instead
# This playbook does a rolling update of all the Ceph services
# Change the value of serial: to adjust the number of servers to be updated per batch
# (e.g. serial: 1 updates one host at a time; see the sketch below).
#
-# The four roles that apply to the ceph hosts will be applied: common,
-# mon, osd and mds. So any changes to configuration, package updates, etc,
+# The four roles that apply to the ceph hosts will be applied: ceph-common,
+# ceph-mon, ceph-osd and ceph-mds. So any changes to configuration, package updates, etc,
# will be applied as part of the rolling update process.
#
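For illustration only (an assumption, not part of the original playbook), raising serial lets more hosts be updated per batch; a hypothetical variant of the OSD play that updates two OSD hosts at a time would look like this:

- hosts: osds
  serial: 2        # update two OSD hosts per batch instead of one
  sudo: True
  roles:
    - ceph-osd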
- rgws
sudo: True
roles:
- - common
+ - ceph-common
- hosts: mons
serial: 1
sudo: True
roles:
- - mon
+ - ceph-mon
post_tasks:
- name: restart monitor(s)
service: name=ceph state=restarted args=mon
serial: 1
sudo: True
roles:
- - osd
+ - ceph-osd
post_tasks:
- name: restart object storage daemon(s)
command: service ceph-osd-all restart
serial: 1
sudo: True
roles:
- - mds
+ - ceph-mds
post_tasks:
- name: restart metadata server(s)
service: name=ceph state=restarted args=mds
- rgws
sudo: True
roles:
- - common
+ - ceph-common
- hosts: mons
sudo: True
roles:
- - mon
+ - ceph-mon
- hosts: osds
sudo: True
roles:
- - osd
+ - ceph-osd
- hosts: mdss
sudo: True
roles:
- - mds
+ - ceph-mds
- hosts: rgws
sudo: True
roles:
- - radosgw
+ - ceph-radosgw