---
- action: apt update-cache=yes
+ - name: update apt cache
++ apt: update_cache=yes
- - name: "update apt cache"
- action: apt update-cache=yes
-
- - name: "restart ceph ubuntu"
- shell: service ceph restart ; service ceph-osd-all restart
- when: socket.rc == 0
-
- - name: "restart ceph debian redhat"
+ - name: restart ceph
- shell: service ceph restart
+ command: service ceph restart
when: socket.rc == 0
- shell: service ceph-osd-all restart
- when: socket.rc == 0 and ansible_distribution == 'Ubuntu'
+
+ - name: restart ceph-osd-all on ubuntu
++ shell: service ceph restart ; service ceph-osd-all restart
++ when: socket.rc == 0 and ansible_distribution == 'Ubuntu'
--- /dev/null
- apt: >
- pkg={{ item }}
- state=present
- update_cache=yes
+ ---
+ - name: Install dependencies
- apt_key: >
++ apt: >
++ pkg={{ item }}
++ state=present
++ update_cache=yes
+ cache_valid_time=3600
+ with_items:
+ - python-pycurl
+ - ntp
+ - hdparm
+
+ - name: Install the Ceph stable repository key
- apt_key: >
++ apt_key: >
+ data="{{ lookup('file', 'cephstable.asc') }}"
+ state=present
+ when: ceph_stable
+
+ - name: Install the Ceph development repository key
- apt_repository: >
++ apt_key: >
+ data="{{ lookup('file', 'cephdev.asc') }}"
+ state=present
+ when: ceph_dev
+
+ - name: Add Ceph stable repository
++ apt_repository: >
+ repo="deb http://ceph.com/debian-{{ ceph_stable_release }}/ {{ ansible_lsb.codename }} main"
+ state=present
+ when: ceph_stable
+
+ - name: Add Ceph development repository
+ apt_repository: >
+ repo="deb http://gitbuilder.ceph.com/ceph-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/{{ ceph_dev_branch }} {{ ansible_lsb.codename }} main"
+ state=present
+ when: ceph_dev
+
+ - name: Install Ceph
+ apt: >
+ pkg={{ item }}
+ state=latest
+ with_items:
+ - ceph
+ - ceph-common #|
+ - ceph-fs-common #|--> yes, they are already all dependencies of 'ceph'
+ - ceph-fuse #|--> however, during rolling upgrades the 'ceph' package upgrade
+ - ceph-mds #|--> does not update them, so we need to force them
+ - libcephfs1 #|
--- /dev/null
- yum: >
- name={{ item }}
+ ---
+ - name: Install dependencies
- key={{ ceph_key }}
++ yum: >
++ name={{ item }}
+ state=present
+ with_items:
+ - python-pycurl
+ - ntp
+ - hdparm
+
+ - name: Install the Ceph stable repository key
+ rpm_key: >
- key={{ ceph_key }}
++ key={{ ceph_key }}
+ state=present
+ when: ceph_stable
+
+ - name: Install the Ceph development repository key
+ rpm_key: >
- yum: >
- name=ceph
++ key={{ ceph_key }}
+ state=present
+ when: ceph_dev
+
+ - name: Add Ceph stable repository
+ command: "rpm -U http://ceph.com/rpm-{{ ceph_stable_release }}/{{ redhat_distro }}/noarch/ceph-release-1-0.el6.noarch.rpm creates=/etc/yum.repos.d/ceph.repo"
+ when: ceph_stable
+
+ - name: Add Ceph development repository
+ command: "rpm -U http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm creates=/etc/yum.repos.d/ceph.repo"
+ when: ceph_dev
+
+ - name: Install Ceph
++ yum: >
++ name=ceph
+ state=latest
+
---
- ## Check OS family
- #
+ - name: Fail on unsupported system
+ fail: "msg=System not supported {{ ansible_system }}"
+ when: "ansible_system not in ['Linux']"
- - include: RedHat.yml
+ - name: Fail on unsupported architecture
+ fail: "msg=Architecture not supported {{ ansible_architecture }}"
+ when: "ansible_architecture not in ['x86_64']"
+
+ - name: Fail on unsupported distribution
+ fail: "msg=Distribution not supported {{ ansible_os_family }}"
+ when: "ansible_os_family not in ['Debian', 'RedHat']"
+
+ - include: install_on_redhat.yml
when: ansible_os_family == 'RedHat'
- - include: Debian.yml
++
+ - include: install_on_debian.yml
when: ansible_os_family == 'Debian'
- src=ceph.conf.j2
- dest=/etc/ceph/ceph.conf
- owner=root
- group=root
+
+ - name: Check for a Ceph socket
+ shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
+ ignore_errors: true
+ register: socket
+
+ - name: Generate Ceph configuration file
+ template: >
++ src=ceph.conf.j2
++ dest=/etc/ceph/ceph.conf
++ owner=root
++ group=root
+ mode=0644
+ notify:
+ - restart ceph
+ - restart ceph-osd-all on ubuntu
+
+ - name: Disable OSD directory parsing by updatedb
+ command: updatedb -e /var/lib/ceph
++ ignore_errors: true
---\r
- # You can override default vars defined in defaults/main.yml here,\r
- # but I would advice to use host or group vars instead \r
+ # You can override vars by using host or group vars\r
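+ #\r
+ # For illustration only (not part of this change), such an override could live\r
+ # in a group_vars file, e.g. group_vars/all:\r
+ #   monitor_interface: eth0\r
+ #   journal_size: 512\r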
+ \r
+ ## Setup options\r
+ #\r
+ \r
+ # ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT\r
+ #fsid:\r
+ \r
+ ## Packages branch\r
+ ceph_key: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc\r
+ ceph_stable: true # use ceph stable branch\r
+ ceph_stable_release: firefly # ceph stable release\r
+ \r
+ # This option is needed for _both_ the stable and dev versions, so please always fill in the right version\r
+ # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11, centos7 (see http://ceph.com/rpm-firefly/)\r
+ ceph_stable_redhat_distro: el7\r
+ \r
+ ceph_dev: false # use ceph development branch\r
+ ceph_dev_branch: master # development branch you would like to use, e.g. master, wip-hack\r
+ # supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,\r
+ # fedora19, fedora20, opensuse12, sles0. (see http://gitbuilder.ceph.com/).\r
+ # For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the full name is _very_ important.\r
+ ceph_dev_redhat_distro: centos7\r
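+ #\r
+ # Illustrative example (not part of this change): to deploy from a development\r
+ # branch instead of the stable release, the flags above would be flipped, e.g.:\r
+ #   ceph_stable: false\r
+ #   ceph_dev: true\r
+ #   ceph_dev_branch: wip-hack\r
+ #   ceph_dev_redhat_distro: centos7\r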
+ \r
+ ## Ceph options\r
+ #\r
+ cephx: true\r
+ cephx_require_signatures: true\r
+ cephx_cluster_require_signatures: true\r
+ cephx_service_require_signatures: false\r
+ disable_in_memory_logs: true\r
+ \r
+ ## Monitor options\r
+ #\r
+ monitor_interface: eth1\r
+ mon_osd_down_out_interval: 600\r
+ mon_osd_min_down_reporters: 7 # number of OSDs per host + 1\r
+ mon_clock_drift_allowed: .15\r
+ mon_clock_drift_warn_backoff: 30\r
+ mon_osd_full_ratio: .95\r
+ mon_osd_nearfull_ratio: .85\r
+ mon_osd_report_timeout: 300\r
+ \r
+ ## OSD options\r
+ #\r
+ journal_size: 100\r
+ pool_default_pg_num: 128\r
+ pool_default_pgp_num: 128\r
+ pool_default_size: 2\r
+ pool_default_min_size: 1\r
+ cluster_network: 192.168.42.0/24\r
+ public_network: 192.168.42.0/24\r
+ osd_mkfs_type: xfs\r
+ osd_mkfs_options_xfs: -f -i size=2048\r
+ osd_mount_options_xfs: noatime\r
+ osd_mon_heartbeat_interval: 30\r
+ # CRUSH\r
+ pool_default_crush_rule: 0\r
+ osd_crush_update_on_start: "true"\r
+ # Object backend\r
+ osd_objectstore: filestore\r
+ # Performance tuning\r
+ filestore_merge_threshold: 40\r
+ filestore_split_multiple: 8\r
+ osd_op_threads: 8\r
+ filestore_op_threads: 8\r
+ filestore_max_sync_interval: 5\r
+ osd_max_scrubs: 1\r
+ # Recovery tuning\r
+ osd_recovery_max_active: 5\r
+ osd_max_backfills: 2\r
+ osd_recovery_op_priority: 2\r
++osd_recovery_max_chunk: 1048576\r
++osd_recovery_threads: 1\r
+ \r
+ ## MDS options\r
+ #\r
+ mds: true # set to false to disable mds configuration in ceph.conf\r
+ \r
+ # Rados Gateway options\r
+ #\r
+ radosgw: true # referenced in monitor role too.\r
+ #radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls\r
+ \r
+ ## Testing mode\r
+ # enable this mode _only_ when you have a single node\r
+ # if you don't want it, keep the option commented out\r
+ #common_single_host_mode: true\r
---
## Deploy Ceph metadata server(s)
- #
- name: Copy MDS bootstrap key
- copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring dest=/var/lib/ceph/bootstrap-mds/ceph.keyring owner=root group=root mode=600
+ copy: >
- src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring
- dest=/var/lib/ceph/bootstrap-mds/ceph.keyring
- owner=root
- group=root
++ src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring
++ dest=/var/lib/ceph/bootstrap-mds/ceph.keyring
++ owner=root
++ group=root
+ mode=600
when: cephx
- name: Create MDS directory
- action: file path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }} state=directory owner=root group=root mode=0644
+ file: >
- path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}
- state=directory
- owner=root
- group=root
++ path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}
++ state=directory
++ owner=root
++ group=root
+ mode=0644
when: cephx
- name: Create MDS keyring
changed_when: False
- name: Set MDS key permissions
- file: path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring mode=0600 owner=root group=root
+ file: >
- path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
- mode=0600
- owner=root
++ path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
++ mode=0600
++ owner=root
+ group=root
when: cephx
- name: Start the MDS service and add it to the init sequence
- service: name=ceph state=started enabled=yes args=mds
- service: >
- name=ceph
- state=started
- enabled=yes
++ service: >
++ name=ceph
++ state=started
++ enabled=yes
+ args=mds
changed_when: False
- name: Copy keys to the ansible server
- fetch: src={{ item }} dest=fetch/{{ fsid }}/{{ item }} flat=yes
+ fetch: >
- src={{ item }}
- dest=fetch/{{ fsid }}/{{ item }}
++ src={{ item }}
++ dest=fetch/{{ fsid }}/{{ item }}
+ flat=yes
when: cephx
with_items:
- /etc/ceph/ceph.client.admin.keyring # just in case another application needs it
- /etc/ceph/keyring.radosgw.gateway
- name: Drop in a motd script to report status when logging in
- copy: src=precise/92-ceph dest=/etc/update-motd.d/92-ceph owner=root group=root mode=0755
+ copy: >
- src=precise/92-ceph
- dest=/etc/update-motd.d/92-ceph
- owner=root
- group=root
++ src=precise/92-ceph
++ dest=/etc/update-motd.d/92-ceph
++ owner=root
++ group=root
+ mode=0755
when: ansible_distribution_release == 'precise'
--- /dev/null
- service: >
- name=ceph
- state=started
+ ---
+ # Activate means:
+ # - mount the volume in a temp location
+ # - allocate an osd id (if needed)
+ # - remount in the correct location /var/lib/ceph/osd/$cluster-$id
+ # - start ceph-osd
+ #
+
+ # This task is for disk devices only because of the explicit use of the first
+ # partition.
+
+ - name: Activate OSD(s) when device is a disk
+ command: |
+ ceph-disk activate {{ item.2 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
+ with_together:
+ - parted.results
+ - ispartition.results
+ - devices
+ when: item.0.rc == 0 and item.1.rc != 0
+ ignore_errors: True
+ changed_when: False
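+
+ # Illustration (added for clarity, not part of the original change): the regex above
+ # only rewrites HP cciss device names, so the partition suffix appended here differs
+ # by device type, e.g.:
+ #   /dev/sdb        -> ceph-disk activate /dev/sdb1
+ #   /dev/cciss/c0d0 -> ceph-disk activate /dev/cciss/c0d0p1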
+
+ # This task is for devices that are already partitions, so no partition number is appended.
+
+ - name: Activate OSD(s) when device is a partition
+ command: "ceph-disk activate {{ item.1 }}"
+ with_together:
+ - ispartition.results
+ - devices
+ when: item.0.rc == 0
+ ignore_errors: True
+ changed_when: False
+
+ - name: Start the OSD service and add it to the init sequence
++ service: >
++ name=ceph
++ state=started
+ enabled=yes
---
## Deploy Ceph Object Storage Daemon(s)
- #
+
+ - name: Install dependencies
+ apt: >
- pkg=parted
++ pkg=parted
+ state=present
+ when: ansible_os_family == 'Debian'
+
+ - name: Install dependencies
+ yum: >
- name=parted
++ name=parted
+ state=present
+ when: ansible_os_family == 'RedHat'
+
+ - name: Copy OSD bootstrap key
+ copy: >
- src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring
- dest=/var/lib/ceph/bootstrap-osd/ceph.keyring
- owner=root
- group=root
++ src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring
++ dest=/var/lib/ceph/bootstrap-osd/ceph.keyring
++ owner=root
++ group=root
+ mode=600
+ when: cephx
- include: journal_collocation.yml
when: journal_collocation
# Declaring more than one directory on the same filesystem will confuse Ceph.
- name: Create OSD directories
- file: path={{ item }} state=directory owner=root group=root
- file: >
- path={{ item }}
- state=directory
- owner=root
++ file: >
++ path={{ item }}
++ state=directory
++ owner=root
+ group=root
with_items: osd_directories
# Prepare means
changed_when: False
- name: Start the OSD service and add it to the init sequence
- service: name=ceph state=started enabled=yes args=osd
+ service: >
- name=ceph
- state=started
++ name=ceph
++ state=started
+ enabled=yes
---\r
# You can override default vars defined in defaults/main.yml here,\r
--# but I would advice to use host or group vars instead \r
++# but I would advise using host or group vars instead\r
+ \r
+ ## Ceph options\r
+ #\r
+ \r
+ # ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT\r
+ # fsid:\r
+ cephx: true\r
+ \r
+ # Devices to be used as OSDs\r
+ # You can pre-provision disks that are not present yet.\r
+ # Ansible will just skip them. Newly added disks will be\r
+ # automatically configured during the next run.\r
+ #\r
+ \r
+ \r
+ # !! WARNING !!\r
+ #\r
+ # /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\\r
+ #\r
+ # !! WARNING !!\r
+ \r
+ \r
+ # Declare devices\r
+ # All the scenarios inherit from the following device declaration\r
+ #\r
-devices: \r
++devices:\r
+ - /dev/sdb\r
+ - /dev/sdc\r
+ - /dev/sdd\r
+ - /dev/sde\r
+ \r
+ \r
+ # I. First scenario: journal and osd_data on the same device\r
+ # Use 'true' to enable this scenario\r
+ # This will collocate both journal and data on the same disk\r
+ # creating a partition at the beginning of the device\r
+ \r
+ journal_collocation: true\r
+ \r
+ \r
+ # II. Second scenario: single journal device for N OSDs\r
+ # Use 'true' to enable this scenario\r
+ \r
+ # deprecated, please use scenario III with a single raw_journal_device\r
+ \r
+ \r
+ # III. Third scenario: N journal devices for N OSDs\r
+ # Use 'true' to enable this scenario\r
+ #\r
+ # In the following example:\r
+ # * sdd and sde will get sdb as a journal\r
+ # * sdf and sdg will get sdc as a journal\r
+ # When starting you have 2 options:\r
+ # 1. Pre-allocate all the devices\r
+ # 2. Progressively add new devices\r
+ \r
+ raw_multi_journal: false\r
+ raw_journal_devices:\r
+ - /dev/sdb\r
+ - /dev/sdb\r
+ - /dev/sdc\r
+ - /dev/sdc\r
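+ #\r
+ # Illustrative pairing (not part of this change): the two lists are paired entry\r
+ # by entry, so the example described above corresponds to:\r
+ #devices:          raw_journal_devices:\r
+ #  - /dev/sdd        - /dev/sdb\r
+ #  - /dev/sde        - /dev/sdb\r
+ #  - /dev/sdf        - /dev/sdc\r
+ #  - /dev/sdg        - /dev/sdc\r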
+ \r
+ \r
+ # IV. Fourth scenario: use directory instead of disk for OSDs\r
+ # Use 'true' to enable this scenario\r
+ \r
+ osd_directory: false\r
-osd_directories: \r
- - /var/lib/ceph/osd/mydir1\r
- - /var/lib/ceph/osd/mydir2\r
- - /var/lib/ceph/osd/mydir3\r
- - /var/lib/ceph/osd/mydir4\r
++osd_directories:\r
++ - /var/lib/ceph/osd/mydir1\r
++ - /var/lib/ceph/osd/mydir2\r
++ - /var/lib/ceph/osd/mydir3\r
++ - /var/lib/ceph/osd/mydir4\r
--- /dev/null
- src=ceph-extra.repo
- dest=/etc/yum.repos.d
- owner=root
+ ---
+ ## Deploy RADOS Gateway
+ #
+
+ - name: Add Ceph extra repository
+ template: >
++ src=ceph-extra.repo
++ dest=/etc/yum.repos.d
++ owner=root
+ group=root
+
+ - name: Add special fastcgi repository key
+ rpm_key: key=http://dag.wieers.com/rpm/packages/RPM-GPG-KEY.dag.txt
+
+ - name: Add special fastcgi repository
+ command: rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
+
+ - name: "Install Apache, fastcgi, and Rados Gateway"
+ yum: >
+ name={{ item }}
+ state=present
+ with_items:
+ - httpd
+ - mod_fastcgi
+ - mod_fcgid
+ - ceph-radosgw
+
+ ## Prepare Apache
+ #
+
+ - name: Install Rados Gateway vhost
+ template: >
+ src=rgw.conf
+ dest=/etc/httpd/conf.d/rgw.conf
+ owner=root
+ group=root
+
+ ## Prepare RGW
+ #
+
+ - name: Create RGW directory
+ file: >
+ path=/var/lib/ceph/radosgw/{{ ansible_fqdn }}
+ state=directory
+ owner=root
+ group=root
+ mode=0644
+
+ - name: Install s3gw.fcgi script
+ copy: >
+ src=s3gw.fcgi
+ dest=/var/www/s3gw.fcgi
+ mode=0555
+ owner=root
+ group=root
+
+ - name: Disable default site
+ shell: sed -i "s/^[^+#]/#/g" /etc/httpd/conf.d/welcome.conf
+ changed_when: False
+ notify:
+ - restart apache2
+
+ ## If we don't perform this check, Ansible will start multiple instances of radosgw
+ - name: Check if RGW is started
+ command: /etc/init.d/ceph-radosgw status
+ register: rgwstatus
+ ignore_errors: True
+
+ - name: Start RGW
+ command: /etc/init.d/ceph-radosgw start
+ when: rgwstatus.rc != 0
## Check OS family
#
--- include: RedHat.yml
++- include: install_redhat.yml
when: ansible_os_family == 'RedHat'
--- include: Debian.yml
++- include: install_debian.yml
when: ansible_os_family == 'Debian'
---
- - apt_repository: repo=ppa:vbernat/haproxy-1.5 state=present
-- name: add repository
++- name: Add repository
+ apt_repository: >
- repo=ppa:vbernat/haproxy-1.5
++ repo=ppa:vbernat/haproxy-1.5
+ state=present
- - apt: name={{ item }} state=present
-- name: install haproxy
++- name: Install haproxy
+ apt: >
+ name={{ item }}
+ state=present
with_items:
- haproxy
- socat
- - copy: src=precise/haproxy dest=/etc/default/haproxy
-- name: copy default configuration
++- name: Copy default configuration
+ copy: >
+ src=precise/haproxy
+ dest=/etc/default/haproxy
notify: restart haproxy
- - template: src=precise/haproxy.cfg dest=/etc/haproxy/haproxy.cfg backup=yes
-- name: create configuration
++- name: Create configuration
+ template: >
+ src=precise/haproxy.cfg
+ dest=/etc/haproxy/haproxy.cfg
+ backup=yes
notify: restart haproxy
- - service: name=haproxy state=started enabled=yes
-- name: start and enable haproxy
++- name: Start and enable haproxy
+ service: >
+ name=haproxy
+ state=started
+ enabled=yes
- ceph-mds
post_tasks:
- name: restart metadata server(s)
- service: name=ceph state=restarted args=mds
+ service: >
- name=ceph
- state=restarted
++ name=ceph
++ state=restarted
+ args=mds