git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Merge branch 'master' of https://github.com/jjoos/ceph-ansible into jjoos-master
author Sébastien Han <sebastien.han@enovance.com>
Wed, 5 Nov 2014 16:57:28 +0000 (17:57 +0100)
committer Sébastien Han <sebastien.han@enovance.com>
Thu, 6 Nov 2014 14:18:56 +0000 (15:18 +0100)
Signed-off-by: Sébastien Han <sebastien.han@enovance.com>
Conflicts:
roles/ceph-common/defaults/main.yml
roles/ceph-common/tasks/Debian.yml
roles/ceph-osd/tasks/journal_collocation.yml
roles/ceph-osd/tasks/osd_directory.yml
roles/ceph-osd/tasks/raw_journal.yml
roles/ceph-osd/tasks/raw_multi_journal.yml

15 files changed:
1  2 
roles/ceph-common/handlers/main.yml
roles/ceph-common/tasks/install_on_debian.yml
roles/ceph-common/tasks/install_on_redhat.yml
roles/ceph-common/tasks/main.yml
roles/ceph-common/vars/main.yml
roles/ceph-mds/tasks/main.yml
roles/ceph-mon/tasks/main.yml
roles/ceph-osd/tasks/activate_osds.yml
roles/ceph-osd/tasks/main.yml
roles/ceph-osd/tasks/osd_directory.yml
roles/ceph-osd/vars/main.yml
roles/ceph-radosgw/tasks/install_redhat.yml
roles/ceph-radosgw/tasks/main.yml
roles/haproxy/tasks/precise.yml
rolling_update.yml

index 7de099f7c59e0b8b079e59e800fd1d812d522b5f,43787c4611fcc7b2169aab6b70c2595268c906c0..ac61a1b9f40e4e03e83e48743a9f355bb12b4f01
@@@ -1,12 -1,11 +1,11 @@@
  ---
 -  action: apt update-cache=yes
+ - name: update apt cache
++  apt: update-cache=yes
  
- - name: "update apt cache"
-   action: apt update-cache=yes
- - name: "restart ceph ubuntu"
-   shell: service ceph restart ; service ceph-osd-all restart
-   when: socket.rc == 0
- - name: "restart ceph debian redhat"
+ - name: restart ceph
 -  shell: service ceph restart
 +  command: service ceph restart
    when: socket.rc == 0
 -  shell: service ceph-osd-all restart
 -  when: socket.rc == 0 and ansible_distribution == 'Ubuntu'
+ - name: restart ceph-osd-all on ubuntu
++  shell: service ceph restart ; service ceph-osd-all restart
++  when: socket.rc == 0 and ansible_distribution == 'Ubuntu'
index 0000000000000000000000000000000000000000,c9089d326db22f377a6349d2563a5ad1f8611d36..972210065ead4fac96ffa8c63fb20ebb25c36de3
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,47 +1,47 @@@
 -  apt: > 
 -    pkg={{ item }} 
 -    state=present 
 -    update_cache=yes 
+ ---
+ - name: Install dependencies
 -  apt_key: > 
++  apt: >
++    pkg={{ item }}
++    state=present
++    update_cache=yes
+     cache_valid_time=3600
+   with_items:
+     - python-pycurl
+     - ntp
+     - hdparm
+ - name: Install the Ceph repository stable key
 -  apt_key: > 
++  apt_key: >
+     data="{{ lookup('file', 'cephstable.asc') }}"
+     state=present
+   when: ceph_stable
+ - name: Install the Ceph developement repository key
 -  apt_repository: > 
++  apt_key: >
+     data="{{ lookup('file', 'cephdev.asc') }}"
+     state=present
+   when: ceph_dev
+ - name: Add Ceph stable repository
++  apt_repository: >
+     repo="deb http://ceph.com/debian-{{ ceph_stable_release }}/ {{ ansible_lsb.codename }} main"
+     state=present
+   when: ceph_stable
+ - name: Add Ceph development repository
+   apt_repository: >
+     repo="deb http://gitbuilder.ceph.com/ceph-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/{{ ceph_dev_branch }} {{ ansible_lsb.codename }} main"
+     state=present
+   when: ceph_dev
+ - name: Install Ceph
+   apt: >
+     pkg={{ item }}
+     state=latest
+   with_items:
+     - ceph
+     - ceph-common    #|
+     - ceph-fs-common #|--> yes, they are already all dependencies from 'ceph'
+     - ceph-fuse      #|--> however while proceding to rolling upgrades and the 'ceph' package upgrade
+     - ceph-mds       #|--> they don't get update so we need to force them
+     - libcephfs1     #|
index 0000000000000000000000000000000000000000,2903113c8268178145e4e73bd1b96f43032c1982..0463db46f5f173e5cd6cc9995dc7e6bcaa95fc36
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,35 +1,35 @@@
 -  yum: > 
 -    name={{ item }} 
+ ---
+ - name: Install dependencies
 -    key={{ ceph_key }} 
++  yum: >
++    name={{ item }}
+     state=present
+   with_items:
+     - python-pycurl
+     - ntp
+     - hdparm
+ - name: Install the Ceph stable repository key
+   rpm_key: >
 -    key={{ ceph_key }} 
++    key={{ ceph_key }}
+     state=present
+   when: ceph_stable
+ - name: Install the Ceph developement repository key
+   rpm_key: >
 -  yum: > 
 -    name=ceph 
++    key={{ ceph_key }}
+     state=present
+   when: ceph_dev
+ - name: Add Ceph stable repository
+   command: "rpm -U http://ceph.com/rpm-{{ ceph_stable_release }}/{{ redhat_distro }}/noarch/ceph-release-1-0.el6.noarch.rpm creates=/etc/yum.repos.d/ceph.repo"
+   when: ceph_stable
+ - name: Add Ceph development repository
+   command: "rpm -U http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm creates=/etc/yum.repos.d/ceph.repo"
+   when: ceph_dev
+ - name: Install Ceph
++  yum: >
++    name=ceph
+     state=latest
index 602b57c69d9e0fdb02145d5c3aad6b17f823cfe8,4129bea96aa83b0896a4e963393dc428a9899872..4d23e76a08d8a4f23f86eac96b6309396b54a656
@@@ -1,8 -1,36 +1,38 @@@
  ---
- ## Check OS family
- #
+ - name: Fail on unsupported system
+   fail: "msg=System not supported {{ ansible_system }}"
+   when: "ansible_system not in ['Linux']"
  
- - include: RedHat.yml
+ - name: Fail on unsupported architecture
+   fail: "msg=Architecture not supported {{ ansible_architecture }}"
+   when: "ansible_architecture not in ['x86_64']"
+ - name: Fail on unsupported distribution
+   fail: "msg=Distribution not supported {{ ansible_os_family }}"
+   when: "ansible_os_family not in ['Debian', 'RedHat']"
+ - include: install_on_redhat.yml
    when: ansible_os_family == 'RedHat'
- - include: Debian.yml
++
+ - include: install_on_debian.yml
    when: ansible_os_family == 'Debian'
 -    src=ceph.conf.j2 
 -    dest=/etc/ceph/ceph.conf 
 -    owner=root 
 -    group=root 
+ - name: Check for a Ceph socket
+   shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
+   ignore_errors: true
+   register: socket
+ - name: Generate Ceph configuration file
+   template: >
++    src=ceph.conf.j2
++    dest=/etc/ceph/ceph.conf
++    owner=root
++    group=root
+     mode=0644
+   notify:
+     - restart ceph
+     - restart ceph-osd-all on ubuntu
+ - name: Disable OSD directory parsing by updatedb
+   command: updatedb -e /var/lib/ceph
++  ignore_errors: true
index d91813410431da7dadb0cdbc332581587e05c5fc,abe6f2c130a938f9f60cfbf8afe3bd45a60fad20..bc7de5048cf17f79d38d0acdd0b09a66e150ea0b
@@@ -1,3 -1,87 +1,89 @@@
  ---\r
- # You can override default vars defined in defaults/main.yml here,\r
- # but I would advice to use host or group vars instead \r
+ # You can override vars by using host or group vars\r
\r
+ ## Setup options\r
+ #\r
\r
+ # ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT\r
+ #fsid:\r
\r
+ ## Packages branch\r
+ ceph_key: https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc\r
+ ceph_stable: true # use ceph stable branch\r
+ ceph_stable_release: firefly # ceph stable release\r
\r
+ # This option is needed for _both_ stable and dev version, so please always fill the right version\r
+ # # supported distros are el6, rhel6, f18, f19, opensuse12.2, sles11, centos7 (see http://ceph.com/rpm-firefly/)\r
+ ceph_stable_redhat_distro: el7\r
\r
+ ceph_dev: false # use ceph developement branch\r
+ ceph_dev_branch: master # developement branch you would like to use e.g: master, wip-hack\r
+ # supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,\r
+ # fedora19, fedora20, opensuse12, sles0. (see http://gitbuilder.ceph.com/).\r
+ # For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the fullname is _very_ important.\r
+ ceph_dev_redhat_distro: centos7\r
\r
+ ## Ceph options\r
+ #\r
+ cephx: true\r
+ cephx_require_signatures: true\r
+ cephx_cluster_require_signatures: true\r
+ cephx_service_require_signatures: false\r
+ disable_in_memory_logs: true\r
\r
+ ## Monitor options\r
+ #\r
+ monitor_interface: eth1\r
+ mon_osd_down_out_interval: 600\r
+ mon_osd_min_down_reporters: 7 # number of OSDs per host + 1\r
+ mon_clock_drift_allowed: .15\r
+ mon_clock_drift_warn_backoff: 30\r
+ mon_osd_full_ratio: .95\r
+ mon_osd_nearfull_ratio: .85\r
+ mon_osd_report_timeout: 300\r
\r
+ ## OSD options\r
+ #\r
+ journal_size: 100\r
+ pool_default_pg_num: 128\r
+ pool_default_pgp_num: 128\r
+ pool_default_size: 2\r
+ pool_default_min_size: 1\r
+ cluster_network: 192.168.42.0/24\r
+ public_network: 192.168.42.0/24\r
+ osd_mkfs_type: xfs\r
+ osd_mkfs_options_xfs: -f -i size=2048\r
+ osd_mount_options_xfs: noatime\r
+ osd_mon_heartbeat_interval: 30\r
+ # CRUSH\r
+ pool_default_crush_rule: 0\r
+ osd_crush_update_on_start: "true"\r
+ # Object backend\r
+ osd_objectstore: filestore\r
+ # Performance tuning\r
+ filestore_merge_threshold: 40\r
+ filestore_split_multiple: 8\r
+ osd_op_threads: 8\r
+ filestore_op_threads: 8\r
+ filestore_max_sync_interval: 5\r
+ osd_max_scrubs: 1\r
+ # Recovery tuning\r
+ osd_recovery_max_active: 5\r
+ osd_max_backfills: 2\r
+ osd_recovery_op_priority: 2\r
++osd_recovery_max_chunk: 1048576\r
++osd_recovery_threads: 1\r
\r
+ ## MDS options\r
+ #\r
+ mds: true # disable mds configuration in ceph.conf\r
\r
+ # Rados Gateway options\r
+ #\r
+ radosgw: true # referenced in monitor role too.\r
+ #radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls\r
\r
+ ## Testing mode\r
+ # enable this mode _only_ when you have a single node\r
+ # if you don't want it keep the option commented\r
+ #common_single_host_mode: true\r
index 1b08d8eb749e51db5f8b95899a7a8b994fdaed65,26cb5f279a2052194f9e9b1d215366417283d123..35393ba422f2ccedd54d51b7874c84a113553875
@@@ -1,13 -1,22 +1,22 @@@
  ---
  ## Deploy Ceph metadata server(s)
- #
  
  - name: Copy MDS bootstrap key
-   copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring dest=/var/lib/ceph/bootstrap-mds/ceph.keyring owner=root group=root mode=600
+   copy: >
 -    src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring 
 -    dest=/var/lib/ceph/bootstrap-mds/ceph.keyring 
 -    owner=root 
 -    group=root 
++    src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-mds/ceph.keyring
++    dest=/var/lib/ceph/bootstrap-mds/ceph.keyring
++    owner=root
++    group=root
+     mode=600
    when: cephx
  
  - name: Create MDS directory
-   action: file path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }} state=directory owner=root group=root mode=0644
+   file: >
 -    path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }} 
 -    state=directory 
 -    owner=root 
 -    group=root 
++    path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}
++    state=directory
++    owner=root
++    group=root
+     mode=0644
    when: cephx
  
  - name: Create MDS keyring
    changed_when: False
  
  - name: Set MDS key permissions
-   file: path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring mode=0600 owner=root group=root
+   file: >
 -    path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring 
 -    mode=0600 
 -    owner=root 
++    path=/var/lib/ceph/mds/ceph-{{ ansible_hostname }}/keyring
++    mode=0600
++    owner=root
+     group=root
    when: cephx
  
  - name: Start and add that the MDS service to the init sequence
-   service: name=ceph state=started enabled=yes args=mds
 -  service: > 
 -    name=ceph 
 -    state=started 
 -    enabled=yes 
++  service: >
++    name=ceph
++    state=started
++    enabled=yes
+     args=mds
index b0b0c3e545d218389e307fa30a13dbd21ecbd578,a57746b87e4690571c6bc2950b09ae4632e98d30..f64c1ef9d2414fcfd70c97aaaeb37241973a0c82
    changed_when: False
  
  - name: Copy keys to the ansible server
-   fetch: src={{ item }} dest=fetch/{{ fsid }}/{{ item }} flat=yes
+   fetch: >
 -    src={{ item }} 
 -    dest=fetch/{{ fsid }}/{{ item }} 
++    src={{ item }}
++    dest=fetch/{{ fsid }}/{{ item }}
+     flat=yes
    when: cephx
    with_items:
      - /etc/ceph/ceph.client.admin.keyring # just in case another application needs it
      - /etc/ceph/keyring.radosgw.gateway
  
  - name: Drop in a motd script to report status when logging in
-   copy: src=precise/92-ceph dest=/etc/update-motd.d/92-ceph owner=root group=root mode=0755
+   copy: >
 -    src=precise/92-ceph 
 -    dest=/etc/update-motd.d/92-ceph 
 -    owner=root 
 -    group=root 
++    src=precise/92-ceph
++    dest=/etc/update-motd.d/92-ceph
++    owner=root
++    group=root
+     mode=0755
    when: ansible_distribution_release == 'precise'
index 0000000000000000000000000000000000000000,2d937a53062e4c8c44212d04a5a8c5af909e0511..806b2b187e91d67dd14e709d8d81a5f9fa797e12
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,38 +1,38 @@@
 -  service: > 
 -    name=ceph 
 -    state=started 
+ ---
+ # Activate means:
+ # - mount the volume in a temp location
+ # - allocate an osd id (if needed)
+ # - remount in the correct location /var/lib/ceph/osd/$cluster-$id
+ # - start ceph-osd
+ #
+ # This task is for disk devices only because of the explicit use of the first
+ # partition.
+ - name: Activate OSD(s) when device is a disk
+   command: |
+     ceph-disk activate {{ item.2 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
+   with_together:
+     - parted.results
+     - ispartition.results
+     - devices
+   when: item.0.rc == 0 and item.1.rc != 0
+   ignore_errors: True
+   changed_when: False
+ # This task is for partitions because we don't explicitly use a partition.
+ - name: Activate OSD(s) when device is a partition
+   command: "ceph-disk activate {{ item.1 }}"
+   with_together:
+     - ispartition.results
+     - devices
+   when: item.0.rc == 0
+   ignore_errors: True
+   changed_when: False
+ - name: Start and add that the OSD service to the init sequence
++  service: >
++    name=ceph
++    state=started
+     enabled=yes
index 1ff2a2db2db0136f13ace81b9af3906417e53d04,f9288bf6db85be2342a1d8ae5ef197088b6003f1..e7bc3d41300e6a2d8cc6bf6ab3b33534fbbbb1ca
@@@ -1,6 -1,26 +1,26 @@@
  ---
  ## Deploy Ceph Oject Storage Daemon(s)
- #
+ - name: Install dependencies
+   apt: >
 -    pkg=parted 
++    pkg=parted
+     state=present
+   when: ansible_os_family == 'Debian'
+ - name: Install dependencies
+   yum: >
 -    name=parted 
++    name=parted
+     state=present
+   when: ansible_os_family == 'RedHat'
+ - name: Copy OSD bootstrap key
+   copy: >
 -    src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring 
 -    dest=/var/lib/ceph/bootstrap-osd/ceph.keyring 
 -    owner=root 
 -    group=root 
++    src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring
++    dest=/var/lib/ceph/bootstrap-osd/ceph.keyring
++    owner=root
++    group=root
+     mode=600
+   when: cephx
  
  - include: journal_collocation.yml
    when: journal_collocation
index 419da1476d5c03c2aab0c82193f25fc938ab4ab3,3d477d3b04ebb8a73335250642f93e6dafec5817..d01a206f3afbb45556c23ebc2f6edd3c5e1c9098
@@@ -19,7 -6,11 +6,11 @@@
  # Declaring more than one directory on the same filesystem will confuse Ceph.
  
  - name: Create OSD directories
-   file: path={{ item }} state=directory owner=root group=root
 -  file: > 
 -    path={{ item }} 
 -    state=directory 
 -    owner=root 
++  file: >
++    path={{ item }}
++    state=directory
++    owner=root
+     group=root
    with_items: osd_directories
  
  # Prepare means
@@@ -53,4 -44,7 +44,7 @@@
    changed_when: False
  
  - name: Start and add that the OSD service to the init sequence
-   service: name=ceph state=started enabled=yes args=osd
+   service: >
 -    name=ceph 
 -    state=started 
++    name=ceph
++    state=started
+     enabled=yes
index d91813410431da7dadb0cdbc332581587e05c5fc,020470c699c14ef09d4482c9833f4b775ba14e8f..bd81387d7a115feda97404d9b5da3f2283306bbf
@@@ -1,3 -1,76 +1,76 @@@
  ---\r
  # You can override default vars defined in defaults/main.yml here,\r
--# but I would advice to use host or group vars instead \r
++# but I would advice to use host or group vars instead\r
\r
+ ## Ceph options\r
+ #\r
\r
+ # ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT\r
+ # fsid:\r
+ cephx: true\r
\r
+ # Devices to be used as OSDs\r
+ # You can pre-provision disks that are not present yet.\r
+ # Ansible will just skip them. Newly added disk will be\r
+ # automatically configured during the next run.\r
+ #\r
\r
\r
+ # !! WARNING !!\r
+ #\r
+ # /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\\r
+ #\r
+ # !! WARNING !!\r
\r
\r
+ # Declare devices\r
+ # All the scenarii inherit from the following device declaration\r
+ #\r
 -devices: \r
++devices:\r
+   - /dev/sdb\r
+   - /dev/sdc\r
+   - /dev/sdd\r
+   - /dev/sde\r
\r
\r
+ # I. First scenario: journal and osd_data on the same device\r
+ # Use 'true' to enable this scenario\r
+ # This will collocate both journal and data on the same disk\r
+ # creating a partition at the beginning of the device\r
\r
+ journal_collocation: true\r
\r
\r
+ # II. Second scenario: single journal device for N OSDs\r
+ # Use 'true' to enable this scenario\r
\r
+ # deprecated, please use scenario III with a single raw_journal_device\r
\r
\r
+ # III. Third scenario: N journal devices for N OSDs\r
+ # Use 'true' to enable this scenario\r
+ #\r
+ # In the following example:\r
+ # * sdd and sde will get sdb as a journal\r
+ # * sdf and sdg will get sdc as a journal\r
+ # While starting you have 2 options:\r
+ # 1. Pre-allocate all the devices\r
+ # 2. Progressively add new devices\r
\r
+ raw_multi_journal: false\r
+ raw_journal_devices:\r
+   - /dev/sdb\r
+   - /dev/sdb\r
+   - /dev/sdc\r
+   - /dev/sdc\r
\r
\r
+ # IV. Fourth scenario: use directory instead of disk for OSDs\r
+ # Use 'true' to enable this scenario\r
\r
+ osd_directory: false\r
 -osd_directories: \r
 - - /var/lib/ceph/osd/mydir1\r
 - - /var/lib/ceph/osd/mydir2\r
 - - /var/lib/ceph/osd/mydir3\r
 - - /var/lib/ceph/osd/mydir4\r
++osd_directories:\r
++  - /var/lib/ceph/osd/mydir1\r
++  - /var/lib/ceph/osd/mydir2\r
++  - /var/lib/ceph/osd/mydir3\r
++  - /var/lib/ceph/osd/mydir4\r
index 0000000000000000000000000000000000000000,511af7b48e2a869f76acaf18ccf74ea094ec4e15..9da4ee4e1659eed3cfa6b5daab6cc5d3d84d7386
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,71 +1,71 @@@
 -    src=ceph-extra.repo 
 -    dest=/etc/yum.repos.d 
 -    owner=root 
+ ---
+ ## Deploy RADOS Gateway
+ #
+ - name: Add Ceph extra
+   template: >
++    src=ceph-extra.repo
++    dest=/etc/yum.repos.d
++    owner=root
+     group=root
+ - name: Add special fastcgi repository key
+   rpm_key: key=http://dag.wieers.com/rpm/packages/RPM-GPG-KEY.dag.txt
+ - name: Add special fastcgi repository
+   command: rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
+ - name: "Install Apache, fastcgi, and Rados Gateway"
+   yum: >
+     name={{ item }}
+     state=present
+   with_items:
+     - httpd
+     - mod_fastcgi
+     - mod_fcgid
+     - ceph-radosgw
+ ## Prepare Apache
+ #
+ - name: Install Rados Gateway vhost
+   template: >
+     src=rgw.conf
+     dest=/etc/httpd/conf.d/rgw.conf
+     owner=root
+     group=root
+ ## Prepare RGW
+ #
+ - name: Create RGW directory
+   file: >
+     path=/var/lib/ceph/radosgw/{{ ansible_fqdn }}
+     state=directory
+     owner=root
+     group=root
+     mode=0644
+ - name: Install s3gw.fcgi script
+   copy: >
+     src=s3gw.fcgi
+     dest=/var/www/s3gw.fcgi
+     mode=0555
+     owner=root
+     group=root
+ - name: Disable default site
+   shell: sed -i "s/^[^+#]/#/g" /etc/httpd/conf.d/welcome.conf
+   changed_when: False
+   notify:
+     - restart apache2
+ ## If we don't perform this check Ansible will start multiple instance of radosgw
+ - name: Check if RGW is started
+   command: /etc/init.d/ceph-radosgw status
+   register: rgwstatus
+   ignore_errors: True
+ - name: Start RGW
+   command: /etc/init.d/ceph-radosgw start
+   when: rgwstatus.rc != 0
index f23dc609796ac79c73450b3dbbdc3b6fde285b45,9463a82503c68e53b52f5be4a6f2796e97d717e4..3f993725dfb6024744905eb17cfd22121dd22993
@@@ -2,8 -10,8 +10,8 @@@
  ## Check OS family
  #
  
--- include: RedHat.yml
++- include: install_redhat.yml
    when: ansible_os_family == 'RedHat'
  
--- include: Debian.yml
++- include: install_debian.yml
    when: ansible_os_family == 'Debian'
index d1e2f0518823de6abe86f79b9d671b762891169e,b9c409fb1802aa6b043517e2c9e5a775eb97e617..428b054e99c04ad907658c48767579c66aae025f
@@@ -1,15 -1,32 +1,32 @@@
  ---
- - apt_repository: repo=ppa:vbernat/haproxy-1.5 state=present
 -- name: add repository
++- name: Add repository
+   apt_repository: >
 -    repo=ppa:vbernat/haproxy-1.5 
++    repo=ppa:vbernat/haproxy-1.5
+     state=present
  
- - apt: name={{ item }} state=present
 -- name: install haproxy
++- name: Install haproxy
+   apt: >
+     name={{ item }}
+     state=present
    with_items:
     - haproxy
     - socat
  
- - copy: src=precise/haproxy dest=/etc/default/haproxy
 -- name: copy default configuration
++- name: Copy default configuration
+   copy: >
+     src=precise/haproxy
+     dest=/etc/default/haproxy
    notify: restart haproxy
  
- - template: src=precise/haproxy.cfg dest=/etc/haproxy/haproxy.cfg backup=yes
 -- name: create configuration
++- name: Create configuration
+   template: >
+     src=precise/haproxy.cfg
+     dest=/etc/haproxy/haproxy.cfg
+     backup=yes
    notify: restart haproxy
  
- - service: name=haproxy state=started enabled=yes
 -- name: start and enable haproxy
++- name: Start and enable haproxy
+   service: >
+     name=haproxy
+     state=started
+     enabled=yes
index 56b63573eadbdf5c017d0629e7a779d88202a88e,798def97fbd99e508f433f98f4c896b36cb97956..3c606271c1d3158dda88759954ef430e1b75ea7a
@@@ -47,4 -50,7 +50,7 @@@
    - ceph-mds
    post_tasks:
    - name: restart metadata server(s)
-     service: name=ceph state=restarted args=mds
+     service: >
 -      name=ceph 
 -      state=restarted 
++      name=ceph
++      state=restarted
+       args=mds