git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Add handlers for containerized deployment
author     Guillaume Abrioux <gabrioux@redhat.com>
           Fri, 28 Jul 2017 23:00:06 +0000 (01:00 +0200)
committer  Guillaume Abrioux <gabrioux@redhat.com>
           Wed, 2 Aug 2017 15:12:20 +0000 (17:12 +0200)
Until now, there were no handlers for containerized deployments.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
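
These handlers are wired to tasks through Ansible's notify/listen mechanism: a task notifies a topic such as "restart ceph mons", and every handler listening on that topic runs once at the end of the play. A minimal sketch of the pattern (the notifying task is illustrative and not part of this commit; the handler names are the ones introduced below in roles/ceph-defaults/handlers/main.yml):

- name: render ceph configuration        # illustrative task, not part of this commit
  template:
    src: ceph.conf.j2
    dest: /etc/ceph/{{ cluster }}.conf
  notify:
    - restart ceph mons

# both handlers below run when "restart ceph mons" is notified
- name: copy mon restart script
  template:
    src: restart_mon_daemon.sh.j2
    dest: /tmp/restart_mon_daemon.sh
    mode: 0750
  listen: "restart ceph mons"

- name: restart ceph mon daemon(s)
  command: /tmp/restart_mon_daemon.sh
  listen: "restart ceph mons"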
18 files changed:
infrastructure-playbooks/purge-docker-cluster.yml
infrastructure-playbooks/rolling_update.yml
roles/ceph-common/handlers/main.yml [deleted file]
roles/ceph-common/tasks/checks/check_socket.yml [deleted file]
roles/ceph-common/tasks/main.yml
roles/ceph-common/templates/restart_mon_daemon.sh.j2 [deleted file]
roles/ceph-common/templates/restart_osd_daemon.sh.j2 [deleted file]
roles/ceph-defaults/defaults/main.yml
roles/ceph-defaults/handlers/main.yml [new file with mode: 0644]
roles/ceph-defaults/tasks/check_socket.yml [new file with mode: 0644]
roles/ceph-defaults/tasks/main.yml
roles/ceph-defaults/templates/restart_mon_daemon.sh.j2 [new file with mode: 0644]
roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 [new file with mode: 0644]
roles/ceph-docker-common/tasks/create_configs.yml
roles/ceph-mon/defaults/main.yml
roles/ceph-rgw/tasks/docker/start_docker_rgw.yml
roles/ceph-rgw/templates/ceph-radosgw.service.j2 [new file with mode: 0644]
roles/ceph-rgw/templates/ceph-rgw.service.j2 [deleted file]

diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml
index 9ef7ccc8214d5a57c44f5fc4b7b8ec7c34a6bc08..03c623f61461fef7ac5324309c663fd9d44c1f8d 100644 (file)
 
   tasks:
 
-  - name: disable ceph rgw service
+# For backward compatibility
+  - name: disable ceph rgw service (old unit name, for backward compatibility)
     service:
       name: "ceph-rgw@{{ ansible_hostname }}"
       state: stopped
       enabled: no
     ignore_errors: true
 
+  - name: disable ceph rgw service (new unit name)
+    service:
+      name: "ceph-radosgw@{{ ansible_hostname }}"
+      state: stopped
+      enabled: no
+    ignore_errors: true
+
   - name: remove ceph rgw container
     docker:
       image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
 
   - name: remove ceph rgw service
     file:
-      path: /etc/systemd/system/ceph-rgw@.service
+      path: "{{ item }}"
       state: absent
+    with_items:
+# For backward compatibility
+      - /etc/systemd/system/ceph-rgw@.service
+      - /etc/systemd/system/ceph-radosgw@.service
 
   - name: remove ceph rgw image
     docker_image:
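
Since both the old (ceph-rgw@) and new (ceph-radosgw@) unit names have to be handled during purge, an equivalent, more compact form would loop over both names; this sketch only restates the backward-compatibility logic above and is not part of the commit:

  - name: disable ceph rgw service (old and new unit names)
    service:
      name: "{{ item }}@{{ ansible_hostname }}"
      state: stopped
      enabled: no
    ignore_errors: true
    with_items:
      - ceph-rgw       # old unit name, kept for backward compatibility
      - ceph-radosgw   # new unit name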
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index 5c0919732b1747865b1e1d3aed79fbaf5f675de9..9ddd4a77c571ea0a2c7ce6a560c097b35f58fff5 100644 (file)
 
     - name: restart containerized ceph rgws with systemd
       service:
-        name: ceph-rgw@{{ ansible_hostname }}
+        name: ceph-radosgw@{{ ansible_hostname }}
         state: restarted
         enabled: yes
       when:
diff --git a/roles/ceph-common/handlers/main.yml b/roles/ceph-common/handlers/main.yml
deleted file mode 100644 (file)
index cf3ed1d..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
----
-- name: update apt cache
-  apt:
-    update-cache: yes
-  when: ansible_os_family == 'Debian'
-
-- block:
-  - name: copy mon restart script
-    template:
-      src: restart_mon_daemon.sh.j2
-      dest: /tmp/restart_mon_daemon.sh
-      owner: root
-      group: root
-      mode: 0750
-    listen: "restart ceph mons"
-
-  - name: restart ceph mon daemon(s)
-    command: /tmp/restart_mon_daemon.sh
-    listen: "restart ceph mons"
-
-  when:
-# We do not want to run these checks on initial deployment (`socket.rc == 0`)
-    - socket.rc == 0
-    - ceph_current_fsid.rc == 0
-    - mon_group_name in group_names
-
-# This does not just restart OSDs but everything else too. Unfortunately
-# at this time the ansible role does not have an OSD id list to use
-# for restarting them specifically.
-- block:
-  - name: copy osd restart script
-    template:
-      src: restart_osd_daemon.sh.j2
-      dest: /tmp/restart_osd_daemon.sh
-      owner: root
-      group: root
-      mode: 0750
-    listen: "restart ceph osds"
-
-  - name: restart ceph osds daemon(s)
-    command: /tmp/restart_osd_daemon.sh
-    listen: "restart ceph osds"
-    when: handler_health_osd_check
-
-  when:
-# We do not want to run these checks on initial deployment (`socket.rc == 0`)
-# except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
-    - ((crush_location is defined and crush_location) or socket.rc == 0)
-    - ceph_current_fsid.rc == 0
-    - osd_group_name in group_names
-# See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
-    - inventory_hostname in play_hosts
-
-- name: restart ceph mdss
-  service:
-    name: ceph-mds@{{ mds_name }}
-    state: restarted
-  # serial: 1 would be the proper solution here, but that can only be set on play level
-  # upstream issue: https://github.com/ansible/ansible/issues/12170
-  run_once: true
-  with_items: "{{ groups.get(mds_group_name, []) }}"
-  delegate_to: "{{ item }}"
-  when:
-    - mds_group_name in group_names
-
-- name: restart ceph rgws
-  service:
-    name: ceph-radosgw@rgw.{{ ansible_hostname }}
-    state: restarted
-  # serial: 1 would be the proper solution here, but that can only be set on play level
-  # upstream issue: https://github.com/ansible/ansible/issues/12170
-  run_once: true
-  with_items: "{{ groups.get(rgw_group_name, []) }}"
-  delegate_to: "{{ item }}"
-  when:
-    - rgw_group_name in group_names
-
-- name: restart ceph nfss
-  service:
-    name: nfs-ganesha
-    state: restarted
-  when:
-    - nfs_group_name in group_names
diff --git a/roles/ceph-common/tasks/checks/check_socket.yml b/roles/ceph-common/tasks/checks/check_socket.yml
deleted file mode 100644 (file)
index 79b512c..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
----
-# These checks are used to avoid running handlers at initial deployment.
-- name: check for a ceph socket
-  shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: socket
-
-- name: check for a rados gateway socket
-  shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1"
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: socketrgw
diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml
index b2c1a0e3f271b96acd427c7d83780f0acb9543eb..9c65a27bfc67d8c4a2d95740390d0413d178a7c0 100644 (file)
     - ceph_current_fsid.rc == 0
     - mon_group_name in group_names
 
-- include: ./checks/check_socket.yml
 - include: create_ceph_initial_dirs.yml
 - include: generate_ceph_conf.yml
 - include: create_rbd_client_dir.yml
diff --git a/roles/ceph-common/templates/restart_mon_daemon.sh.j2 b/roles/ceph-common/templates/restart_mon_daemon.sh.j2
deleted file mode 100644 (file)
index 4424cca..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-RETRIES="{{ handler_health_mon_check_retries }}"
-DELAY="{{ handler_health_mon_check_delay }}"
-MONITOR_NAME="{{ monitor_name }}"
-CLUSTER="{{ cluster }}"
-SOCKET=/var/run/ceph/${CLUSTER}-mon.${MONITOR_NAME}.asok
-
-
-check_quorum() {
-while [ $RETRIES -ne 0 ]; do
-  MEMBERS=$(ceph --cluster ${CLUSTER} -s --format json | sed -r 's/.*"quorum_names":(\[[^]]+\]).*/\1/')
-  test "${MEMBERS/$MONITOR_NAME}" != "$MEMBERS" && exit 0
-  sleep $DELAY
-  let RETRIES=RETRIES-1
-done
-# If we reach this point, it means there is a problem with the quorum
-echo "Error with quorum."
-echo "cluster status:"
-ceph --cluster ${CLUSTER} -s
-exit 1
-}
-
-# First, restart the daemon
-systemctl restart ceph-mon@${MONITOR_NAME}
-
-COUNT=10
-# Wait and ensure the socket exists after restarting the daemon
-while [ $COUNT -ne 0 ]; do
-  test -S $SOCKET && check_quorum
-  sleep 1
-  let COUNT=COUNT-1
-done
-# If we reach this point, it means the socket is not present.
-echo "Socket file ${SOCKET} could not be found, which means the monitor is not running."
-exit 1
diff --git a/roles/ceph-common/templates/restart_osd_daemon.sh.j2 b/roles/ceph-common/templates/restart_osd_daemon.sh.j2
deleted file mode 100644 (file)
index ae31f40..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-RETRIES="{{ handler_health_osd_check_retries }}"
-DELAY="{{ handler_health_osd_check_delay }}"
-CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}"
-
-check_pgs() {
-  while [ $RETRIES -ne 0 ]; do
-    test "[""$(ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]')"
-    RET=$?
-    test $RET -eq 0 && return 0
-    sleep $DELAY
-    let RETRIES=RETRIES-1
-  done
-  # PGs not clean, exiting with return code 1
-  echo "Error while running 'ceph $CEPH_CLI -s', PGs were not reported as active+clean"
-  echo "It is possible that the cluster has less OSDs than the replica configuration"
-  echo "Will refuse to continue"
-  ceph $CEPH_CLI -s
-  exit 1
-}
-
-for id in $(ls /var/lib/ceph/osd/ | sed 's/.*-//'); do
-  # First, restart daemon(s)
-  systemctl restart ceph-osd@${id}
-  # We need to wait because it may take some time for the socket to actually exists
-  COUNT=10
-  # Wait and ensure the socket exists after restarting the daemon
-  SOCKET=/var/run/ceph/{{ cluster }}-osd.${id}.asok
-  while [ $COUNT -ne 0 ]; do
-    test -S $SOCKET && check_pgs && continue 2
-    sleep 1
-    let COUNT=COUNT-1
-  done
-  # If we reach this point, it means the socket is not present.
-  echo "Socket file ${SOCKET} could not be found, which means the osd daemon is not running."
-  exit 1
-done
diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml
index 2669e1bc9203e19b809b1e55861d84d9937caaa1..3a41ed5fbabdb9cec99f93d568b6b61b5975a3ea 100644 (file)
@@ -367,7 +367,7 @@ os_tuning_params:
 ##########
 # DOCKER #
 ##########
-
+docker_exec_cmd:
 docker: false
 ceph_docker_image: "ceph/daemon"
 ceph_docker_image_tag: latest
diff --git a/roles/ceph-defaults/handlers/main.yml b/roles/ceph-defaults/handlers/main.yml
new file mode 100644 (file)
index 0000000..e8659f9
--- /dev/null
@@ -0,0 +1,95 @@
+---
+- name: update apt cache
+  apt:
+    update-cache: yes
+  when: ansible_os_family == 'Debian'
+
+- block:
+  - name: copy mon restart script
+    template:
+      src: restart_mon_daemon.sh.j2
+      dest: /tmp/restart_mon_daemon.sh
+      owner: root
+      group: root
+      mode: 0750
+    listen: "restart ceph mons"
+
+  - name: restart ceph mon daemon(s)
+    command: /tmp/restart_mon_daemon.sh
+    listen: "restart ceph mons"
+  when:
+# We do not want to run these checks on initial deployment (`socket.rc == 0`)
+    - socket.rc == 0
+    - mon_group_name in group_names
+
+# This does not just restart OSDs but everything else too. Unfortunately
+# at this time the ansible role does not have an OSD id list to use
+# for restarting them specifically.
+- name: copy osd restart script
+  template:
+    src: restart_osd_daemon.sh.j2
+    dest: /tmp/restart_osd_daemon.sh
+    owner: root
+    group: root
+    mode: 0750
+  listen: "restart ceph osds"
+  when:
+    - inventory_hostname in play_hosts
+    - osd_group_name in group_names
+
+- name: restart containerized ceph osds daemon(s)
+  command: /tmp/restart_osd_daemon.sh
+  listen: "restart ceph osds"
+  with_items: "{{ socket_osd_container.results }}"
+  when:
+  # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
+  # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
+    - ((crush_location is defined and crush_location) or item.get('rc') == 0)
+    - handler_health_osd_check
+    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
+    - inventory_hostname in play_hosts
+    - osd_group_name in group_names
+
+- name: restart non-containerized ceph osds daemon(s)
+  command: /tmp/restart_osd_daemon.sh
+  listen: "restart ceph osds"
+  when:
+  # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
+  # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
+    - ((crush_location is defined and crush_location) or socket.rc == 0)
+    - ceph_current_fsid.rc == 0
+    - handler_health_osd_check
+    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
+    - inventory_hostname in play_hosts
+    - osd_group_name in group_names
+
+- name: restart ceph mdss
+  service:
+    name: ceph-mds@{{ mds_name }}
+    state: restarted
+  # serial: 1 would be the proper solution here, but that can only be set on play level
+  # upstream issue: https://github.com/ansible/ansible/issues/12170
+  run_once: true
+  with_items: "{{ groups.get(mds_group_name, []) }}"
+  delegate_to: "{{ item }}"
+  when:
+    - mds_group_name in group_names
+
+- name: restart ceph rgws
+  service:
+    name: ceph-radosgw@rgw.{{ ansible_hostname }}
+    state: restarted
+  # serial: 1 would be the proper solution here, but that can only be set on play level
+  # upstream issue: https://github.com/ansible/ansible/issues/12170
+  run_once: true
+  with_items: "{{ groups.get(rgw_group_name, []) }}"
+  delegate_to: "{{ item }}"
+  when:
+    - rgw_group_name in group_names
+
+- name: restart ceph nfss
+  service:
+    name: nfs-ganesha
+    state: restarted
+  when:
+    - nfs_group_name in group_names
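
The health-check knobs referenced by these handlers and by the restart script templates (handler_health_osd_check plus the *_check_retries and *_check_delay values) are ordinary role variables. A group_vars sketch is shown below; the variable names come from this code, while the values are only examples, not the project defaults:

# group_vars/all.yml (illustrative values)
handler_health_mon_check_retries: 5
handler_health_mon_check_delay: 10
handler_health_osd_check_retries: 40
handler_health_osd_check_delay: 30
handler_health_osd_check: true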
diff --git a/roles/ceph-defaults/tasks/check_socket.yml b/roles/ceph-defaults/tasks/check_socket.yml
new file mode 100644 (file)
index 0000000..11f04f6
--- /dev/null
@@ -0,0 +1,21 @@
+---
+# These checks are used to avoid running handlers at initial deployment.
+- name: check for a ceph socket
+  shell: |
+    {{ docker_exec_cmd }} bash -c 'stat {{ rbd_client_admin_socket_path }}/*.asok > /dev/null 2>&1'
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: socket
+
+- name: check for a ceph socket in containerized deployment (osds)
+  shell: |
+    docker exec ceph-osd-"{{ ansible_hostname }}"-"{{ item | replace('/', '') }}" bash -c 'stat /var/run/ceph/*.asok > /dev/null 2>&1'
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: socket_osd_container
+  with_items: "{{ devices }}"
+  when:
+    - containerized_deployment
+    - inventory_hostname in groups.get(osd_group_name, [])
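
Because the OSD socket check loops over devices with with_items, the registered socket_osd_container variable is a results list with one entry (and one rc) per device, which is what the handler condition item.get('rc') == 0 inspects. A trimmed, illustrative shape of that registered variable (device names and rc values are hypothetical):

socket_osd_container:
  results:
    - item: /dev/sda     # device the check ran against
      rc: 0              # 0 -> socket found, the OSD container was already running
      changed: false
    - item: /dev/sdb
      rc: 1              # non-zero -> no socket, e.g. initial deployment
      changed: false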
diff --git a/roles/ceph-defaults/tasks/main.yml b/roles/ceph-defaults/tasks/main.yml
index 163464872ed930bf7b90a884dfa3e1927bd0b56a..25887efa78ccf42d789864515df5685d01286e39 100644 (file)
@@ -1,2 +1,3 @@
 ---
 - include: facts.yml
+- include: check_socket.yml
diff --git a/roles/ceph-defaults/templates/restart_mon_daemon.sh.j2 b/roles/ceph-defaults/templates/restart_mon_daemon.sh.j2
new file mode 100644 (file)
index 0000000..745f691
--- /dev/null
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+RETRIES="{{ handler_health_mon_check_retries }}"
+DELAY="{{ handler_health_mon_check_delay }}"
+MONITOR_NAME="{{ monitor_name }}"
+SOCKET=/var/run/ceph/{{ cluster }}-mon.${MONITOR_NAME}.asok
+
+
+check_quorum() {
+while [ $RETRIES -ne 0 ]; do
+  MEMBERS=$({{ docker_exec_cmd }} ceph --cluster {{ cluster }}   -s --format json | sed -r 's/.*"quorum_names":(\[[^]]+\]).*/\1/')
+  test "${MEMBERS/$MONITOR_NAME}" != "$MEMBERS" && exit 0
+  sleep $DELAY
+  let RETRIES=RETRIES-1
+done
+# If we reach this point, it means there is a problem with the quorum
+echo "Error with quorum."
+echo "cluster status:"
+{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s
+exit 1
+}
+
+# First, restart the daemon
+systemctl restart ceph-mon@${MONITOR_NAME}
+
+COUNT=10
+# Wait and ensure the socket exists after restarting the daemon
+while [ $COUNT -ne 0 ]; do
+  {{ docker_exec_cmd }} test -S $SOCKET && check_quorum
+  sleep 1
+  let COUNT=COUNT-1
+done
+# If we reach this point, it means the socket is not present.
+echo "Socket file ${SOCKET} could not be found, which means the monitor is not running."
+exit 1
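
This template relies on docker_exec_cmd, which ceph-defaults now defaults to an empty value: when it is empty the commands run directly on the host, and for containerized monitors it is expected to carry a 'docker exec <mon container>' prefix so the same script works against the container. A hypothetical set_fact illustrating that convention (the task that actually sets the variable lives outside this diff, and the container name is an assumption):

- name: set docker_exec_cmd for containerized monitors   # illustrative only
  set_fact:
    docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
  when: containerized_deployment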
diff --git a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2
new file mode 100644 (file)
index 0000000..de1fe10
--- /dev/null
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+RETRIES="{{ handler_health_osd_check_retries }}"
+DELAY="{{ handler_health_osd_check_delay }}"
+CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}"
+
+check_pgs() {
+  while [ $RETRIES -ne 0 ]; do
+    test "[""$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]')"
+    RET=$?
+    test $RET -eq 0 && return 0
+    sleep $DELAY
+    let RETRIES=RETRIES-1
+  done
+  # PGs not clean, exiting with return code 1
+  echo "Error while running 'ceph $CEPH_CLI -s', PGs were not reported as active+clean"
+  echo "It is possible that the cluster has less OSDs than the replica configuration"
+  echo "Will refuse to continue"
+  $docker_exec ceph $CEPH_CLI -s
+  exit 1
+}
+
+wait_for_socket_in_docker() {
+  if ! docker exec "$1" timeout 10 bash -c "while [ ! -e /var/run/ceph/*.asok ]; do sleep 1 ; done"; then
+    echo "Timed out while trying to look for a Ceph OSD socket."
+    echo "Abort mission!"
+    exit 1
+  fi
+}
+
+get_dev_name() {
+  echo $1 | sed -r 's/ceph-osd@([a-z]{1,4})\.service/\1/'
+}
+
+get_docker_id_from_dev_name() {
+  local id
+  local count
+  count=10
+  while [ $count -ne 0 ]; do
+    id=$(docker ps -q -f "name=$1")
+    test "$id" != "" && break
+    sleep 1
+    let count=count-1
+  done
+  echo "$id"
+}
+
+get_docker_osd_id() {
+  wait_for_socket_in_docker $1
+  docker exec "$1" ls /var/run/ceph | cut -d'.' -f2
+}
+
+# For containerized deployments, the unit file looks like: ceph-osd@sda.service
+# For non-containerized deployments, the unit file looks like: ceph-osd@0.service
+for unit in $(systemctl list-units | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"); do
+  # First, restart daemon(s)
+  systemctl restart "${unit}"
+  # We need to wait because it may take some time for the socket to actually exists
+  COUNT=10
+  # Wait and ensure the socket exists after restarting the daemon
+  {% if containerized_deployment -%}
+  id=$(get_dev_name "$unit")
+  container_id=$(get_docker_id_from_dev_name "$id")
+  osd_id=$(get_docker_osd_id "$container_id")
+  docker_exec="docker exec $container_id"
+  {% else %}
+  osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]{1,2}')
+  {% endif %}
+  SOCKET=/var/run/ceph/{{ cluster }}-osd.${osd_id}.asok
+  while [ $COUNT -ne 0 ]; do
+    $docker_exec test -S "$SOCKET" && check_pgs && continue 2
+    sleep 1
+    let COUNT=COUNT-1
+  done
+  # If we reach this point, it means the socket is not present.
+  echo "Socket file ${SOCKET} could not be found, which means the osd daemon is not running."
+  exit 1
+done
diff --git a/roles/ceph-docker-common/tasks/create_configs.yml b/roles/ceph-docker-common/tasks/create_configs.yml
index d1e6a2926ca6b3c28a86f73e2409713d3f9b6cd4..a33f72f4b3b5460a3dd882720d565f939b89b981 100644 (file)
     config_type: ini
   when:
     - (not mon_containerized_default_ceph_conf_with_kv and
-        (inventory_hostname in groups.get(mon_group_name, []))) or
+        (inventory_hostname in groups.get(mon_group_name, []) or inventory_hostname in groups.get(osd_group_name, []))) or
       (not mon_containerized_default_ceph_conf_with_kv and
         ((groups.get(nfs_group_name, []) | length > 0)
           and (inventory_hostname == groups.get(nfs_group_name, [])[0])))
+  notify:
+    - restart ceph mons
+    - restart ceph osds
+    - restart ceph mdss
+    - restart ceph rgws
 
 - name: set fsid fact when generate_fsid = true
   set_fact:
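
Handlers only fire when the notifying task reports a change, so the restarts listed above are triggered only when the rendered ceph.conf actually changes, and they run once at the end of the play. A minimal illustration of that behaviour (task name and paths are hypothetical):

- name: render ceph.conf for containerized daemons   # illustrative only
  template:
    src: ceph.conf.j2
    dest: /etc/ceph/{{ cluster }}.conf
  notify:
    - restart ceph mons   # queued only if the rendered file changed

- meta: flush_handlers    # optional: run pending handlers now instead of at the end of the play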
diff --git a/roles/ceph-mon/defaults/main.yml b/roles/ceph-mon/defaults/main.yml
index 28ec80cfbbea0020e692b3d2a437490bf983dfe7..f81b254eb8a83add39da06173357a22e346237e4 100644 (file)
@@ -105,7 +105,6 @@ openstack_keys:
 ##########
 # DOCKER #
 ##########
-docker_exec_cmd:
 ceph_mon_docker_subnet: "{{ public_network }}" # subnet of the monitor_interface
 
 # ceph_mon_docker_extra_env:
diff --git a/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml b/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml
index e3bef3603523089f88a9b2d77158d59f4beec93b..3bce3d4fea96ddb2368a820c4808255cfe7da670 100644 (file)
@@ -2,14 +2,21 @@
 - name: generate systemd unit file
   become: true
   template:
-    src: "{{ role_path }}/templates/ceph-rgw.service.j2"
-    dest: /etc/systemd/system/ceph-rgw@.service
+    src: "{{ role_path }}/templates/ceph-radosgw.service.j2"
+    dest: /etc/systemd/system/ceph-radosgw@.service
     owner: "root"
     group: "root"
     mode: "0644"
 
+# For backward compatibility
+- name: disable old systemd unit ('ceph-rgw@') if present
+  service:
+    name: ceph-rgw@{{ ansible_hostname }}
+    enabled: no
+  ignore_errors: true
+
 - name: enable systemd unit file for rgw instance
-  shell: systemctl enable ceph-rgw@{{ ansible_hostname }}.service
+  shell: systemctl enable ceph-radosgw@{{ ansible_hostname }}.service
   failed_when: false
   changed_when: false
 
@@ -20,7 +27,7 @@
 
 - name: systemd start rgw container
   service:
-    name: ceph-rgw@{{ ansible_hostname }}
+    name: ceph-radosgw@{{ ansible_hostname }}
     state: started
     enabled: yes
   changed_when: false
diff --git a/roles/ceph-rgw/templates/ceph-radosgw.service.j2 b/roles/ceph-rgw/templates/ceph-radosgw.service.j2
new file mode 100644 (file)
index 0000000..944e994
--- /dev/null
@@ -0,0 +1,31 @@
+[Unit]
+Description=Ceph RGW
+After=docker.service
+
+[Service]
+EnvironmentFile=-/etc/environment
+ExecStartPre=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/docker rm ceph-rgw-{{ ansible_hostname }}
+ExecStart=/usr/bin/docker run --rm --net=host \
+   {% if not containerized_deployment_with_kv -%}
+   -v /var/lib/ceph:/var/lib/ceph \
+   -v /etc/ceph:/etc/ceph \
+   {% else -%}
+   -e KV_TYPE={{kv_type}} \
+   -e KV_IP={{kv_endpoint}} \
+   -e KV_PORT={{kv_port}} \
+   {% endif -%}
+   -v /etc/localtime:/etc/localtime:ro \
+   --privileged \
+   -e CEPH_DAEMON=RGW \
+   {{ ceph_rgw_docker_extra_env }} \
+   --name=ceph-rgw-{{ ansible_hostname }} \
+   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+ExecStopPost=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
+Restart=always
+RestartSec=10s
+TimeoutStartSec=120
+TimeoutStopSec=15
+
+[Install]
+WantedBy=multi-user.target
diff --git a/roles/ceph-rgw/templates/ceph-rgw.service.j2 b/roles/ceph-rgw/templates/ceph-rgw.service.j2
deleted file mode 100644 (file)
index 944e994..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-[Unit]
-Description=Ceph RGW
-After=docker.service
-
-[Service]
-EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-rgw-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
-   {% if not containerized_deployment_with_kv -%}
-   -v /var/lib/ceph:/var/lib/ceph \
-   -v /etc/ceph:/etc/ceph \
-   {% else -%}
-   -e KV_TYPE={{kv_type}} \
-   -e KV_IP={{kv_endpoint}} \
-   -e KV_PORT={{kv_port}} \
-   {% endif -%}
-   -v /etc/localtime:/etc/localtime:ro \
-   --privileged \
-   -e CEPH_DAEMON=RGW \
-   {{ ceph_rgw_docker_extra_env }} \
-   --name=ceph-rgw-{{ ansible_hostname }} \
-   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
-Restart=always
-RestartSec=10s
-TimeoutStartSec=120
-TimeoutStopSec=15
-
-[Install]
-WantedBy=multi-user.target