]> git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
ceph_volume: try to get rid of the dummy container
authorSébastien Han <seb@redhat.com>
Mon, 16 Jul 2018 16:09:33 +0000 (18:09 +0200)
committerGuillaume Abrioux <gabrioux@redhat.com>
Wed, 10 Oct 2018 20:08:41 +0000 (16:08 -0400)
If we run on a containerized deployment we pass an env variable which
contains the container image.

Signed-off-by: Sébastien Han <seb@redhat.com>
library/ceph_volume.py
roles/ceph-osd/tasks/docker/start_docker_osd.yml
roles/ceph-osd/tasks/scenarios/lvm.yml
roles/ceph-osd/templates/ceph-osd-run.sh.j2

index 0e965ecb2a3dbe75e82b3cdfdd9ff0d3327091d7..e7c573fcc0703878d138eb891842a944c604ddb6 100644 (file)
@@ -1,8 +1,8 @@
 #!/usr/bin/python
 import datetime
-import json
 import copy
-
+import json
+import os
 
 ANSIBLE_METADATA = {
     'metadata_version': '1.0',
@@ -158,6 +158,20 @@ EXAMPLES = '''
 from ansible.module_utils.basic import AnsibleModule  # noqa 4502
 
 
def container_exec(binary, container_image):
    '''
    Build the CLI to run a command inside a container.

    :param binary: name of the executable to run inside the container,
                   used as the container entrypoint (e.g. "ceph-volume")
    :param container_image: full container image reference
                            (registry/image:tag)
    :return: list of command-line arguments suitable for module.run_command;
             the caller appends the binary's own arguments after it
    '''

    # The container needs host devices, the ceph config/keys, the lvmetad
    # socket and /var/lib/ceph so ceph-volume behaves as if run on the host.
    # NOTE: the original wrapped "--entrypoint=..." in os.path.join(); with a
    # single argument that call is a no-op, so it is dropped here.
    command_exec = ["docker", "run", "--rm", "--privileged", "--net=host",
                    "-v", "/dev:/dev", "-v", "/etc/ceph:/etc/ceph:z",
                    "-v", "/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket",
                    "-v", "/var/lib/ceph/:/var/lib/ceph/:z",
                    "--entrypoint=" + binary,
                    container_image]
    return command_exec
+
+
 def get_data(data, data_vg):
     if data_vg:
         data = "{0}/{1}".format(data_vg, data)
@@ -336,20 +350,26 @@ def batch(module):
     module.exit_json(**result)
 
 
-def ceph_volume_cmd(subcommand, containerized, cluster=None):
-    cmd = ['ceph-volume']
+def ceph_volume_cmd(subcommand, container_image, cluster=None):
+
+    if container_image:
+        binary = "ceph-volume"
+        cmd = container_exec(
+            binary, container_image)
+    else:
+        binary = ["ceph-volume"]
+        cmd = binary
+
     if cluster:
         cmd.extend(["--cluster", cluster])
+
     cmd.append('lvm')
     cmd.append(subcommand)
 
-    if containerized:
-        cmd = containerized.split() + cmd
-
     return cmd
 
 
-def activate_osd(module, containerized=None):
+def activate_osd(module, container_image=None):
     subcommand = "activate"
     cmd = ceph_volume_cmd(subcommand, container_image)
     cmd.append("--all")
@@ -370,10 +390,14 @@ def prepare_osd(module):
     wal_vg = module.params.get('wal_vg', None)
     crush_device_class = module.params.get('crush_device_class', None)
     dmcrypt = module.params['dmcrypt']
-    containerized = module.params.get('containerized', None)
     subcommand = "prepare"
 
-    cmd = ceph_volume_cmd(subcommand, containerized, cluster)
+    if "CEPH_CONTAINER_IMAGE" in os.environ:
+        container_image = os.getenv("CEPH_CONTAINER_IMAGE")
+    else:
+        container_image = None
+
+    cmd = ceph_volume_cmd(subcommand, container_image, cluster)
     cmd.extend(["--%s" % objectstore])
     cmd.append("--data")
 
@@ -417,9 +441,14 @@ def prepare_osd(module):
     # support for 'lvm list' and raw devices
     # was added with https://github.com/ceph/ceph/pull/20620 but
     # has not made it to a luminous release as of 12.2.4
-    ceph_volume_list_cmd = ["ceph-volume", "lvm", "list", data]
-    if containerized:
-        ceph_volume_list_cmd = containerized.split() + ceph_volume_list_cmd
+    ceph_volume_list_cmd_args = ["lvm", "list", data]
+    if container_image:
+        binary = "ceph-volume"
+        ceph_volume_list_cmd = container_exec(
+            binary, container_image) + ceph_volume_list_cmd_args
+    else:
+        binary = ["ceph-volume"]
+        ceph_volume_list_cmd = binary + ceph_volume_list_cmd_args
 
     rc, out, err = module.run_command(ceph_volume_list_cmd, encoding=None)
     if rc == 0:
index c24e7db019c80cb56f8d553b411d7e6823796f56..403bcc43a0ed0007e5c0b97efa7b3c3a6238ffdd 100644 (file)
   notify:
     - restart ceph osds
 
+- name: collect osd ids
+  shell: >
+    docker run --rm
+    --privileged=true
+    -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket
+    -v /etc/ceph:/etc/ceph:z
+    -v /dev:/dev
+    --entrypoint=ceph-volume
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+    lvm list --format json | python -c 'import sys, json; print("\n".join(json.load(sys.stdin).keys()))'
+  changed_when: false
+  failed_when: false
+  register: ceph_osd_ids
+  when:
+    - containerized_deployment
+    - osd_scenario == 'lvm'
+
 - name: systemd start osd container
   systemd:
-    name: ceph-osd@{{ item | regex_replace('/dev/', '') }}
+    name: ceph-osd@{{ item | regex_replace('/dev/', '') if osd_scenario != 'lvm' else item }}
     state: started
     enabled: yes
     daemon_reload: yes
-  with_items: "{{ devices }}"
+  with_items: "{{ devices if osd_scenario != 'lvm' else ceph_osd_ids.stdout_lines }}"
\ No newline at end of file
index 67ad9c3415662d51518e53b1a66448e530db258e..bff1cf4cbcbbf4dabdd39f3726b6d2c8cb75ca29 100644 (file)
@@ -1,29 +1,4 @@
 ---
-- name: set_fact docker_exec_prepare_cmd
-  set_fact:
-    docker_exec_prepare_cmd: "docker exec ceph-volume-prepare"
-  when:
-    - containerized_deployment
-
-- name: run a ceph-volume prepare container (sleep 3000)
-  command: >
-    docker run \
-    --rm \
-    --privileged=true \
-    --net=host \
-    -v /dev:/dev \
-    -d \
-    -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \
-    -v /var/lib/ceph/:/var/lib/ceph/:z \
-    -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
-    --name ceph-volume-prepare \
-    --entrypoint=sleep \
-    {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-    3000
-  changed_when: false
-  when:
-    - containerized_deployment
-
 - name: "use ceph-volume to create {{ osd_objectstore }} osds"
   ceph_volume:
     cluster: "{{ cluster }}"
@@ -38,8 +13,8 @@
     wal_vg: "{{ item.wal_vg|default(omit) }}"
     crush_device_class: "{{ item.crush_device_class|default(omit) }}"
     dmcrypt: "{{ dmcrypt|default(omit) }}"
-    containerized: "{{ docker_exec_prepare_cmd | default(False) }}"
     action: "{{ 'prepare' if containerized_deployment else 'create' }}"
   environment:
     CEPH_VOLUME_DEBUG: 1
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else '' }}"
   with_items: "{{ lvm_volumes }}"
\ No newline at end of file
index ed3f47a3f0ce2e146e93034f121d447268ea4c8b..bd08f35cb13cabb18bdb514224deb737b1499e50 100644 (file)
@@ -1,8 +1,13 @@
 #!/bin/bash
 # {{ ansible_managed }}
 
+
+#############
+# VARIABLES #
+#############
 DOCKER_ENV=""
 
+
 #############
 # FUNCTIONS #
 #############
@@ -50,6 +55,16 @@ function expose_partitions {
 
 expose_partitions "$1"
 
+{% if osd_scenario == 'lvm' -%}
+function find_device_from_id {
+  OSD_ID="$1"
+  LV=$(docker run --privileged=true -v /dev:/dev -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /etc/ceph:/etc/ceph:z --entrypoint=ceph-volume {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} lvm list --format json | python -c "import sys, json; print(json.load(sys.stdin)[\"$OSD_ID\"][0][\"path\"])")
+  OSD_DEVICE=$(lvdisplay -m $LV | awk '/Physical volume/ {print $3}')
+}
+
+find_device_from_id $@
+{% endif -%}
+
 
 ########
 # MAIN #
@@ -103,16 +118,18 @@ expose_partitions "$1"
   -e OSD_DMCRYPT=1 \
   {% endif -%}
   -e CLUSTER={{ cluster }} \
-  -e OSD_DEVICE=/dev/${1} \
   {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
   -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
   {% endif -%}
   {% if osd_scenario == 'lvm' -%}
   -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
   -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
+  -e OSD_DEVICE="$OSD_DEVICE" \
+  --name=ceph-osd-"$OSD_ID" \
   {% else -%}
   -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
+  -e OSD_DEVICE=/dev/"${1}" \
+  --name=ceph-osd-{{ ansible_hostname }}-"${1}" \
   {% endif -%}
   {{ ceph_osd_docker_extra_env }} \
-  --name=ceph-osd-{{ ansible_hostname }}-${1} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}