git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
osd: allow multi dedicated journals for containers 1724/head
authorSébastien Han <seb@redhat.com>
Tue, 25 Jul 2017 15:54:26 +0000 (17:54 +0200)
committerSébastien Han <seb@redhat.com>
Wed, 30 Aug 2017 10:34:06 +0000 (12:34 +0200)
Fix: https://bugzilla.redhat.com/show_bug.cgi?id=1475820
Signed-off-by: Sébastien Han <seb@redhat.com>
27 files changed:
Vagrantfile
group_vars/osds.yml.sample
infrastructure-playbooks/purge-docker-cluster.yml
infrastructure-playbooks/rolling_update.yml
infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
roles/ceph-docker-common/tasks/fetch_configs.yml
roles/ceph-osd/defaults/main.yml
roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml [new file with mode: 0644]
roles/ceph-osd/tasks/check_devices.yml
roles/ceph-osd/tasks/check_devices_auto.yml
roles/ceph-osd/tasks/check_devices_static.yml
roles/ceph-osd/tasks/docker/start_docker_osd.yml
roles/ceph-osd/tasks/main.yml
roles/ceph-osd/tasks/scenarios/collocated.yml
roles/ceph-osd/tasks/scenarios/non-collocated.yml
roles/ceph-osd/templates/ceph-osd-run.sh.j2
tests/functional/centos/7/bs-crypt-jrn-col/group_vars/all
tests/functional/centos/7/bs-dock-crypt-jrn-col/group_vars/all
tests/functional/centos/7/bs-dock-ded-jrn/group_vars/all
tests/functional/centos/7/bs-docker/group_vars/all
tests/functional/centos/7/cluster/group_vars/all
tests/functional/centos/7/crypt-ded-jrn/group_vars/all
tests/functional/centos/7/crypt-jrn-col/group_vars/all
tests/functional/centos/7/docker-crypt-jrn-col/group_vars/all
tests/functional/centos/7/docker-ded-jrn/group_vars/all
tests/functional/centos/7/docker/group_vars/all
tests/functional/centos/7/jrn-col/group_vars/all

index cc45504c31c8ef400518b3ba5b70e7718be47274..b10d9396f51ce63ac194a5a4e1b2d7a3ab612729 100644 (file)
@@ -77,7 +77,6 @@ ansible_provision = proc do |ansible|
       containerized_deployment: 'true',
       monitor_interface: ETH,
       ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
-      ceph_osd_docker_devices: settings['disks'],
       devices: settings['disks'],
       ceph_docker_on_openstack: BOX == 'openstack',
       ceph_rgw_civetweb_port: 8080,
index 55f66daa8cd5a391773e5e8c283709552e94d528..777a5615b066936c92f23095d092f696c72836e9 100644 (file)
@@ -227,40 +227,22 @@ dummy:
 #ceph_config_keys: [] # DON'T TOUCH ME
 
 # PREPARE DEVICE
-# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly.
-# This is why we use [0] in the example.
 #
 # WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
 #
-# Examples:
-# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
-# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
-# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
-# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
-# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-#
 #ceph_osd_docker_devices: "{{ devices }}"
-#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+#ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
 
 # ACTIVATE DEVICE
-# Examples:
-# Journal collocated or Dedicated journal on Filesore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
-# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
 #
-#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
+#ceph_osd_docker_extra_env:
 #ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
 
 
 ###########
 # SYSTEMD #
 ###########
+
 # ceph_osd_systemd_overrides will override the systemd settings
 # for the ceph-osd services.
 # For example,to set "PrivateDevices=false" you can specify:
index 4558fc30ffa2d739afdb94b5827f16ceb2aebd12..7941752bdc27a2c8650ada31b171b60b502b1e9e 100644 (file)
       name: "ceph-osd@{{ item | basename }}"
       state: stopped
       enabled: no
-    with_items: "{{ ceph_osd_docker_devices }}"
+    with_items: "{{ devices }}"
     ignore_errors: true
 
   - name: remove ceph osd prepare container
       image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
       name: "ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
       state: absent
-    with_items: "{{ ceph_osd_docker_devices }}"
+    with_items: "{{ devices }}"
     ignore_errors: true
 
   - name: remove ceph osd container
       image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
       name: "ceph-osd-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
       state: absent
-    with_items: "{{ ceph_osd_docker_devices }}"
+    with_items: "{{ devices }}"
     ignore_errors: true
 
   - name: zap ceph osd disks
       {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
       zap_device
     with_items:
-      - "{{ ceph_osd_docker_devices }}"
+      - "{{ devices }}"
       - "{{ dedicated_devices|default([]) }}"
 
   - name: wait until the zap containers die
       name: "ceph-osd-zap-{{ ansible_hostname }}-dev{{ item | regex_replace('/', '') }}"
       state: absent
     with_items:
-      - "{{ ceph_osd_docker_devices }}"
+      - "{{ devices }}"
       - "{{ dedicated_devices|default([]) }}"
 
   - name: remove ceph osd service
index 535c2201dd607e6182bf3a4bb9bc9ed1648b9dd5..e569639b4d81c25b1ce2be160d9e96ee7363cd1b 100644 (file)
         name: ceph-osd@{{ item | basename }}
         state: restarted
         enabled: yes
-      with_items: "{{ ceph_osd_docker_devices }}"
+      with_items: "{{ devices }}"
       when:
         - ansible_service_mgr == 'systemd'
         - containerized_deployment
index 746f06595592256c156b73091c730e92b7f44bb3..6203ca29b717985029529423ceb6d6458f25aaea 100644 (file)
     - name: collect osd devices
       shell: |
         blkid | awk '/ceph data/ { sub ("1:", "", $1); print $1 }'
-      register: ceph_osd_docker_devices
+      register: devices
       changed_when: false
 
     - name: stop non-containerized ceph osd(s)
         docker ps | grep -sq {{ item | regex_replace('/', '') }}
       changed_when: false
       failed_when: false
-      with_items: "{{ ceph_osd_docker_devices.stdout_lines }}"
+      with_items: "{{ devices.stdout_lines }}"
       register: osd_running
 
     - name: unmount all the osd directories
       changed_when: false
       failed_when: false
       with_together:
-        - "{{ ceph_osd_docker_devices.stdout_lines }}"
+        - "{{ devices.stdout_lines }}"
         - "{{ osd_running.results }}"
       when:
         - item.1.get("rc", 0) != 0
 
-    - set_fact: ceph_osd_docker_devices={{ ceph_osd_docker_devices.stdout_lines }}
+    - set_fact: devices={{ devices.stdout_lines }}
 
   roles:
     - ceph-defaults
index d361782743346a09d7c1836ab912c8fa9de54010..9b73857308420e6b1e6f866fa44c2d6ef24ad42b 100644 (file)
@@ -56,5 +56,5 @@
   changed_when: false
   with_together:
     - "{{ ceph_config_keys }}"
-    - "{{ statconfig.results }}"
+    - "{{ statconfig.results | default([]) }}"
   when: item.1.stat.exists == true
index d6d3511ffd146f02fc9e6008a17af130b555f115..2c6588541bdf76e77701fe6b16689881991e051e 100644 (file)
@@ -219,40 +219,22 @@ lvm_volumes: []
 ceph_config_keys: [] # DON'T TOUCH ME
 
 # PREPARE DEVICE
-# Make sure you only pass a single device to dedicated_devices, otherwise this will fail horribly.
-# This is why we use [0] in the example.
 #
 # WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
 #
-# Examples:
-# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
-# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
-# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
-# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
-# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_FILESTORE=1
-#
-#
 ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }}
 
 # ACTIVATE DEVICE
-# Examples:
-# Journal collocated or Dedicated journal on Filesore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
-# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
-# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
 #
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
+ceph_osd_docker_extra_env:
 ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command
 
 
 ###########
 # SYSTEMD #
 ###########
+
 # ceph_osd_systemd_overrides will override the systemd settings
 # for the ceph-osd services.
 # For example,to set "PrivateDevices=false" you can specify:
diff --git a/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml b/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml
new file mode 100644 (file)
index 0000000..2906095
--- /dev/null
@@ -0,0 +1,81 @@
+---
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
+  when:
+    - osd_objectstore == 'bluestore'
+    - not dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
+  when:
+    - osd_objectstore == 'filestore'
+    - not dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }}"
+  when:
+    - osd_objectstore == 'filestore'
+    - not dmcrypt
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
+  when:
+    - osd_objectstore == 'bluestore'
+    - dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - dmcrypt
+    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - dmcrypt
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+    - not containerized_deployment
+
+- set_fact:
+    docker_env_args: -e KV_TYPE={{ kv_type }} -e KV_IP={{ kv_endpoint }} -e KV_PORT={{ kv_port }}
+  when:
+    - containerized_deployment_with_kv
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'filestore'
+    - not dmcrypt
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'filestore'
+    - dmcrypt
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'bluestore'
+    - not dmcrypt
+
+- set_fact:
+    docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
+  when:
+    - containerized_deployment
+    - osd_objectstore == 'bluestore'
+    - dmcrypt
index 8d68cfd38c1f2530bcce3078aa057f75ea37bda5..36bdfa092db4b26326d23ad4cec0519547a0f158 100644 (file)
   always_run: true
   register: journal_partition_status
   when:
-    - osd_scenario == 'non-collocated'
     - item.0.rc != 0
+    - osd_scenario == 'non-collocated'
 
-- name: fix partitions gpt header or labels of the journal devices
+- name: fix partitions gpt header or labels of the journal device(s)
   shell: "sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }} || sgdisk --zap-all --clear --mbrtogpt -- {{ item.1 }}"
   with_together:
     - "{{ journal_partition_status.results }}"
   changed_when: false
   when:
     - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
     - osd_scenario == 'non-collocated'
+    - not containerized_deployment
+
+- name: create gpt disk label of the journal device(s)
+  command: parted --script {{ item.1 }} mklabel gpt
+  with_together:
+    - "{{ osd_partition_status_results.results }}"
+    - "{{ dedicated_devices|unique }}"
+  changed_when: false
+  when:
+    - not item.0.get("skipped")
     - item.0.get("rc", 0) != 0
+    - osd_scenario == 'non-collocated'
+    - containerized_deployment
index e05f96dfd2e3bd95be2b59ae2f5c006ecb28d32c..7240b67f04d414a4305b9f27baa8bfea76bbe19f 100644 (file)
   changed_when: false
   when:
     - ansible_devices is defined
-    - item.0.item.value.removable == 0
-    - item.0.item.value.partitions|count == 0
-    - item.0.item.value.holders|count == 0
-    - item.0.rc != 0
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - item.1.value.removable == 0
+    - item.1.value.partitions|count == 0
+    - item.1.value.holders|count == 0
+    - not containerized_deployment
+
+- name: create gpt disk label
+  command: parted --script {{ item.1 }} mklabel gpt
+  with_together:
+    - "{{ osd_partition_status_results.results }}"
+    - "{{ ansible_devices }}"
+  changed_when: false
+  when:
+    - ansible_devices is defined
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - item.1.value.removable == 0
+    - item.1.value.partitions|count == 0
+    - item.1.value.holders|count == 0
+    - containerized_deployment
 
 - name: check if a partition named 'ceph' exists (autodiscover disks)
   shell: "parted --script /dev/{{ item.key }} print | egrep -sq '^ 1.*ceph'"
index cdcaff04e24a5bcc5b2e7c7f6b642ab76aa4228d..9d82ce335db7dd167aac9961890bbbf237f7dd5f 100644 (file)
   when:
     - not item.0.get("skipped")
     - item.0.get("rc", 0) != 0
+    - not containerized_deployment
+
+- name: create gpt disk label
+  command: parted --script {{ item.1 }} mklabel gpt
+  with_together:
+    - "{{ osd_partition_status_results.results }}"
+    - "{{ devices }}"
+  changed_when: false
+  when:
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - containerized_deployment
 
 - name: check if a partition named 'ceph' exists
   shell: "parted --script {{ item.1 }} print | egrep -sq '^ 1.*ceph'"
index 13e248fde27c93bab3979312fdc49e025df68b38..27039092c2f8ff0d3f72bf26fa3343fff4bed617 100644 (file)
@@ -9,62 +9,6 @@
     state: unmounted
   when: ceph_docker_on_openstack
 
-- name: verify if the disk was already prepared
-  shell: "lsblk -o PARTLABEL {{ item }} | grep -sq 'ceph'"
-  failed_when: false
-  always_run: true
-  with_items: "{{ ceph_osd_docker_devices }}"
-  register: osd_prepared
-
-# use shell rather than docker module
-# to ensure osd disk prepare finishes before
-# starting the next task
-- name: prepare ceph osd disk
-  shell: |
-    docker run --net=host \
-    --pid=host \
-    --privileged=true \
-    --name="ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.0 | regex_replace('/', '') }}" \
-    -v /etc/ceph:/etc/ceph \
-    -v /var/lib/ceph/:/var/lib/ceph/ \
-    -v /dev:/dev \
-    -v /etc/localtime:/etc/localtime:ro \
-    -e "OSD_DEVICE={{ item.0 }}" \
-    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-    {{ ceph_osd_docker_prepare_env }} \
-    "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
-  with_together:
-    - "{{ ceph_osd_docker_devices }}"
-    - "{{ osd_prepared.results }}"
-  when:
-    - item.1.get("rc", 0) != 0
-    - ceph_osd_docker_prepare_env is defined
-    - not containerized_deployment_with_kv
-
-- name: prepare ceph osd disk with kv_store
-  shell: |
-    docker run  --net=host   \
-    --pid=host \
-    --privileged=true \
-    --name="ceph-osd-prepare-{{ ansible_hostname }}-dev-{{ item.0 | regex_replace('/', '') }}" \
-    -v /dev:/dev \
-    -v /etc/localtime:/etc/localtime:ro \
-    -e "OSD_DEVICE={{ item.0 }}" \
-    -e "{{ ceph_osd_docker_prepare_env }}" \
-    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
-    -e KV_TYPE={{kv_type}} \
-    -e KV_IP={{kv_endpoint}} \
-    -e KV_PORT={{kv_port}} \
-    {{ ceph_osd_docker_prepare_env }} \
-    "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" \
-  with_together:
-    - "{{ ceph_osd_docker_devices }}"
-    - "{{ osd_prepared.results }}"
-  when:
-    - item.1.get("rc", 0) != 0
-    - ceph_osd_docker_prepare_env is defined
-    - containerized_deployment_with_kv
-
 - name: generate ceph osd docker run script
   become: true
   template:
@@ -86,7 +30,7 @@
 - name: enable systemd unit file for osd instance
   shell: systemctl enable ceph-osd@{{ item | basename }}.service
   changed_when: false
-  with_items: "{{ ceph_osd_docker_devices }}"
+  with_items: "{{ devices }}"
 
 - name: reload systemd unit files
   shell: systemctl daemon-reload
@@ -98,4 +42,4 @@
     state: started
     enabled: yes
   changed_when: false
-  with_items: "{{ ceph_osd_docker_devices }}"
+  with_items: "{{ devices }}"
index 1784e27fc5f4738a49fff4dee28903394d1a0f1c..457d23e93af33c951f79a36f49959162d55befdb 100644 (file)
@@ -6,17 +6,19 @@
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
+- include: ceph_disk_cli_options_facts.yml
+
+- include: check_devices.yml
+
 - include: ./scenarios/collocated.yml
   when:
     - osd_scenario == 'collocated'
-    - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
 - include: ./scenarios/non-collocated.yml
   when:
     - osd_scenario == 'non-collocated'
-    - not containerized_deployment
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
index b4119abeb8f04c5aa7b80a37a275bfa4da43f4a4..ff79d34bc8956639cf8eba2dd4a887c423f197cb 100644 (file)
@@ -1,63 +1,59 @@
 ---
-## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
-
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
-  when:
-    - osd_objectstore == 'bluestore'
-    - osd_scenario == 'collocated'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
-  when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }}"
-  when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
-  when:
-    - osd_objectstore == 'bluestore'
-    - osd_scenario == 'collocated'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
+# use shell rather than docker module
+# to ensure osd disk prepare finishes before
+# starting the next task
+- name: prepare ceph containerized osd disk collocated
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE={{ item.1 }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ devices }}"
   when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+    - not item.0.get("skipped")
+    - not osd_auto_discovery
+    - containerized_deployment
 
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
+- name: automatic prepare ceph containerized osd disk collocated
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-devdev{{ item.key }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE=/dev/{{ item.key }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_dict: "{{ ansible_devices }}"
   when:
-    - osd_objectstore == 'filestore'
-    - osd_scenario == 'collocated'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+    - ansible_devices is defined
+    - item.value.removable == "0"
+    - item.value.partitions|count == 0
+    - item.value.holders|count == 0
+    - osd_auto_discovery
+    - containerized_deployment
 
 # NOTE (alahouze): if the device is a partition, the parted command below has
 # failed, this is why we check if the device is a partition too.
-- name: automatic prepare "{{ osd_objectstore }}" osd disk(s) without partitions with collocated osd data and journal
+- name: automatic prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) without partitions with collocated osd data and journal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} /dev/{{ item.key }}"
   register: prepared_osds
   with_dict: "{{ ansible_devices }}"
@@ -67,8 +63,9 @@
     - item.value.partitions|count == 0
     - item.value.holders|count == 0
     - osd_auto_discovery
+    - not containerized_deployment
 
-- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with collocated osd data and journal
+- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with collocated osd data and journal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -80,5 +77,6 @@
     - item.0.get("rc", 0) != 0
     - item.1.get("rc", 0) != 0
     - not osd_auto_discovery
+    - not containerized_deployment
 
 - include: ../activate_osds.yml
index 5a181c7fa8829298c4506e7eb1b16add2a979371..9a744300c357a41cc781f83ed7f430ec5c910b50 100644 (file)
@@ -1,53 +1,64 @@
 ---
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
-  when:
-    - osd_objectstore == 'bluestore'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
-  when:
-    - osd_objectstore == 'filestore'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }}"
+# use shell rather than docker module
+# to ensure osd disk prepare finishes before
+# starting the next task
+- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE={{ item.1 }} \
+    -e OSD_JOURNAL={{ item.2 }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ devices }}"
+    - "{{ dedicated_devices }}"
   when:
+    - not item.0.get("skipped")
+    - not osd_auto_discovery
+    - containerized_deployment
     - osd_objectstore == 'filestore'
-    - not dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
 
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
+- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated with a dedicated device for db and wal
+  shell: |
+    docker run --net=host \
+    --pid=host \
+    --privileged=true \
+    --name=ceph-osd-prepare-{{ ansible_hostname }}-dev{{ item.1 | regex_replace('/', '') }} \
+    -v /etc/ceph:/etc/ceph \
+    -v /var/lib/ceph/:/var/lib/ceph/ \
+    -v /dev:/dev \
+    -v /etc/localtime:/etc/localtime:ro \
+    -e CLUSTER={{ cluster }} \
+    -e CEPH_DAEMON=OSD_CEPH_DISK_PREPARE \
+    -e OSD_DEVICE={{ item.1 }} \
+    -e OSD_BLUESTORE_BLOCK_DB={{ item.2 }} \
+    -e OSD_BLUESTORE_BLOCK_WAL={{ item.3 }} \
+    {{ docker_env_args }} \
+    {{ ceph_osd_docker_prepare_env }} \
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ devices }}"
+    - "{{ dedicated_devices }}"
+    - "{{ bluestore_wal_devices }}"
   when:
+    - not item.0.get("skipped")
+    - not osd_auto_discovery
+    - containerized_deployment
     - osd_objectstore == 'bluestore'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
 
-- set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - dmcrypt
-    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
-- name: prepare filestore osd disk(s) non-collocated
+- name: prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) non-collocated
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }} {{ item.3 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -60,9 +71,9 @@
     - not item.1.get("skipped")
     - item.1.get("rc", 0) != 0
     - osd_objectstore == 'filestore'
-    - not osd_auto_discovery
+    - not containerized_deployment
 
-- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with a dedicated device for db and wal
+- name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with a dedicated device for db and wal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
   with_together:
     - "{{ parted_results.results }}"
@@ -73,6 +84,6 @@
     - not item.0.get("skipped")
     - item.0.get("rc", 0) != 0
     - osd_objectstore == 'bluestore'
-    - not osd_auto_discovery
+    - not containerized_deployment
 
 - include: ../activate_osds.yml
index fd00f69f31bfa593d95b6ed7b1ac2de17e5c36af..1ac606f5375df5bae93efd96d0efba8268ffde30 100644 (file)
@@ -2,22 +2,47 @@
 # {{ ansible_managed }}
 
 
+#############
+# VARIABLES #
+#############
+
+REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+
+
 #############
 # FUNCTIONS #
 #############
 
-function create_dev_list {
-  local regex
+
+function expose_devices {
   local disks
-  regex="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
   # we use the prepare container to find the partitions to expose
-  disks=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo /dev/disk/by-partuuid/${regex} | uniq)
+  disks=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
   for disk in $disks; do
-    DEVICES="--device $disk "
+    DEVICES="--device=$disk "
   done
 }
 
-create_dev_list $1
+function expose_partitions {
+  local partition
+  for partition in Block.wal Block.db Journal; do
+    if docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep -Eo "$partition is GPT partition"; then
+      if [[ "$partition" == "Block.wal" ]]; then
+        part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+        DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_WAL=$part"
+      elif [[ "$partition" == "Block.db" ]]; then
+        part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+        DOCKER_ENV="$DOCKER_ENV -e OSD_BLUESTORE_BLOCK_DB=$part"
+      elif [[ "$partition" == "Journal" ]]; then
+        part=$(docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} |& grep "$partition is GPT partition" | grep -Eo /dev/disk/by-partuuid/${REGEX} | uniq)
+        DOCKER_ENV="$DOCKER_ENV -e OSD_JOURNAL=$part"
+      fi
+    fi
+  done
+}
+
+#expose_devices $1
+expose_partitions $1
 
 
 ########
@@ -28,29 +53,40 @@ create_dev_list $1
   --rm \
   --net=host \
   --privileged=true \
+  --pid=host \
+  -v /dev:/dev \
+  -v /etc/localtime:/etc/localtime:ro \
+  -v /var/lib/ceph:/var/lib/ceph \
+  -v /etc/ceph:/etc/ceph \
+  $DOCKER_ENV \
   {% if ansible_distribution == 'Ubuntu' -%}
   --security-opt apparmor:unconfined \
   {% endif -%}
-  --pid=host \
   {% if not containerized_deployment_with_kv -%}
-  -v /var/lib/ceph:/var/lib/ceph \
-  -v /etc/ceph:/etc/ceph \
   {% else -%}
-  -e KV_TYPE={{kv_type}} \
-  -e KV_IP={{kv_endpoint}} \
-  -e KV_PORT={{kv_port}} \
+  -e KV_TYPE={{ kv_type }} \
+  -e KV_IP={{ kv_endpoint }} \
+  -e KV_PORT={{ kv_port }} \
   {% endif -%}
-  -v /dev:/dev \
-  -v /etc/localtime:/etc/localtime:ro \
-  --device=/dev/${1} \
-  --device=/dev/${1}1 \
-  {% if dedicated_devices|length > 0 -%}
-  -e OSD_JOURNAL={{ dedicated_devices[0] }} \
-  {% else -%}
-  --device=/dev/${1}2 \
+  {% if osd_objectstore == 'filestore' and not dmcrypt -%}
+  -e OSD_FILESTORE=1 \
+  -e OSD_DMCRYPT=0 \
+  {% endif -%}
+  {% if osd_objectstore == 'filestore' and dmcrypt -%}
+  -e OSD_FILESTORE=1 \
+  -e OSD_DMCRYPT=1 \
+  {% endif -%}
+  {% if osd_objectstore == 'bluestore' and not dmcrypt -%}
+  -e OSD_BLUESTORE=1 \
+  -e OSD_DMCRYPT=0 \
   {% endif -%}
+  {% if osd_objectstore == 'bluestore' and dmcrypt -%}
+  -e OSD_BLUESTORE=1 \
+  -e OSD_DMCRYPT=1 \
+  {% endif -%}
+  -e CLUSTER={{ cluster }} \
   -e OSD_DEVICE=/dev/${1} \
+  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
   {{ ceph_osd_docker_extra_env }} \
   --name=ceph-osd-{{ ansible_hostname }}-dev${1} \
-  -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
index 6b701dc86d1b9dac8042b1cc6c032b139d46ff9d..ddd17de7b7deeabe8b1c9e80f18f6cacdc77beae 100644 (file)
@@ -8,7 +8,7 @@ monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: collocated
 dmcrypt: true
-osd_objectstore: "bluestore"
+osd_objectstore: bluestore
 devices:
   - '/dev/sda'
   - '/dev/sdb'
index 7b96a4f7ed80293c517464d7812e4592c4ecfeb7..507ab648b95f54376ef2a588ddb9c22b3c72a117 100644 (file)
@@ -10,7 +10,7 @@ monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: collocated
 dmcrypt: true
-osd_objectstore: "bluestore"
+osd_objectstore: bluestore
 devices:
   - '/dev/sda'
   - '/dev/sdb'
@@ -24,5 +24,4 @@ ceph_conf_overrides:
   osd:
     bluestore block db size = 67108864
     bluestore block wal size = 1048576000
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
index 44232c7c6d145fc8e052a3581ce0224eef6512b4..2306ba6ee689bc966210f30d23b76a3916cc3881 100644 (file)
@@ -14,11 +14,11 @@ ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
+osd_scenario: non-collocated
+osd_objectstore: bluestore
 devices:
   - /dev/sda
 dedicated_devices:
   - /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }} -e OSD_BLUESTORE=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
+ceph_osd_docker_prepare_env: -e OSD_FORCE_ZAP=1
 ceph_osd_docker_run_script_path: /var/tmp
index 6f34d9eaea68a0e16edde76b4ed540b489f3f512..94a47e260e9f1bd1cbbbec9eb2bb3ba7bd34418b 100644 (file)
@@ -15,9 +15,7 @@ public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 osd_scenario: collocated
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_BLUESTORE=1
+ceph_osd_docker_prepare_env: -e OSD_FORCE_ZAP=1
 devices:
   - /dev/sda
   - /dev/sdb
index e90587a26a29b6013ef65f00c9c0f897217de665..7434438fc3d587ddafbea1726fd28b31f7b0d8b7 100644 (file)
@@ -6,7 +6,7 @@ public_network: "192.168.1.0/24"
 cluster_network: "192.168.2.0/24"
 journal_size: 100
 radosgw_interface: eth1
-osd_objectstore: "filestore"
+osd_objectstore: filestore
 devices:
   - '/dev/sda'
 dedicated_devices:
index ef1fa393c890d45b489d5c1e751893d16dea89e4..2e855dbbd64845b90cc60e3f900900bfde4ed6db 100644 (file)
@@ -7,7 +7,8 @@ journal_size: 100
 monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: non-collocated
-osd_objectstore: "filestore"
+dmcrypt: true
+osd_objectstore: filestore
 devices:
   - '/dev/sda'
 dedicated_devices:
index e7c2134defbef7f1a96ad6a875a01c44ea60eb95..87b9eb1d28b1379b9fe8c707ef3768d5bc876886 100644 (file)
@@ -7,7 +7,8 @@ journal_size: 100
 monitor_interface: eth1
 radosgw_interface: eth1
 osd_scenario: collocated
-osd_objectstore: "filestore"
+osd_objectstore: filestore
+dmcrypt: true
 devices:
   - '/dev/sda'
   - '/dev/sdb'
index b042cb62d597e8247dd843b4bb8d95f77741fd7a..7bd64b54ab921cb09f491e47fdc77d791a9e294f 100644 (file)
@@ -14,11 +14,10 @@ ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 osd_scenario: collocated
+osd_objectstore: filestore
 dmcrypt: true
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
 devices:
   - /dev/sda
   - /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
index 74c84409f193af424c6c4ac22c160c037df139eb..9b34c6325366854a67735d5ab21c729d036b8d87 100644 (file)
@@ -14,11 +14,11 @@ ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
+osd_objectstore: filestore
 osd_scenario: non-collocated
 devices:
   - /dev/sda
 dedicated_devices:
   - /dev/sdb
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ dedicated_devices[0] }}
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
 ceph_osd_docker_run_script_path: /var/tmp
index 864ec8c9fa764f5bc4b230fcdfe8fc781314141a..a261b406ae22b4fc64ba8e7ab10d920978174eb7 100644 (file)
@@ -15,9 +15,8 @@ public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
 osd_scenario: collocated
 ceph_rgw_civetweb_port: 8080
-ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
-ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE
+osd_objectstore: filestore
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
 devices:
   - /dev/sda
   - /dev/sdb
index 88d7a335d44cbcd1582ab916730edde988bd9256..0a0575f2b47e8d8a2aa280c921e2507d8e9c5666 100644 (file)
@@ -7,7 +7,7 @@ cluster_network: "192.168.4.0/24"
 monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
-osd_objectstore: "filestore"
+osd_objectstore: filestore
 devices:
   - '/dev/sda'
   - '/dev/sdb'