ceph-config: use the ceph_volume module to get num_osds for lvm batch
author      Andrew Schoen <aschoen@redhat.com>
            Thu, 20 Sep 2018 18:32:00 +0000 (13:32 -0500)
committer   Sébastien Han <seb@redhat.com>
            Tue, 9 Oct 2018 14:09:50 +0000 (10:09 -0400)
This gives us an accurate count of how many OSDs will be created.

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
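
For context, the number the new task derives comes from the JSON report that
`ceph-volume lvm batch --report` prints. A minimal Python sketch of the same
idea outside Ansible (the flags and device paths here are illustrative
assumptions, not taken from this commit):

    import json
    import subprocess

    # Ask ceph-volume to plan the batch without creating anything; the
    # exact flags and devices below are assumptions for illustration.
    report = subprocess.check_output([
        "ceph-volume", "--cluster", "ceph", "lvm", "batch",
        "--bluestore", "--report", "--format=json",
        "/dev/sdb", "/dev/sdc",
    ])

    # The report is expected to carry one entry per OSD that would be
    # created, the same assumption the new set_fact task below relies on.
    num_osds = len(json.loads(report)["osds"])
    print(num_osds)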
library/ceph_volume.py
roles/ceph-config/tasks/main.yml

diff --git a/library/ceph_volume.py b/library/ceph_volume.py
index ee3cae284e10e07774c20393928d389b8d0dcebb..21c651c255d15f797b62b1cd0589759a8c234664 100644
--- a/library/ceph_volume.py
+++ b/library/ceph_volume.py
@@ -2,6 +2,7 @@
 import datetime
 import json
 
+
 ANSIBLE_METADATA = {
     'metadata_version': '1.0',
     'status': ['preview'],
@@ -207,7 +208,7 @@ def batch(module):
     if objectstore == "filestore":
         cmd.extend(["--journal-size", journal_size])
 
-    if objectstore == "bluestore" and block_db_size != -1:
+    if objectstore == "bluestore" and block_db_size != "-1":
         cmd.extend(["--block-db-size", block_db_size])
 
     if report:
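
Why the quotes around -1 matter: the next hunk changes the parameter type to
'str', so the default becomes the string "-1" and a comparison against the
integer -1 can never be equal. A quick illustration:

    # With block_db_size declared as type 'str', its default is "-1".
    block_db_size = "-1"
    print(block_db_size != -1)    # True  -> the old guard always appended --block-db-size
    print(block_db_size != "-1")  # False -> the fixed guard skips the flag as intended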
@@ -442,8 +443,8 @@ def run_module():
         dmcrypt=dict(type='bool', required=False, default=False),
         batch_devices=dict(type='list', required=False, default=[]),
         osds_per_device=dict(type='int', required=False, default=1),
-        journal_size=dict(type='int', required=False, default=5120),
-        block_db_size=dict(type='int', required=False, default=-1),
+        journal_size=dict(type='str', required=False, default="5120"),
+        block_db_size=dict(type='str', required=False, default="-1"),
         report=dict(type='bool', required=False, default=False),
     )
 
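A plausible reason for switching journal_size and block_db_size to strings (an
assumption; the commit does not state it): both values are appended straight
into the command list, where non-string elements break when the command is
joined or executed.

    # Sketch: int elements in a command list fail on join/exec, str ones do not.
    cmd = ["ceph-volume", "lvm", "batch", "--journal-size"]
    cmd.append("5120")        # str param slots in cleanly
    print(" ".join(cmd))      # ok; with an int element this join raises TypeError
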
diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml
index c12bce2e71cba14e84397102371d082c34c41859..9df56e44b035e388502e144f3178af6e16e7cb52 100644
--- a/roles/ceph-config/tasks/main.yml
+++ b/roles/ceph-config/tasks/main.yml
         - lvm_volumes | default([]) | length > 0
         - osd_scenario == 'lvm'
 
-    # This is a best guess. Ideally we'd like to use `ceph-volume lvm batch --report` to get
-    # a more accurate number but the ceph.conf needs to be in place before that is possible.
-    # There is a tracker to add functionality to ceph-volume which would allow doing this
-    # without the need for a ceph.conf: http://tracker.ceph.com/issues/36088
-    - name: count number of osds for lvm batch scenario
+    - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
+      ceph_volume:
+        cluster: "{{ cluster }}"
+        objectstore: "{{ osd_objectstore }}"
+        batch_devices: "{{ devices }}"
+        osds_per_device: "{{ osds_per_device | default(1) | int }}"
+        journal_size: "{{ journal_size }}"
+        block_db_size: "{{ block_db_size }}"
+        report: true
+        action: "batch"
+      register: lvm_batch_report
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+      when:
+        - devices | default([]) | length > 0
+        - osd_scenario == 'lvm'
+
+    - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report'
       set_fact:
-        num_osds: "{{ devices | length | int * osds_per_device | default(1) }}"
+        num_osds: "{{ (lvm_batch_report.stdout | from_json).osds | length | int }}"
       when:
         - devices | default([]) | length > 0
         - osd_scenario == 'lvm'
+
     when:
       - inventory_hostname in groups.get(osd_group_name, [])
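
The set_fact expression above is a thin Jinja wrapper around JSON parsing. A
sketch of what `(lvm_batch_report.stdout | from_json).osds | length` computes,
with an example payload assumed to match the report's shape:

    import json

    # Hypothetical registered stdout from the batch --report task.
    stdout = '{"osds": [{"data": "/dev/sdb"}, {"data": "/dev/sdc"}]}'
    num_osds = len(json.loads(stdout)["osds"])
    print(num_osds)  # 2 -> becomes the num_osds fact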