rgw/rbdmirror: use service dump instead of ceph -s
author     Dimitri Savineau <dsavinea@redhat.com>
           Mon, 26 Oct 2020 21:49:47 +0000 (17:49 -0400)
committer  Guillaume Abrioux <gabrioux@redhat.com>
           Tue, 3 Nov 2020 13:32:09 +0000 (14:32 +0100)
The ceph status command returns a lot of information that ends up stored
in variables and/or facts, needlessly consuming resources.
When checking the rgw/rbdmirror service status, we only use the
servicemap structure from the ceph status output.
To optimize this, we can use the ceph service dump command instead: it
contains the same information we need, exposed directly under a
top-level services key (there is no servicemap wrapper), which is why
the JSON lookups below drop the servicemap key.
This command returns less information and is slightly faster than the
ceph status command.

$ ceph status -f json | wc -c
2001
$ ceph service dump -f json | wc -c
1105
$ time ceph status -f json > /dev/null

real 0m0.557s
user 0m0.516s
sys 0m0.040s
$ time ceph service dump -f json > /dev/null

real 0m0.454s
user 0m0.434s
sys 0m0.020s
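
The key path change is easy to see by comparing both outputs (abridged
with jq; illustrative output from a cluster assumed to run both
services):

$ ceph status -f json | jq '.servicemap.services | keys'
[
  "rbd-mirror",
  "rgw"
]
$ ceph service dump -f json | jq '.services | keys'
[
  "rbd-mirror",
  "rgw"
]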

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit 3f9081931f8a369b075060083cdb225e3477f99a)

infrastructure-playbooks/shrink-rbdmirror.yml
infrastructure-playbooks/shrink-rgw.yml
roles/ceph-facts/tasks/facts.yml

diff --git a/infrastructure-playbooks/shrink-rbdmirror.yml b/infrastructure-playbooks/shrink-rbdmirror.yml
index 9b01a1e007f0eb859ce54b1f566d57686e3d680c..d87130044bfe3424cfcf717427e92bfb17aabc1e 100644
@@ -67,7 +67,7 @@
         container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
 
     - name: exit playbook, if can not connect to the cluster
-      command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} -s -f json"
+      command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
       register: ceph_health
       until: ceph_health is succeeded
       retries: 5
     - name: set_fact rbdmirror_gids
       set_fact:
         rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [ item ] }}"
-      with_items: "{{  (ceph_health.stdout | from_json)['servicemap']['services']['rbd-mirror']['daemons'].keys() | list }}"
+      with_items: "{{  (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list }}"
       when: item != 'summary'
 
     - name: set_fact rbdmirror_to_kill_gid
       set_fact:
-        rbdmirror_to_kill_gid: "{{ (ceph_health.stdout | from_json)['servicemap']['services']['rbd-mirror']['daemons'][item]['gid'] }}"
+        rbdmirror_to_kill_gid: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['gid'] }}"
       with_items: "{{ rbdmirror_gids }}"
-      when: (ceph_health.stdout | from_json)['servicemap']['services']['rbd-mirror']['daemons'][item]['metadata']['id'] == rbdmirror_to_kill_hostname
+      when: (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['metadata']['id'] == rbdmirror_to_kill_hostname
 
   tasks:
     - name: stop rbdmirror service
 
   post_tasks:
     - name: get servicemap details
-      command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} -s -f json"
+      command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
       register: ceph_health
       failed_when:
-        - "'rbd-mirror' in (ceph_health.stdout | from_json)['servicemap']['services'].keys() | list"
-        - rbdmirror_to_kill_gid in (ceph_health.stdout | from_json)['servicemap']['services']['rbd-mirror']['daemons'].keys() | list
+        - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
+        - rbdmirror_to_kill_gid in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list
       until:
-        - "'rbd-mirror' in (ceph_health.stdout | from_json)['servicemap']['services'].keys() | list"
-        - rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['servicemap']['services']['rbd-mirror']['daemons'].keys() | list
+        - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
+        - rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list
       when: rbdmirror_to_kill_gid is defined
       retries: 12
       delay: 10
diff --git a/infrastructure-playbooks/shrink-rgw.yml b/infrastructure-playbooks/shrink-rgw.yml
index e8e9a6cd8f544e2ae5098eb060d277ebbb2da04b..88231b2c21994de188913ec59a5afe0e4d1036be 100644
       delay: 2
 
     - name: get rgw instances
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
       register: rgw_instances
 
 
     - name: exit playbook, if the rgw_to_kill doesn't exist
-      when: rgw_to_kill not in (rgw_instances.stdout | from_json).servicemap.services.rgw.daemons.keys() | list
+      when: rgw_to_kill not in (rgw_instances.stdout | from_json).services.rgw.daemons.keys() | list
       fail:
         msg: >
           It seems that the rgw instance given is not part of the ceph cluster. Please
       delay: 2
 
     - name: exit if rgw_to_kill is reported in ceph status
-      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
       register: ceph_status
       failed_when:
-        - (ceph_status.stdout | from_json).servicemap.services.rgw is defined
-        - rgw_to_kill in (ceph_status.stdout | from_json).servicemap.services.rgw.daemons.keys() | list
+        - (ceph_status.stdout | from_json).services.rgw is defined
+        - rgw_to_kill in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
       until:
-        - (ceph_status.stdout | from_json).servicemap.services.rgw is defined
-        - rgw_to_kill not in (ceph_status.stdout | from_json).servicemap.services.rgw.daemons.keys() | list
+        - (ceph_status.stdout | from_json).services.rgw is defined
+        - rgw_to_kill not in (ceph_status.stdout | from_json).services.rgw.daemons.keys() | list
       retries: 3
       delay: 3
 
diff --git a/roles/ceph-facts/tasks/facts.yml b/roles/ceph-facts/tasks/facts.yml
index 1dbcb9ea8bc310a0b112098eb9d8d1ee9761ae90..1eaa9aecbf0032f653bb0e67b102eeb63e432eb2 100644
       or inventory_hostname in groups.get(nfs_group_name, [])
   block:
     - name: get ceph current status
-      command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+      command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
       changed_when: false
       failed_when: false
       check_mode: no
     - name: set_fact rgw_hostname
       set_fact:
         rgw_hostname: "{% set _value = ansible_hostname -%}
-        {% for key in (ceph_current_status['servicemap']['services']['rgw']['daemons'] | list) -%}
+        {% for key in (ceph_current_status['services']['rgw']['daemons'] | list) -%}
         {% if key == ansible_fqdn -%}
         {% set _value = key -%}
         {% endif -%}
         {% endfor -%}
         {{ _value }}"
       when:
-        - ceph_current_status['servicemap'] is defined
-        - ceph_current_status['servicemap']['services'] is defined
-        - ceph_current_status['servicemap']['services']['rgw'] is defined
+        - ceph_current_status['services'] is defined
+        - ceph_current_status['services']['rgw'] is defined
 
 - name: check if the ceph conf exists
   stat: