From e74d80e72fa5044569d30d5185fd16b7debf1dea Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Tue, 14 May 2019 14:51:32 +0200
Subject: [PATCH] rename docker_exec_cmd variable

This commit renames the `docker_exec_cmd` variable to
`container_exec_cmd` so it's more generic: the command it holds is built
from `container_binary`, so the same fact serves both docker and podman
deployments.
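A minimal sketch of the pattern this rename touches, assembled from tasks
updated by this patch (the `| default('')` fallback keeps the commands
runnable on non-containerized deployments, where the fact is never set):

    - name: set_fact container_exec_cmd
      set_fact:
        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
      when: containerized_deployment

    - name: show ceph status
      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
      changed_when: false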
Signed-off-by: Guillaume Abrioux
---
 group_vars/all.yml.sample                     |  2 +-
 group_vars/rhcs.yml.sample                    |  2 +-
 infrastructure-playbooks/add-osd.yml          |  4 +--
 infrastructure-playbooks/ceph-keys.yml        | 12 ++++----
 infrastructure-playbooks/rolling_update.yml   | 30 +++++++++----------
 infrastructure-playbooks/shrink-mon.yml       | 14 ++++-----
 infrastructure-playbooks/shrink-osd.yml       | 16 +++++-----
 ...inerized-to-containerized-ceph-daemons.yml |  2 +-
 .../untested-by-ci/replace-osd.yml            | 12 ++++----
 library/ceph_crush.py                         |  2 +-
 roles/ceph-client/tasks/create_users_keys.yml | 10 +++----
 roles/ceph-defaults/defaults/main.yml         |  2 +-
 roles/ceph-facts/tasks/facts.yml              |  8 ++---
 roles/ceph-iscsi-gw/tasks/common.yml          | 10 +++----
 roles/ceph-mds/tasks/containerized.yml        |  6 ++--
 .../ceph-mds/tasks/create_mds_filesystems.yml | 16 +++++-----
 roles/ceph-mds/tasks/main.yml                 |  4 +--
 roles/ceph-mgr/tasks/main.yml                 |  4 +--
 roles/ceph-mgr/tasks/mgr_modules.yml          |  8 ++---
 roles/ceph-mon/tasks/ceph_keys.yml            |  2 +-
 roles/ceph-mon/tasks/crush_rules.yml          |  8 ++---
 roles/ceph-mon/tasks/deploy_monitors.yml      |  2 +-
 roles/ceph-mon/tasks/main.yml                 |  4 +--
 roles/ceph-mon/tasks/secure_cluster.yml       |  4 +--
 roles/ceph-nfs/tasks/create_rgw_nfs_user.yml  |  8 ++---
 roles/ceph-nfs/tasks/main.yml                 |  4 +--
 roles/ceph-nfs/tasks/start_nfs.yml            |  8 ++---
 roles/ceph-osd/tasks/openstack_config.yml     | 16 +++++-----
 roles/ceph-rbd-mirror/tasks/main.yml          |  4 +--
 roles/ceph-rgw/handlers/main.yml              |  2 +-
 roles/ceph-rgw/tasks/main.yml                 |  6 ++--
 roles/ceph-rgw/tasks/multisite/checks.yml     |  8 ++---
 roles/ceph-rgw/tasks/multisite/master.yml     | 10 +++----
 roles/ceph-rgw/tasks/multisite/secondary.yml  | 12 ++++----
 34 files changed, 131 insertions(+), 131 deletions(-)

diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample
index 8e8743c61..39621d62c 100644
--- a/group_vars/all.yml.sample
+++ b/group_vars/all.yml.sample
@@ -557,7 +557,7 @@ dummy:
 ##########
 # DOCKER #
 ##########
-#docker_exec_cmd:
+#container_exec_cmd:
 #docker: false
 #ceph_docker_image: "ceph/daemon"
 #ceph_docker_image_tag: latest
diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample
index 8b132571a..eeb9b9a0a 100644
--- a/group_vars/rhcs.yml.sample
+++ b/group_vars/rhcs.yml.sample
@@ -557,7 +557,7 @@ ceph_rhcs_version: 4
 ##########
 # DOCKER #
 ##########
-#docker_exec_cmd:
+#container_exec_cmd:
 #docker: false
 ceph_docker_image: "rhceph/rhceph-4-rhel8"
 ceph_docker_image_tag: "latest"
diff --git a/infrastructure-playbooks/add-osd.yml b/infrastructure-playbooks/add-osd.yml
index 90f9be6f2..352264729 100644
--- a/infrastructure-playbooks/add-osd.yml
+++ b/infrastructure-playbooks/add-osd.yml
@@ -82,7 +82,7 @@
         add_osd: True
 
     - name: set noup flag
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set noup"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set noup"
       delegate_to: "{{ groups['mons'][0] }}"
       run_once: True
       changed_when: False
@@ -113,7 +113,7 @@
 
   post_tasks:
     - name: unset noup flag
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd unset noup"
      delegate_to: "{{ groups['mons'][0] }}"
       run_once: True
       changed_when: False
diff --git a/infrastructure-playbooks/ceph-keys.yml b/infrastructure-playbooks/ceph-keys.yml
index 08bdda89e..f08076b48 100644
--- a/infrastructure-playbooks/ceph-keys.yml
+++ b/infrastructure-playbooks/ceph-keys.yml
@@ -8,7 +8,7 @@
   gather_facts: false
   vars:
     cluster: ceph
-    docker_exec_cmd: "docker exec ceph-nano"
+    container_exec_cmd: "docker exec ceph-nano"
     keys_to_info:
       - client.admin
       - mds.0
@@ -29,7 +29,7 @@
         caps: "{{ item.caps }}"
         cluster: "{{ cluster }}"
         secret: "{{ item.key | default('') }}"
-        containerized: "{{ docker_exec_cmd | default(False) }}"
+        containerized: "{{ container_exec_cmd | default(False) }}"
       with_items: "{{ keys_to_create }}"
 
    - name: update ceph key(s)
@@ -38,7 +38,7 @@
         state: update
         caps: "{{ item.caps }}"
         cluster: "{{ cluster }}"
-        containerized: "{{ docker_exec_cmd | default(False) }}"
+        containerized: "{{ container_exec_cmd | default(False) }}"
       with_items: "{{ keys_to_create }}"
 
     - name: delete ceph key(s)
@@ -46,7 +46,7 @@
         name: "{{ item }}"
         state: absent
         cluster: "{{ cluster }}"
-        containerized: "{{ docker_exec_cmd | default(False) }}"
+        containerized: "{{ container_exec_cmd | default(False) }}"
       with_items: "{{ keys_to_delete }}"
 
     - name: info ceph key(s)
@@ -54,7 +54,7 @@
         name: "{{ item }}"
         state: info
         cluster: "{{ cluster }}"
-        containerized: "{{ docker_exec_cmd }}"
+        containerized: "{{ container_exec_cmd }}"
       register: key_info
       ignore_errors: true
       with_items: "{{ keys_to_info }}"
@@ -63,7 +63,7 @@
       ceph_key:
         state: list
         cluster: "{{ cluster }}"
-        containerized: "{{ docker_exec_cmd | default(False) }}"
+        containerized: "{{ container_exec_cmd | default(False) }}"
       register: list_keys
       ignore_errors: true
 
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index 7257a0e16..69e23e8cc 100644
--- a/infrastructure-playbooks/rolling_update.yml
+++ b/infrastructure-playbooks/rolling_update.yml
@@ -421,13 +421,13 @@
         - ceph_release in ["nautilus", "octopus"]
         - not containerized_deployment
 
-    - name: set_fact docker_exec_cmd_osd
+    - name: set_fact container_exec_cmd_osd
       set_fact:
-        docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
       when: containerized_deployment
 
     - name: get osd versions
-      command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
+      command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
       register: ceph_versions
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
@@ -439,19 +439,19 @@
     # length == 1 means there is a single osds versions entry
     # thus all the osds are running the same version
     - name: osd set sortbitwise
-      command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} osd set sortbitwise"
+      command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} osd set sortbitwise"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       when:
         - (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
         - ceph_versions_osd | string is search("ceph version 10")
 
     - name: get num_pgs - non container
-      command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
+      command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
       register: ceph_pgs
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
     - name: waiting for clean pgs...
-      command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
+      command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
       register: ceph_health_post
       until: >
         (((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) > 0)
@@ -475,20 +475,20 @@
     - import_role:
         name: ceph-facts
 
-    - name: set_fact docker_exec_cmd_osd
+    - name: set_fact container_exec_cmd_osd
       set_fact:
-        docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
       when: containerized_deployment
 
     - name: unset osd flags
-      command: "{{ docker_exec_cmd_update_osd|default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
+      command: "{{ container_exec_cmd_update_osd|default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
       with_items:
         - noout
         - norebalance
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
     - name: get osd versions
-      command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
+      command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
       register: ceph_versions
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
@@ -500,7 +500,7 @@
     # length == 1 means there is a single osds versions entry
     # thus all the osds are running the same version
     - name: complete osds upgrade
-      command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} osd require-osd-release luminous"
+      command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} osd require-osd-release luminous"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       when:
         - (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
@@ -845,17 +845,17 @@
     - import_role:
         name: ceph-defaults
 
-    - name: set_fact docker_exec_cmd_status
+    - name: set_fact container_exec_cmd_status
       set_fact:
-        docker_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
       when: containerized_deployment
 
     - name: show ceph status
-      command: "{{ docker_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
+      command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
       run_once: True
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
     - name: show all daemons version
-      command: "{{ docker_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} versions"
+      command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} versions"
       run_once: True
       delegate_to: "{{ groups[mon_group_name][0] }}"
diff --git a/infrastructure-playbooks/shrink-mon.yml b/infrastructure-playbooks/shrink-mon.yml
index 8f3d5a86c..712097c83 100644
--- a/infrastructure-playbooks/shrink-mon.yml
+++ b/infrastructure-playbooks/shrink-mon.yml
@@ -74,13 +74,13 @@
       with_items: "{{ groups[mon_group_name] }}"
       when: item != mon_to_kill
 
-    - name: "set_fact docker_exec_cmd build {{ container_binary }} exec command (containerized)"
+    - name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
       set_fact:
-        docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
       when: containerized_deployment
 
     - name: exit playbook, if can not connect to the cluster
-      command: "{{ docker_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
+      command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
       register: ceph_health
       until: ceph_health.stdout.find("HEALTH") > -1
       delegate_to: "{{ mon_host }}"
@@ -106,7 +106,7 @@
       delegate_to: "{{ mon_to_kill }}"
 
     - name: remove monitor from the quorum
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
       failed_when: false
       delegate_to: "{{ mon_host }}"
 
@@ -116,7 +116,7 @@
     # 'sleep 5' is not that bad and should be sufficient
     - name: verify the monitor is out of the cluster
       shell: |
-        {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["quorum_names"])'
+        {{ container_exec_cmd }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["quorum_names"])'
       delegate_to: "{{ mon_host }}"
       failed_when: false
       register: result
@@ -138,9 +138,9 @@
       when: mon_to_kill_hostname in result.stdout
 
     - name: show ceph health
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
       delegate_to: "{{ mon_host }}"
 
     - name: show ceph mon status
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
       delegate_to: "{{ mon_host }}"
diff --git a/infrastructure-playbooks/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml
index 6ec5ef0fa..6ef4281b0 100644
--- a/infrastructure-playbooks/shrink-osd.yml
+++ b/infrastructure-playbooks/shrink-osd.yml
@@ -62,13 +62,13 @@
         name: ceph-facts
 
   post_tasks:
-    - name: set_fact docker_exec_cmd build docker exec command (containerized)
+    - name: set_fact container_exec_cmd build docker exec command (containerized)
       set_fact:
-        docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
       when: containerized_deployment
 
     - name: exit playbook, if can not connect to the cluster
-      command: "{{ docker_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
+      command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
       register: ceph_health
       until: ceph_health.stdout.find("HEALTH") > -1
       delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -76,7 +76,7 @@
       delay: 2
 
     - name: find the host(s) where the osd(s) is/are running on
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
       with_items: "{{ osd_to_kill.split(',') }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       register: find_osd_hosts
@@ -87,7 +87,7 @@
       with_items: "{{ find_osd_hosts.results }}"
 
     - name: mark osd(s) out of the cluster
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ osd_to_kill.replace(',', ' ') }}"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ osd_to_kill.replace(',', ' ') }}"
       run_once: true
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
@@ -111,15 +111,15 @@
       loop: "{{ osd_hosts }}"
 
     - name: purge osd(s) from the cluster
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"
       run_once: true
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items: "{{ osd_to_kill.split(',') }}"
 
     - name: show ceph health
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
     - name: show ceph osd tree
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
       delegate_to: "{{ groups[mon_group_name][0] }}"
diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
index 786c81448..5e4692b72 100644
--- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
+++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
@@ -136,7 +136,7 @@
 
   post_tasks:
     - name: waiting for the monitor to join the quorum...
-      command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s --format json"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s --format json"
       register: ceph_health_raw
       until: >
         hostvars[mon_host]['ansible_hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]
diff --git a/infrastructure-playbooks/untested-by-ci/replace-osd.yml b/infrastructure-playbooks/untested-by-ci/replace-osd.yml
index bb9b67ce1..204202b00 100644
--- a/infrastructure-playbooks/untested-by-ci/replace-osd.yml
+++ b/infrastructure-playbooks/untested-by-ci/replace-osd.yml
@@ -58,13 +58,13 @@
       name: ceph-defaults
 
   post_tasks:
-    - name: set_fact docker_exec_cmd build docker exec command (containerized)
+    - name: set_fact container_exec_cmd build docker exec command (containerized)
       set_fact:
-        docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
       when: containerized_deployment
 
     - name: exit playbook, if can not connect to the cluster
-      command: "{{ docker_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+      command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
       register: ceph_health
       until: ceph_health.stdout.find("HEALTH") > -1
       delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -72,7 +72,7 @@
       delay: 2
 
     - name: find the host(s) where the osd(s) is/are running on
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
       with_items: "{{ osd_to_replace.split(',') }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       register: find_osd_hosts
@@ -182,9 +182,9 @@
         - "{{ osd_to_replace_disks.results }}"
 
     - name: show ceph health
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
     - name: show ceph osd tree
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
       delegate_to: "{{ groups[mon_group_name][0] }}"
diff --git a/library/ceph_crush.py b/library/ceph_crush.py
index 0923c3da9..4086cd757 100644
--- a/library/ceph_crush.py
+++ b/library/ceph_crush.py
@@ -54,7 +54,7 @@ EXAMPLES = '''
     ceph_crush:
       cluster: "{{ cluster }}"
       location: "{{ hostvars[item]['osd_crush_location'] }}"
-      containerized: "{{ docker_exec_cmd }}"
+      containerized: "{{ container_exec_cmd }}"
   with_items: "{{ groups[osd_group_name] }}"
   when: crush_rule_config
 '''
diff --git a/roles/ceph-client/tasks/create_users_keys.yml b/roles/ceph-client/tasks/create_users_keys.yml
index db6e2a3c9..bd1389584 100644
--- a/roles/ceph-client/tasks/create_users_keys.yml
+++ b/roles/ceph-client/tasks/create_users_keys.yml
@@ -70,7 +70,7 @@
   block:
     - name: list existing pool(s)
       command: >
-        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
         osd pool get {{ item.name }} size
       with_items: "{{ pools }}"
       register: created_pools
@@ -79,7 +79,7 @@
 
     - name: create ceph pool(s)
       command: >
-        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
         osd pool create {{ item.0.name }}
         {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
         {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
@@ -100,7 +100,7 @@
 
     - name: customize pool size
       command: >
-        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
         osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}
       with_items: "{{ pools | unique }}"
       delegate_to: "{{ delegated_node }}"
@@ -111,7 +111,7 @@
 
     - name: customize pool min_size
       command: >
-        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
         osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}
       with_items: "{{ pools | unique }}"
       delegate_to: "{{ delegated_node }}"
@@ -121,7 +121,7 @@
         - (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
 
     - name: assign application to pool(s)
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
       with_items: "{{ pools | unique }}"
       changed_when: false
       delegate_to: "{{ delegated_node }}"
diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml
index ec5c9d4fa..65c21d94e 100644
--- a/roles/ceph-defaults/defaults/main.yml
+++ b/roles/ceph-defaults/defaults/main.yml
@@ -549,7 +549,7 @@ ceph_tcmalloc_max_total_thread_cache: 0
 ##########
 # DOCKER #
 ##########
-docker_exec_cmd:
+container_exec_cmd:
 docker: false
 ceph_docker_image: "ceph/daemon"
 ceph_docker_image_tag: latest
diff --git a/roles/ceph-facts/tasks/facts.yml b/roles/ceph-facts/tasks/facts.yml
index 2ede2a6f3..f21aad864 100644
--- a/roles/ceph-facts/tasks/facts.yml
+++ b/roles/ceph-facts/tasks/facts.yml
@@ -36,9 +36,9 @@
     monitor_name: "{{ ansible_fqdn }}"
   when: mon_use_fqdn
 
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
   set_fact:
-    docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
   when:
     - containerized_deployment
    - groups.get(mon_group_name, []) | length > 0
@@ -47,7 +47,7 @@
 # because it blindly picks a mon, which may be down because
 # of the rolling update
 - name: is ceph running already?
-  command: "{{ timeout_command }} {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
+  command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
   changed_when: false
   failed_when: false
   check_mode: no
@@ -76,7 +76,7 @@
   when: cephx or generate_fsid
 
 - name: get current fsid
-  command: "{{ timeout_command }} {{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }} config get fsid"
+  command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }} config get fsid"
   register: rolling_update_fsid
   delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
   when: rolling_update
diff --git a/roles/ceph-iscsi-gw/tasks/common.yml b/roles/ceph-iscsi-gw/tasks/common.yml
index ce46d89dd..c5458eefc 100644
--- a/roles/ceph-iscsi-gw/tasks/common.yml
+++ b/roles/ceph-iscsi-gw/tasks/common.yml
@@ -18,14 +18,14 @@
     src: "{{ role_path }}/templates/iscsi-gateway.cfg.j2"
     dest: /etc/ceph/iscsi-gateway.cfg
 
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
   set_fact:
-    docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when: containerized_deployment
 
 - name: check if a rbd pool exists
-  command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls --format json"
+  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls --format json"
   changed_when: false
   register: rbd_pool_exists
   delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -34,12 +34,12 @@
   when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
   block:
     - name: create a rbd pool if it doesn't exist
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ osd_pool_default_pg_num }}"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ osd_pool_default_pg_num }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
     - name: customize pool size
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default(osd_pool_default_size) }}"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default(osd_pool_default_size) }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       changed_when: false
       when: rbd_pool_size | default(osd_pool_default_size) != ceph_osd_pool_default_size
diff --git a/roles/ceph-mds/tasks/containerized.yml b/roles/ceph-mds/tasks/containerized.yml
index f8e220482..2b22bc3f3 100644
--- a/roles/ceph-mds/tasks/containerized.yml
+++ b/roles/ceph-mds/tasks/containerized.yml
@@ -1,7 +1,7 @@
 ---
-- name: set_fact docker_exec_cmd mds
+- name: set_fact container_exec_cmd mds
   set_fact:
-    docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
 
 - name: set_fact admin_keyring
   set_fact:
@@ -62,7 +62,7 @@
     daemon_reload: yes
 
 - name: wait for mds socket to exist
-  command: "{{ docker_exec_cmd }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok'"
+  command: "{{ container_exec_cmd }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok'"
   changed_when: false
   register: multi_mds_socket
   retries: 5
diff --git a/roles/ceph-mds/tasks/create_mds_filesystems.yml b/roles/ceph-mds/tasks/create_mds_filesystems.yml
index d00af283c..6689deb4f 100644
--- a/roles/ceph-mds/tasks/create_mds_filesystems.yml
+++ b/roles/ceph-mds/tasks/create_mds_filesystems.yml
@@ -9,7 +9,7 @@
         cephfs_pool_names: "{{ cephfs_pools | map(attribute='name') | list }}"
 
     - name: get and store list of filesystem pools
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls"
       changed_when: false
       register: osd_pool_ls
 
@@ -23,7 +23,7 @@
     block:
      - name: create filesystem pools
        command: >
-          {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+          {{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
          osd pool create {{ item.name }}
          {{ item.pg_num | default(item.pgs) | default(osd_pool_default_pg_num) }}
          {{ item.pgp_num | default(item.pgs) | default(item.pg_num) | default(osd_pool_default_pg_num) }}
@@ -38,19 +38,19 @@
        - "{{ cephfs_pools }}"
 
      - name: customize pool size
-        command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
+        command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
        with_items: "{{ cephfs_pools | unique }}"
        changed_when: false
        when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
 
      - name: customize pool min_size
-        command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
+        command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}"
        with_items: "{{ cephfs_pools | unique }}"
        changed_when: false
        when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
 
      - name: assign application to cephfs pools
-        command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} cephfs"
+        command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item }} cephfs"
        with_items:
          - "{{ cephfs_data }}"
          - "{{ cephfs_metadata }}"
@@ -60,18 +60,18 @@
    delegate_to: "{{ groups[mon_group_name][0] }}"
    block:
      - name: check if ceph filesystem already exists
-        command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
+        command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
        register: check_existing_cephfs
        changed_when: false
        failed_when: false
 
      - name: create ceph filesystem
-        command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
+        command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
        changed_when: false
        when: check_existing_cephfs.rc != 0
 
 - name: set max_mds
-  command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
+  command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when: mds_max_mds > 1
diff --git a/roles/ceph-mds/tasks/main.yml b/roles/ceph-mds/tasks/main.yml
index c7a6f4bfa..634736b73 100644
--- a/roles/ceph-mds/tasks/main.yml
+++ b/roles/ceph-mds/tasks/main.yml
@@ -5,9 +5,9 @@
     - inventory_hostname == groups[mds_group_name] | first
     - not rolling_update
 
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
   set_fact:
-    docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
   when: containerized_deployment
 
 - name: include common.yml
diff --git a/roles/ceph-mgr/tasks/main.yml b/roles/ceph-mgr/tasks/main.yml
index 29444d0b8..bce821b90 100644
--- a/roles/ceph-mgr/tasks/main.yml
+++ b/roles/ceph-mgr/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
   set_fact:
-    docker_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    container_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
   when: containerized_deployment
 
 - name: include common.yml
diff --git a/roles/ceph-mgr/tasks/mgr_modules.yml b/roles/ceph-mgr/tasks/mgr_modules.yml
index f044dcef4..2e6eac9c5 100644
--- a/roles/ceph-mgr/tasks/mgr_modules.yml
+++ b/roles/ceph-mgr/tasks/mgr_modules.yml
@@ -1,6 +1,6 @@
 ---
 - name: wait for all mgr to be up
-  shell: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json | python -c 'import sys, json; print(json.load(sys.stdin)[\"available\"])'"
+  shell: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json | python -c 'import sys, json; print(json.load(sys.stdin)[\"available\"])'"
   register: mgr_dump
   retries: 30
   delay: 5
@@ -10,7 +10,7 @@
     - mgr_dump.stdout | bool
 
 - name: get enabled modules from ceph-mgr
-  command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
+  command: "{{ container_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
   check_mode: no
   changed_when: false
   register: _ceph_mgr_modules
@@ -25,13 +25,13 @@
     _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}"
 
 - name: disable ceph mgr enabled modules
-  command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
+  command: "{{ container_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
   with_items: "{{ _ceph_mgr_modules.get('enabled_modules', []) }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when: item not in ceph_mgr_modules
 
 - name: add modules to ceph-mgr
-  command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
+  command: "{{ container_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
   with_items: "{{ ceph_mgr_modules }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when: (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
diff --git a/roles/ceph-mon/tasks/ceph_keys.yml b/roles/ceph-mon/tasks/ceph_keys.yml
index fd93c6024..29377b497 100644
--- a/roles/ceph-mon/tasks/ceph_keys.yml
+++ b/roles/ceph-mon/tasks/ceph_keys.yml
@@ -1,7 +1,7 @@
 ---
 - name: waiting for the monitor(s) to form the quorum...
   command: >
-    {{ docker_exec_cmd }}
+    {{ container_exec_cmd }}
     ceph
     --cluster {{ cluster }}
     -n mon.
diff --git a/roles/ceph-mon/tasks/crush_rules.yml b/roles/ceph-mon/tasks/crush_rules.yml
index adeb769c5..87dd06695 100644
--- a/roles/ceph-mon/tasks/crush_rules.yml
+++ b/roles/ceph-mon/tasks/crush_rules.yml
@@ -3,7 +3,7 @@
   ceph_crush:
     cluster: "{{ cluster }}"
     location: "{{ hostvars[item]['osd_crush_location'] }}"
-    containerized: "{{ docker_exec_cmd }}"
+    containerized: "{{ container_exec_cmd }}"
   with_items: "{{ groups[osd_group_name] }}"
   register: config_crush_hierarchy
   when:
@@ -12,13 +12,13 @@
     - hostvars[item]['osd_crush_location'] is defined
 
 - name: create configured crush rules
-  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd crush rule create-simple {{ item.name }} {{ item.root }} {{ item.type }}"
+  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd crush rule create-simple {{ item.name }} {{ item.root }} {{ item.type }}"
   with_items: "{{ crush_rules | unique }}"
   changed_when: false
   when: inventory_hostname == groups.get(mon_group_name) | last
 
 - name: get id for new default crush rule
-  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd -f json crush rule dump {{ item.name }}"
+  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd -f json crush rule dump {{ item.name }}"
   register: info_ceph_default_crush_rule
   changed_when: false
   with_items: "{{ crush_rules }}"
@@ -38,7 +38,7 @@
     - not item.get('skipped', false)
 
 - name: insert new default crush rule into daemon to prevent restart
-  command: "{{ hostvars[item]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}"
+  command: "{{ hostvars[item]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}"
   changed_when: false
   delegate_to: "{{ item }}"
   with_items: "{{ groups[mon_group_name] }}"
diff --git a/roles/ceph-mon/tasks/deploy_monitors.yml b/roles/ceph-mon/tasks/deploy_monitors.yml
index 2a104e33f..ea9b632e9 100644
--- a/roles/ceph-mon/tasks/deploy_monitors.yml
+++ b/roles/ceph-mon/tasks/deploy_monitors.yml
@@ -1,7 +1,7 @@
 ---
 - name: check if monitor initial keyring already exists
   command: >
-    {{ docker_exec_cmd | default('') }} ceph --cluster ceph --name mon. -k
+    {{ container_exec_cmd | default('') }} ceph --cluster ceph --name mon. -k
     /var/lib/ceph/mon/{{ cluster }}-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}/keyring
     auth get-key mon.
   register: initial_mon_key
diff --git a/roles/ceph-mon/tasks/main.yml b/roles/ceph-mon/tasks/main.yml
index f8b6ef3f2..d8abf4333 100644
--- a/roles/ceph-mon/tasks/main.yml
+++ b/roles/ceph-mon/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
   set_fact:
-    docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
   when: containerized_deployment
 
 - name: include deploy_monitors.yml
diff --git a/roles/ceph-mon/tasks/secure_cluster.yml b/roles/ceph-mon/tasks/secure_cluster.yml
index f94005f54..63d3cca43 100644
--- a/roles/ceph-mon/tasks/secure_cluster.yml
+++ b/roles/ceph-mon/tasks/secure_cluster.yml
@@ -1,14 +1,14 @@
 ---
 - name: collect all the pools
   command: >
-    {{ docker_exec_cmd }} rados --cluster {{ cluster }} lspools
+    {{ container_exec_cmd }} rados --cluster {{ cluster }} lspools
   changed_when: false
   register: ceph_pools
   check_mode: no
 
 - name: secure the cluster
   command: >
-    {{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
+    {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
   changed_when: false
   with_nested:
     - "{{ ceph_pools.stdout_lines|default([]) }}"
diff --git a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
index c0b5a2e13..c6f9b350d 100644
--- a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
+++ b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
@@ -1,11 +1,11 @@
 ---
-- name: set_fact docker_exec_cmd_nfs
+- name: set_fact container_exec_cmd_nfs
   set_fact:
-    docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
   when: containerized_deployment
 
 - name: check if "{{ ceph_nfs_rgw_user }}" exists
-  command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
+  command: "{{ container_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
   run_once: true
   register: rgwuser_exists
   changed_when: false
@@ -14,7 +14,7 @@
   when: nfs_obj_gw
 
 - name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
-  command: "{{ docker_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
+  command: "{{ container_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
   run_once: true
   register: rgwuser
   changed_when: false
diff --git a/roles/ceph-nfs/tasks/main.yml b/roles/ceph-nfs/tasks/main.yml
index 6e0099482..7c3821ec8 100644
--- a/roles/ceph-nfs/tasks/main.yml
+++ b/roles/ceph-nfs/tasks/main.yml
@@ -1,7 +1,7 @@
 ---
-- name: set_fact docker_exec_cmd
+- name: set_fact container_exec_cmd
   set_fact:
-    docker_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
   when: containerized_deployment
 
 - name: include common.yml
diff --git a/roles/ceph-nfs/tasks/start_nfs.yml b/roles/ceph-nfs/tasks/start_nfs.yml
index 9ea501a0e..419ba3daa 100644
--- a/roles/ceph-nfs/tasks/start_nfs.yml
+++ b/roles/ceph-nfs/tasks/start_nfs.yml
@@ -1,11 +1,11 @@
 ---
-- name: set_fact docker_exec_cmd_nfs
+- name: set_fact container_exec_cmd_nfs
   set_fact:
-    docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
   when: containerized_deployment
 
 - name: check if rados index object exists
-  shell: "{{ docker_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
+  shell: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
   changed_when: false
   failed_when: false
   register: rados_index_exists
@@ -15,7 +15,7 @@
   run_once: true
 
 - name: create an empty rados index object
-  command: "{{ docker_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
+  command: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
   when:
     - ceph_nfs_rados_backend
     - rados_index_exists.rc != 0
diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml
index a3f81c74d..0a28e54f3 100644
--- a/roles/ceph-osd/tasks/openstack_config.yml
+++ b/roles/ceph-osd/tasks/openstack_config.yml
@@ -1,9 +1,9 @@
 ---
 - name: wait for all osd to be up
   shell: >
-    test "$({{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_osds"])')" -gt 0 &&
-    test "$({{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_osds"])')" =
-    "$({{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_up_osds"])')"
+    test "$({{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_osds"])')" -gt 0 &&
+    test "$({{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_osds"])')" =
+    "$({{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["osdmap"]["osdmap"]["num_up_osds"])')"
   register: wait_for_all_osds_up
   retries: "{{ nb_retry_wait_osd_up }}"
   delay: "{{ delay_wait_osd_up }}"
@@ -15,7 +15,7 @@
   block:
     - name: list existing pool(s)
       command: >
-        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
         osd pool get {{ item.name }} size
       with_items: "{{ openstack_pools | unique }}"
       register: created_pools
@@ -24,7 +24,7 @@
 
     - name: create openstack pool(s)
      command: >
-        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
         osd pool create {{ item.0.name }}
         {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
         {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
@@ -43,7 +43,7 @@
 
     - name: customize pool size
       command: >
-        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
         osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}
       with_items: "{{ openstack_pools | unique }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -52,7 +52,7 @@
 
     - name: customize pool min_size
       command: >
-        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        {{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
         osd pool set {{ item.name }} min_size {{ item.min_size | default(osd_pool_default_min_size) }}
       with_items: "{{ openstack_pools | unique }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -60,7 +60,7 @@
       when: (item.min_size | default(osd_pool_default_min_size))|int > ceph_osd_pool_default_min_size
 
     - name: assign application to pool(s)
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
+      command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
       with_items: "{{ openstack_pools | unique }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
diff --git a/roles/ceph-rbd-mirror/tasks/main.yml b/roles/ceph-rbd-mirror/tasks/main.yml
index a696baa7f..e099b9ef2 100644
--- a/roles/ceph-rbd-mirror/tasks/main.yml
+++ b/roles/ceph-rbd-mirror/tasks/main.yml
@@ -20,9 +20,9 @@
 - name: tasks for containerized deployment
   when: containerized_deployment
   block:
-    - name: set_fact docker_exec_cmd
+    - name: set_fact container_exec_cmd
       set_fact:
-        docker_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
 
     - name: include docker/main.yml
       include_tasks: docker/main.yml
diff --git a/roles/ceph-rgw/handlers/main.yml b/roles/ceph-rgw/handlers/main.yml
index ce906fa95..803b9ec3d 100644
--- a/roles/ceph-rgw/handlers/main.yml
+++ b/roles/ceph-rgw/handlers/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: update period
-  command: "{{ docker_exec_cmd }} radosgw-admin --cluster {{ cluster }} period update --commit"
+  command: "{{ container_exec_cmd }} radosgw-admin --cluster {{ cluster }} period update --commit"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
 
diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml
index 643bf11c0..b3221f0b4 100644
--- a/roles/ceph-rgw/tasks/main.yml
+++ b/roles/ceph-rgw/tasks/main.yml
@@ -26,7 +26,7 @@
   when: rgw_create_pools is defined
   block:
     - name: create rgw pools if rgw_create_pools is defined
-      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
+      command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
       changed_when: false
       with_dict: "{{ rgw_create_pools }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
@@ -35,7 +35,7 @@
       run_once: true
 
     - name: customize pool size
-      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.size | default(osd_pool_default_size) }}"
+      command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.size | default(osd_pool_default_size) }}"
       with_dict: "{{ rgw_create_pools }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       changed_when: false
@@ -45,7 +45,7 @@
       when: item.size | default(osd_pool_default_size) != ceph_osd_pool_default_size
 
     - name: set the rgw_create_pools pools application to rgw
-      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw"
+      command: "{{ container_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw"
       changed_when: false
       with_dict: "{{ rgw_create_pools }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
diff --git a/roles/ceph-rgw/tasks/multisite/checks.yml b/roles/ceph-rgw/tasks/multisite/checks.yml
index b1e8f9e23..35b357c4c 100644
--- a/roles/ceph-rgw/tasks/multisite/checks.yml
+++ b/roles/ceph-rgw/tasks/multisite/checks.yml
@@ -1,6 +1,6 @@
 ---
 - name: check if the realm already exists
-  command: "{{ docker_exec_cmd }} radosgw-admin realm get --rgw-realm={{ rgw_realm }}"
+  command: "{{ container_exec_cmd }} radosgw-admin realm get --rgw-realm={{ rgw_realm }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: realmcheck
   failed_when: False
@@ -8,7 +8,7 @@
   check_mode: no
 
 - name: check if the zonegroup already exists
-  command: "{{ docker_exec_cmd }} radosgw-admin zonegroup get --rgw-zonegroup={{ rgw_zonegroup }}"
+  command: "{{ container_exec_cmd }} radosgw-admin zonegroup get --rgw-zonegroup={{ rgw_zonegroup }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: zonegroupcheck
   failed_when: False
@@ -16,7 +16,7 @@
   check_mode: no
 
 - name: check if the zone already exists
-  command: "{{ docker_exec_cmd }} radosgw-admin zone get --rgw-zone={{ rgw_zone }}"
+  command: "{{ container_exec_cmd }} radosgw-admin zone get --rgw-zone={{ rgw_zone }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: zonecheck
   failed_when: False
@@ -24,7 +24,7 @@
   check_mode: no
 
 - name: check if the system user already exists
-  command: "{{ docker_exec_cmd }} radosgw-admin user info --uid={{ rgw_zone_user }}"
+  command: "{{ container_exec_cmd }} radosgw-admin user info --uid={{ rgw_zone_user }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   register: usercheck
   failed_when: False
diff --git a/roles/ceph-rgw/tasks/multisite/master.yml b/roles/ceph-rgw/tasks/multisite/master.yml
index 5c1836ece..6659df01b 100644
--- a/roles/ceph-rgw/tasks/multisite/master.yml
+++ b/roles/ceph-rgw/tasks/multisite/master.yml
@@ -1,31 +1,31 @@
 ---
 - name: create the realm
-  command: "{{ docker_exec_cmd }} radosgw-admin realm create --rgw-realm={{ rgw_realm }} --default"
+  command: "{{ container_exec_cmd }} radosgw-admin realm create --rgw-realm={{ rgw_realm }} --default"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   when: "'No such file or directory' in realmcheck.stderr"
 
 - name: create the zonegroup
-  command: "{{ docker_exec_cmd }} radosgw-admin zonegroup create --rgw-zonegroup={{ rgw_zonegroup }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --master --default"
+  command: "{{ container_exec_cmd }} radosgw-admin zonegroup create --rgw-zonegroup={{ rgw_zonegroup }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --master --default"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   when: "'No such file or directory' in zonegroupcheck.stderr"
 
 - name: create the zone
-  command: "{{ docker_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default --master"
+  command: "{{ container_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default --master"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   when: "'No such file or directory' in zonecheck.stderr"
 
 - name: create the zone user
-  command: "{{ docker_exec_cmd }} radosgw-admin user create --uid={{ rgw_zone_user }} --display-name=\"Zone User\" --access-key={{ system_access_key }} --secret={{ system_secret_key }} --system"
+  command: "{{ container_exec_cmd }} radosgw-admin user create --uid={{ rgw_zone_user }} --display-name=\"Zone User\" --access-key={{ system_access_key }} --secret={{ system_secret_key }} --system"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   when: "'could not fetch user info: no user info saved' in usercheck.stderr"
   notify: update period
 
 - name: add other endpoints to the zone
-  command: "{{ docker_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
+  command: "{{ container_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   when: rgw_multisite_endpoints_list is defined
diff --git a/roles/ceph-rgw/tasks/multisite/secondary.yml b/roles/ceph-rgw/tasks/multisite/secondary.yml
index 53ee83bd9..468e2c229 100644
--- a/roles/ceph-rgw/tasks/multisite/secondary.yml
+++ b/roles/ceph-rgw/tasks/multisite/secondary.yml
@@ -1,37 +1,37 @@
 ---
 - name: fetch the realm
-  command: "{{ docker_exec_cmd }} radosgw-admin realm pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
+  command: "{{ container_exec_cmd }} radosgw-admin realm pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   when: "'No such file or directory' in realmcheck.stderr"
 
 - name: fetch the period
-  command: "{{ docker_exec_cmd }} radosgw-admin period pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
+  command: "{{ container_exec_cmd }} radosgw-admin period pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   when: "'No such file or directory' in realmcheck.stderr"
 
 - name: set default realm
-  command: "{{ docker_exec_cmd }} radosgw-admin realm default --rgw-realm={{ rgw_realm }}"
+  command: "{{ container_exec_cmd }} radosgw-admin realm default --rgw-realm={{ rgw_realm }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
 
 - name: set default zonegroup
-  command: "{{ docker_exec_cmd }} radosgw-admin zonegroup default --rgw-zonegroup={{ rgw_zonegroup }}"
+  command: "{{ container_exec_cmd }} radosgw-admin zonegroup default --rgw-zonegroup={{ rgw_zonegroup }}"
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
 
 - name: create the zone
-  command: "{{ docker_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default"
+  command: "{{ container_exec_cmd }} radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints={{ rgw_multisite_proto }}://{{ rgw_multisite_endpoint_addr }}:{{ radosgw_frontend_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   when: "'No such file or directory' in zonecheck.stderr"
   notify: update period
 
 - name: add other endpoints to the zone
-  command: "{{ docker_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
+  command: "{{ container_exec_cmd }} radosgw-admin zone modify --rgw-zone={{ rgw_zone }} --endpoints {{ rgw_multisite_endpoints_list }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   run_once: true
   when: rgw_multisite_endpoints_list is defined
-- 
2.39.5