From: Guillaume Abrioux
Date: Wed, 10 Oct 2018 19:24:22 +0000 (-0400)
Subject: remove jewel support
X-Git-Tag: v3.2.0beta6~17
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=40b7747af7b3d139b3017b53f78ab52fd1082a92;p=ceph-ansible.git

remove jewel support

As of now, ceph-ansible no longer supports Jewel.
The latest ceph-ansible release supporting Jewel is `stable-3.1`.

Signed-off-by: Guillaume Abrioux
---
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index f81fbd61d..0dfdd0a52 100644
--- a/infrastructure-playbooks/rolling_update.yml
+++ b/infrastructure-playbooks/rolling_update.yml
@@ -19,7 +19,6 @@
   become: false
   vars:
     - mgr_group_name: mgrs
-    - jewel_minor_update: False
   vars_prompt:
     - name: ireallymeanit
@@ -41,7 +40,6 @@
       fail:
        msg: "Please add a mgr host to your inventory."
      when:
-        - not jewel_minor_update
        - groups.get(mgr_group_name, []) | length == 0
@@ -243,7 +241,6 @@
        - not containerized_deployment
        - cephx
        - groups.get(mgr_group_name, []) | length > 0
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
      delegate_to: "{{ groups[mon_group_name][0] }}"
      with_items: "{{ groups.get(mgr_group_name, []) }}"
@@ -262,7 +259,6 @@
        - cephx
        - groups.get(mgr_group_name, []) | length > 0
        - inventory_hostname == groups[mon_group_name]|last
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
      delegate_to: "{{ groups[mon_group_name][0] }}"
      with_items: "{{ groups.get(mgr_group_name, []) }}"
@@ -294,9 +290,7 @@
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
     - ceph-config
-    - { role: ceph-mgr,
-        when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
-               (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
+    - ceph-mgr

   post_tasks:
     - name: start ceph mgr
@@ -323,7 +317,6 @@
     health_osd_check_retries: 40
     health_osd_check_delay: 30
     upgrade_ceph_packages: True
-    jewel_minor_update: False

   hosts:
     - "{{ osd_group_name|default('osds') }}"
@@ -397,15 +390,11 @@
       command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
       register: ceph_versions
       delegate_to: "{{ groups[mon_group_name][0] }}"
-      when:
-        - not jewel_minor_update

     - name: set_fact ceph_versions_osd
       set_fact:
         ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
-      when:
-        - not jewel_minor_update

     # length == 1 means there is a single osds versions entry
     # thus all the osds are running the same version
@@ -415,7 +404,6 @@
       when:
        - (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
        - ceph_versions_osd | string is search("ceph version 10")
-        - not jewel_minor_update

     - name: get num_pgs - non container
       command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
@@ -437,8 +425,6 @@

 - name: unset osd flags
-  vars:
-    - jewel_minor_update: False

   hosts:
     - "{{ mon_group_name|default('mons') }}"
@@ -466,15 +452,11 @@
       command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
       register: ceph_versions
       delegate_to: "{{ groups[mon_group_name][0] }}"
-      when:
-        - not jewel_minor_update

     - name: set_fact ceph_versions_osd
       set_fact:
         ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
-      when:
-        - not jewel_minor_update

     # length == 1 means there is a single osds versions entry
     # thus all the osds are running the same version
@@ -484,8 +466,6 @@
       when:
        - (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
        - ceph_versions_osd | string is search("ceph version 12")
-        - not jewel_minor_update
-

 - name: upgrade ceph mdss cluster
@@ -666,9 +646,7 @@
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
     - ceph-config
-    - { role: ceph-nfs,
-        when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
-               (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
+    - ceph-nfs

   post_tasks:
     - name: start nfs gateway
@@ -722,9 +700,7 @@
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
     - ceph-config
-    - { role: ceph-iscsi-gw,
-        when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
-               (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
+    - ceph-iscsi-gw

   post_tasks:
     - name: start rbd-target-gw
diff --git a/roles/ceph-defaults/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml
index ab899d648..8c1ddf652 100644
--- a/roles/ceph-defaults/tasks/facts.yml
+++ b/roles/ceph-defaults/tasks/facts.yml
@@ -238,4 +238,4 @@
     - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
     - ceph_current_status['servicemap'] is defined
     - ceph_current_status['servicemap']['services'] is defined
-    - ceph_current_status['servicemap']['services']['rgw'] is defined # that's the way to cover ceph_release_num[ceph_release] >= ceph_release_num['luminous']
+    - ceph_current_status['servicemap']['services']['rgw'] is defined
diff --git a/roles/ceph-fetch-keys/tasks/main.yml b/roles/ceph-fetch-keys/tasks/main.yml
index 4990deb38..61f2f3a58 100644
--- a/roles/ceph-fetch-keys/tasks/main.yml
+++ b/roles/ceph-fetch-keys/tasks/main.yml
@@ -14,11 +14,6 @@
   with_items:
     - "{{ ceph_keys.stdout_lines }}"

-- name: set_fact bootstrap_rbd_keyring
-  set_fact:
-    bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
-  when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
 - name: copy keys to the ansible server
   fetch:
     src: "{{ item }}"
@@ -30,4 +25,4 @@
     - "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring"
     - "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
     - "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring"
-    - "{{ bootstrap_rbd_keyring | default([]) }}"
+    - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
diff --git a/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 b/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2
index 73a87086b..44d019bfe 100644
--- a/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2
+++ b/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2
@@ -6,13 +6,10 @@ RBD_MIRROR_NAME="{{ ansible_hostname }}"
 {% if containerized_deployment %}
 DOCKER_EXEC="docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
 {% endif %}
-{% if ceph_release_num[ceph_release] < ceph_release_num['luminous'] %}
-SOCKET=/var/run/ceph/{{ cluster }}-client.admin.asok
-{% else %}
+
 # Backward compatibility
 $DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok
 $DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok
-{% endif %}

 # First, restart the daemon
 systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
diff --git a/roles/ceph-infra/tasks/configure_firewall.yml b/roles/ceph-infra/tasks/configure_firewall.yml
index c0e41d823..53329af34 100644
--- a/roles/ceph-infra/tasks/configure_firewall.yml
+++ b/roles/ceph-infra/tasks/configure_firewall.yml
@@ -48,7 +48,6 @@
     state: enabled
   notify: restart firewalld
   when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - mgr_group_name is defined
     - mgr_group_name in group_names
     - firewalld_pkg_query.rc == 0
diff --git a/roles/ceph-mds/tasks/create_mds_filesystems.yml b/roles/ceph-mds/tasks/create_mds_filesystems.yml
index 79bc81281..8418a5cc7 100644
--- a/roles/ceph-mds/tasks/create_mds_filesystems.yml
+++ b/roles/ceph-mds/tasks/create_mds_filesystems.yml
@@ -29,7 +29,6 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
     - check_existing_cephfs.rc != 0
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']

 - name: allow multimds
   command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it"
@@ -43,5 +42,4 @@
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.jewel
     - mds_max_mds > 1
diff --git a/roles/ceph-mgr/tasks/main.yml b/roles/ceph-mgr/tasks/main.yml
index 410fa1d9a..9b5f1a41a 100644
--- a/roles/ceph-mgr/tasks/main.yml
+++ b/roles/ceph-mgr/tasks/main.yml
@@ -20,20 +20,14 @@
   command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
   register: _ceph_mgr_modules
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']

 - name: set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict)
   set_fact:
     _ceph_mgr_modules: "{{ _ceph_mgr_modules.get('stdout', '{}') | from_json }}"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']

 - name: set _disabled_ceph_mgr_modules fact
   set_fact:
     _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']

 - name: disable ceph mgr enabled modules
   command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
@@ -44,12 +38,10 @@
   when:
     - item not in ceph_mgr_modules
     - not _ceph_mgr_modules.get('skipped')
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']

 - name: add modules to ceph-mgr
   command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
   with_items: "{{ ceph_mgr_modules }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
-    - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
+    - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
\ No newline at end of file
diff --git a/roles/ceph-mon/tasks/ceph_keys.yml b/roles/ceph-mon/tasks/ceph_keys.yml
index 23b12c6a8..1d998a0a6 100644
--- a/roles/ceph-mon/tasks/ceph_keys.yml
+++ b/roles/ceph-mon/tasks/ceph_keys.yml
@@ -5,15 +5,6 @@
   check_mode: no
   when:
     - cephx
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
-- name: collect admin and bootstrap keys
-  command: ceph-create-keys --cluster {{ cluster }} -i {{ monitor_name }}
-  changed_when: false
-  check_mode: no
-  when:
-    - cephx
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous

 # NOTE (leseb): wait for mon discovery and quorum resolution
 # the admin key is not instantaneously created so we have to wait a bit
@@ -81,7 +72,6 @@
     - cephx
     - groups.get(mgr_group_name, []) | length > 0
     - inventory_hostname == groups[mon_group_name]|last
-    - ceph_release_num[ceph_release] > ceph_release_num.jewel
   with_items: "{{ groups.get(mgr_group_name, []) }}"

 # once this gets backported github.com/ceph/ceph/pull/20983
@@ -108,8 +98,6 @@
 - name: set_fact bootstrap_rbd_keyring
   set_fact:
     bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous

 - name: copy keys to the ansible server
   fetch:
diff --git a/roles/ceph-mon/tasks/crush_rules.yml b/roles/ceph-mon/tasks/crush_rules.yml
index 96b449dda..2da7e8254 100644
--- a/roles/ceph-mon/tasks/crush_rules.yml
+++ b/roles/ceph-mon/tasks/crush_rules.yml
@@ -38,9 +38,9 @@
     - inventory_hostname == groups.get(mon_group_name) | last
     - not item.get('skipped', false)

-- name: set_fact osd_pool_default_crush_rule to osd_pool_default_crush_replicated_ruleset if release < luminous else osd_pool_default_crush_rule
+- name: set_fact osd_pool_default_crush_rule
   set_fact:
-    osd_pool_default_crush_rule: "{{ 'osd_pool_default_crush_replicated_ruleset' if ceph_release_num[ceph_release] < ceph_release_num.luminous else 'osd_pool_default_crush_rule' }}"
+    osd_pool_default_crush_rule: "osd_pool_default_crush_rule"

 - name: insert new default crush rule into daemon to prevent restart
   command: "{{ hostvars[item]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set {{ osd_pool_default_crush_rule }} {{ info_ceph_default_crush_rule_yaml.rule_id }}"
diff --git a/roles/ceph-mon/tasks/deploy_monitors.yml b/roles/ceph-mon/tasks/deploy_monitors.yml
index e3de5a6fe..d792db1b4 100644
--- a/roles/ceph-mon/tasks/deploy_monitors.yml
+++ b/roles/ceph-mon/tasks/deploy_monitors.yml
@@ -48,7 +48,7 @@
     mode: "0755"
     recurse: true

-- name: set_fact client_admin_ceph_authtool_cap >= ceph_release_num.luminous
+- name: set_fact client_admin_ceph_authtool_cap
   set_fact:
     client_admin_ceph_authtool_cap:
       mon: allow *
@@ -56,18 +56,6 @@
       mds: allow
       mgr: allow *
   when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-    - cephx
-    - admin_secret != 'admin_secret'
-
-- name: set_fact client_admin_ceph_authtool_cap < ceph_release_num.luminous
-  set_fact:
-    client_admin_ceph_authtool_cap:
-      mon: allow *
-      osd: allow *
-      mds: allow
-  when:
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
     - cephx
     - admin_secret != 'admin_secret'
diff --git a/roles/ceph-mon/tasks/docker/copy_configs.yml b/roles/ceph-mon/tasks/docker/copy_configs.yml
index b7407a2b3..bcf0d3294 100644
--- a/roles/ceph-mon/tasks/docker/copy_configs.yml
+++ b/roles/ceph-mon/tasks/docker/copy_configs.yml
@@ -7,17 +7,7 @@
     - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
     - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
     - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-
-- name: register rbd bootstrap key
-  set_fact:
-    bootstrap_rbd_keyring:
-      - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
-  when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
-- name: merge rbd bootstrap key to config and keys paths
-  set_fact:
-    ceph_config_keys: "{{ ceph_config_keys + bootstrap_rbd_keyring }}"
-  when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
+    - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring

 - name: stat for ceph config and keys
   local_action:
diff --git a/roles/ceph-mon/tasks/docker/main.yml b/roles/ceph-mon/tasks/docker/main.yml
index 5703761c7..032d49bf9 100644
--- a/roles/ceph-mon/tasks/docker/main.yml
+++ b/roles/ceph-mon/tasks/docker/main.yml
@@ -119,4 +119,3 @@
   when:
     - not rolling_update
     - inventory_hostname == groups[mon_group_name]|last
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
diff --git a/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml b/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml
index 11f4ede5e..d29a034eb 100644
--- a/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml
+++ b/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml
@@ -5,7 +5,6 @@
   when:
     - osd_objectstore == 'bluestore'
     - not dmcrypt
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - not containerized_deployment

 - name: set_fact ceph_disk_cli_options 'ceph_disk_cli_options'
@@ -14,16 +13,6 @@
   when:
     - osd_objectstore == 'filestore'
     - not dmcrypt
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-    - not containerized_deployment
-
-- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }}'
-  set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }}"
-  when:
-    - osd_objectstore == 'filestore'
-    - not dmcrypt
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
     - not containerized_deployment

 - name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --bluestore --dmcrypt'
@@ -32,7 +21,6 @@
   when:
     - osd_objectstore == 'bluestore'
     - dmcrypt
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - not containerized_deployment

 - name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --filestore --dmcrypt'
@@ -41,16 +29,6 @@
   when:
     - osd_objectstore == 'filestore'
     - dmcrypt
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-    - not containerized_deployment
-
-- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --dmcrypt'
-  set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - dmcrypt
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
     - not containerized_deployment

 - name: set_fact docker_env_args '-e KV_TYPE={{ kv_type }} -e KV_IP={{ kv_endpoint }} -e KV_PORT={{ kv_port }}'
diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml
index a74cdb39e..80fb571ad 100644
--- a/roles/ceph-osd/tasks/openstack_config.yml
+++ b/roles/ceph-osd/tasks/openstack_config.yml
@@ -18,27 +18,13 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   failed_when: false

-- name: set_fact rule_name before luminous
-  set_fact:
-    rule_name: "replicated_ruleset"
-  when:
-    - ceph_release_num[ceph_release] < ceph_release_num['luminous']
-    - not rolling_update
-
-- name: set_fact rule_name from luminous
-  set_fact:
-    rule_name: "replicated_rule"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
-      or (ceph_release_num[ceph_release] < ceph_release_num['luminous'] and rolling_update)
-
 - name: create openstack pool(s)
   command: >
     {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }}
     ceph --cluster {{ cluster }}
     osd pool create {{ item.0.name }}
     {{ item.0.pg_num }}
     {{ item.0.pgp_num | default(item.0.pg_num) }}
-    {{ rule_name if item.0.rule_name | default(rule_name) == '' else item.0.rule_name | default(rule_name) }}
+    {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
     {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
     {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
     {{ item.0.erasure_profile }}
@@ -58,7 +44,6 @@
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
     - item.application is defined

 - name: create openstack cephx key(s)
diff --git a/roles/ceph-rbd-mirror/tasks/common.yml b/roles/ceph-rbd-mirror/tasks/common.yml
index fa1912302..3750e2d06 100644
--- a/roles/ceph-rbd-mirror/tasks/common.yml
+++ b/roles/ceph-rbd-mirror/tasks/common.yml
@@ -1,21 +1,4 @@
 ---
-- name: set_fact copy_admin_key - true when ceph_release_num[ceph_release] < ceph_release_num.luminous
-  set_fact:
-    copy_admin_key: True
-  when:
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
-
-- name: copy ceph admin keyring when ceph_release_num[ceph_release] < ceph_release_num.luminous
-  copy:
-    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
-    dest: "/etc/ceph/"
-    owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
-    group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
-    mode: "{{ ceph_keyring_permissions }}"
-  when:
-    - cephx
-    - copy_admin_key
-
 - name: copy rbd-mirror bootstrap key
   copy:
     src: "{{ fetch_directory }}/{{ fsid }}/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
@@ -24,5 +7,4 @@
     group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     mode: "{{ ceph_keyring_permissions }}"
   when:
-    - cephx
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+    - cephx
\ No newline at end of file
diff --git a/roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml b/roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml
index 2133cbe4a..fe3c777ca 100644
--- a/roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml
+++ b/roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml
@@ -1,15 +1,9 @@
 ---
-- name: set_fact bootstrap_rbd_keyring
-  set_fact:
-    bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
 - name: set_fact ceph_config_keys
   set_fact:
     ceph_config_keys:
       - /etc/ceph/{{ cluster }}.client.admin.keyring
-      - "{{ bootstrap_rbd_keyring | default('') }}"
+      - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring

 - name: stat for ceph config and keys
   local_action:
diff --git a/roles/ceph-rbd-mirror/tasks/pre_requisite.yml b/roles/ceph-rbd-mirror/tasks/pre_requisite.yml
index ddefb2a50..29f917b89 100644
--- a/roles/ceph-rbd-mirror/tasks/pre_requisite.yml
+++ b/roles/ceph-rbd-mirror/tasks/pre_requisite.yml
@@ -8,17 +8,6 @@
   tags:
     - package-install

-- name: copy ceph admin key
-  copy:
-    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
-    dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
-    owner: "{{ ceph_uid }}"
-    group: "{{ ceph_uid }}"
-    mode: "0600"
-  when:
-    - cephx
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
-
 - name: create rbd-mirror keyring
   command: ceph --cluster {{ cluster }} --name client.bootstrap-rbd --keyring /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring auth get-or-create client.rbd-mirror.{{ ansible_hostname }} mon 'profile rbd' osd 'profile rbd' -o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
   args:
@@ -26,7 +15,6 @@
   changed_when: false
   when:
     - cephx
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous

 - name: set rbd-mirror key permissions
   file:
@@ -35,6 +23,4 @@
     group: "ceph"
     mode: "{{ ceph_keyring_permissions }}"
   when:
-    - cephx
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
+    - cephx
\ No newline at end of file
diff --git a/roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml b/roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml
index 15def3bdb..0269bf57e 100644
--- a/roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml
+++ b/roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml
@@ -17,23 +17,12 @@
     - ceph_rbd_mirror_systemd_overrides is defined
     - ansible_service_mgr == 'systemd'

-- name: start and add that the rbd mirror service to the init sequence
-  service:
-    name: "ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}"
-    state: started
-    enabled: yes
-  changed_when: false
-  when:
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
-
 - name: stop and remove the generic rbd-mirror service instance
   service:
     name: "ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}"
     state: stopped
     enabled: no
   changed_when: false
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous

 # This task is a workaround for rbd-mirror not starting after reboot
 # The upstream fix is: https://github.com/ceph/ceph/pull/17969
@@ -45,13 +34,11 @@
     enabled: yes
   changed_when: false
   when:
-    - ceph_release_num[ceph_release] <= ceph_release_num.luminous
+    - ceph_release_num[ceph_release] == ceph_release_num.luminous

 - name: start and add the rbd-mirror service instance
   service:
     name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
     state: started
     enabled: yes
-  changed_when: false
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+  changed_when: false
\ No newline at end of file
diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml
index 9d86b1c56..88935e0f7 100644
--- a/roles/ceph-rgw/tasks/main.yml
+++ b/roles/ceph-rgw/tasks/main.yml
@@ -19,7 +19,6 @@
   when:
     - rgw_zone != ""
     - rgw_multisite
-    - ceph_release_num[ceph_release] >= ceph_release_num.jewel

 - name: include_tasks docker/main.yml
   include_tasks: docker/main.yml
diff --git a/site-docker.yml.sample b/site-docker.yml.sample
index ebf6927cc..b12385a06 100644
--- a/site-docker.yml.sample
+++ b/site-docker.yml.sample
@@ -122,11 +122,7 @@
     - role: ceph-docker-common
     - role: ceph-config
       tags: ['ceph_update_config']
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - role: ceph-mgr
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
   post_tasks:
     - name: set ceph manager install 'Complete'
       run_once: true
@@ -238,11 +234,7 @@
     - role: ceph-docker-common
     - role: ceph-config
       tags: ['ceph_update_config']
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - role: ceph-nfs
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
   post_tasks:
     - name: set ceph nfs install 'Complete'
       run_once: true
@@ -359,8 +351,8 @@
     - { role: ceph-defaults, tags: ['ceph_update_config'] }
     - role: ceph-handler
     - ceph-docker-common
-    - { role: ceph-config, tags: ['ceph_update_config'], when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" }
-    - { role: ceph-iscsi-gw, when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" }
+    - { role: ceph-config, tags: ['ceph_update_config'] }
+    - ceph-iscsi-gw
   post_tasks:
     - name: set ceph iscsi gw install 'Complete'
       run_once: true
diff --git a/site.yml.sample b/site.yml.sample
index 769aac56f..5ecba702c 100644
--- a/site.yml.sample
+++ b/site.yml.sample
@@ -124,11 +124,7 @@
     - role: ceph-common
     - role: ceph-config
       tags: ['ceph_update_config']
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - role: ceph-mgr
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
   post_tasks:
     - name: set ceph manager install 'Complete'
       run_once: true
@@ -268,11 +264,7 @@
     - role: ceph-common
     - role: ceph-config
       tags: ['ceph_update_config']
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - role: ceph-nfs
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
   post_tasks:
     - name: set ceph nfs install 'Complete'
       run_once: true
@@ -390,11 +382,7 @@
     - role: ceph-common
     - role: ceph-config
       tags: ['ceph_update_config']
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - role: ceph-iscsi-gw
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
   post_tasks:
     - name: set ceph iscsi gw install 'Complete'
       run_once: true
diff --git a/tests/conftest.py b/tests/conftest.py
index 324887f69..03d329321 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -57,22 +57,6 @@ def node(host, request):
         pytest.skip(
             "Not a valid test for non-containerized deployments or atomic hosts")  # noqa E501

-    if "mgrs" in group_names and ceph_stable_release == "jewel":
-        pytest.skip("mgr nodes can not be tested with ceph release jewel")
-
-    if "nfss" in group_names and ceph_stable_release == "jewel":
-        pytest.skip("nfs nodes can not be tested with ceph release jewel")
-
-    if group_names == ["iscsigws"] and ceph_stable_release == "jewel":
-        pytest.skip("iscsigws nodes can not be tested with ceph release jewel")  # noqa E501
-
-    if request.node.get_closest_marker("from_luminous") and ceph_release_num[ceph_stable_release] < ceph_release_num['luminous']:  # noqa E501
-        pytest.skip(
-            "This test is only valid for releases starting from Luminous and above")  # noqa E501
-
-    if request.node.get_closest_marker("before_luminous") and ceph_release_num[ceph_stable_release] >= ceph_release_num['luminous']:  # noqa E501
-        pytest.skip("This test is only valid for release before Luminous")
-
     journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated"
     if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test:  # noqa E501
         pytest.skip("Scenario is not using journal collocation")
diff --git a/tests/functional/tests/rbd-mirror/test_rbd_mirror.py b/tests/functional/tests/rbd-mirror/test_rbd_mirror.py
index 578b11beb..7bdd15494 100644
--- a/tests/functional/tests/rbd-mirror/test_rbd_mirror.py
+++ b/tests/functional/tests/rbd-mirror/test_rbd_mirror.py
@@ -8,63 +8,28 @@ class TestRbdMirrors(object):
     def test_rbd_mirror_is_installed(self, node, host):
         assert host.package("rbd-mirror").is_installed

-    @pytest.mark.no_docker
-    @pytest.mark.before_luminous
-    def test_rbd_mirror_service_is_running_before_luminous(self, node, host):
-        service_name = "ceph-rbd-mirror@admin"
-        assert host.service(service_name).is_running
-
     @pytest.mark.docker
-    @pytest.mark.before_luminous
-    def test_rbd_mirror_service_is_running_docker_before_luminous(self, node, host):
+    def test_rbd_mirror_service_is_running_docker(self, node, host):
         service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
             hostname=node["vars"]["inventory_hostname"]
         )
         assert host.service(service_name).is_running

-    @pytest.mark.docker
-    @pytest.mark.from_luminous
-    def test_rbd_mirror_service_is_running_docker_from_luminous(self, node, host):
+    def test_rbd_mirror_service_is_running(self, node, host):
         service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
             hostname=node["vars"]["inventory_hostname"]
         )
         assert host.service(service_name).is_running

-    @pytest.mark.from_luminous
-    def test_rbd_mirror_service_is_running_from_luminous(self, node, host):
+    def test_rbd_mirror_service_is_enabled(self, node, host):
         service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
             hostname=node["vars"]["inventory_hostname"]
         )
-        assert host.service(service_name).is_running
-
-    @pytest.mark.no_docker
-    @pytest.mark.before_luminous
-    def test_rbd_mirror_service_is_enabled_before_luminous(self, node, host):
-        service_name = "ceph-rbd-mirror@admin"
         assert host.service(service_name).is_enabled

-    @pytest.mark.docker
-    @pytest.mark.before_luminous
-    def test_rbd_mirror_service_is_enabled_docker_before_luminous(self, node, host):
-        service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
-            hostname=node["vars"]["inventory_hostname"]
-        )
-        assert host.service(service_name).is_enabled
-
-    @pytest.mark.from_luminous
-    def test_rbd_mirror_service_is_enabled_from_luminous(self, node, host):
-        service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
-            hostname=node["vars"]["inventory_hostname"]
-        )
-        assert host.service(service_name).is_enabled
-
-    @pytest.mark.from_luminous
     def test_rbd_mirror_is_up(self, node, host):
-        ceph_release_num=node['ceph_release_num']
-        ceph_stable_release=node['ceph_stable_release']
         hostname=node["vars"]["inventory_hostname"]
         cluster=node["cluster_name"]
-        rolling_update=node["rolling_update"]
         daemons = []
         if node['docker']:
             docker_exec_cmd = 'docker exec ceph-rbd-mirror-{hostname}'.format(hostname=hostname)
@@ -80,10 +45,6 @@ class TestRbdMirrors(object):
         output = host.check_output(cmd)
         status = json.loads(output)
         daemon_ids = [i for i in status["servicemap"]["services"]["rbd-mirror"]["daemons"].keys() if i != "summary"]
-        if ceph_release_num[ceph_stable_release] > ceph_release_num['luminous'] or (ceph_release_num[ceph_stable_release] == ceph_release_num['luminous'] and rolling_update=='True'):
-            for daemon_id in daemon_ids:
-                daemons.append(status["servicemap"]["services"]["rbd-mirror"]["daemons"][daemon_id]["metadata"]["hostname"])
-            result = hostname in daemons
-        else:
-            result = hostname in daemon_ids
-        assert result
\ No newline at end of file
+        for daemon_id in daemon_ids:
+            daemons.append(status["servicemap"]["services"]["rbd-mirror"]["daemons"][daemon_id]["metadata"]["hostname"])
+        assert hostname in daemons
\ No newline at end of file
diff --git a/tests/functional/tests/rgw/test_rgw.py b/tests/functional/tests/rgw/test_rgw.py
index 69bd001f2..c940d260b 100644
--- a/tests/functional/tests/rgw/test_rgw.py
+++ b/tests/functional/tests/rgw/test_rgw.py
@@ -22,7 +22,6 @@ class TestRGWs(object):
         )
         assert host.service(service_name).is_enabled

-    @pytest.mark.from_luminous
     def test_rgw_is_up(self, node, host):
         hostname=node["vars"]["inventory_hostname"]
         cluster=node["cluster_name"]
diff --git a/tox.ini b/tox.ini
index 0d1f2ddb9..5d8b0ffdc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,5 @@
 [tox]
-envlist = {dev,jewel,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation}
-          {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,bluestore_lvm_osds_container,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch,lvm_osds_container,lvm_batch_container}
+envlist = {dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation,filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch}
           infra_lv_create

 skipsdist = True
@@ -183,11 +182,6 @@
 setenv=
   shrink_osd: COPY_ADMIN_KEY = True
   rhcs: CEPH_STABLE_RELEASE = luminous
-  jewel: CEPH_STABLE_RELEASE = jewel
-  jewel: CEPH_DOCKER_IMAGE_TAG = latest-jewel
-  jewel: UPDATE_CEPH_STABLE_RELEASE = luminous
-  jewel: UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-luminous
-  jewel: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-jewel
   luminous: CEPH_STABLE_RELEASE = luminous
   luminous: CEPH_DOCKER_IMAGE_TAG = latest-luminous
   luminous: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-luminous
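
With the per-task `ceph_release_num[ceph_release]` guards removed above, any protection
against running this branch on a Jewel cluster has to happen once, up front, rather than
in every role. A minimal sketch of such a fail-fast task, reusing the `ceph_release` and
`ceph_release_num` facts that appear throughout this diff; the task name and message text
are illustrative, not ceph-ansible's actual validation code:

    # Hypothetical fail-fast guard, not the actual ceph-ansible implementation.
    # Assumes ceph_release and the ceph_release_num map are already populated,
    # e.g. by the roles/ceph-defaults facts referenced in this diff.
    - name: fail on ceph releases older than luminous
      fail:
        msg: >
          ceph-ansible stable-3.2 no longer supports {{ ceph_release }};
          use the stable-3.1 branch for jewel deployments.
      when: ceph_release_num[ceph_release] < ceph_release_num.luminous

Running a check like this once at the start of a play gives the same effect as the removed
per-task conditions while keeping the roles themselves release-agnostic.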