git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
remove jewel support
author Guillaume Abrioux <gabrioux@redhat.com>
Wed, 10 Oct 2018 19:24:22 +0000 (15:24 -0400)
committer mergify[bot] <mergify[bot]@users.noreply.github.com>
Fri, 12 Oct 2018 23:38:17 +0000 (23:38 +0000)
As of now, we should no longer support Jewel in ceph-ansible.
The latest ceph-ansible release supporting Jewel is `stable-3.1`.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
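
For deployments still running Jewel, a minimal sketch of staying on the last supporting branch (assuming ceph-ansible is consumed as a git checkout; the GitHub mirror URL is an assumption, while the branch name `stable-3.1` comes from the message above):

    git clone https://github.com/ceph/ceph-ansible.git   # assumed upstream mirror
    cd ceph-ansible
    git checkout stable-3.1   # last release branch that still supports Jewel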
25 files changed:
infrastructure-playbooks/rolling_update.yml
roles/ceph-defaults/tasks/facts.yml
roles/ceph-fetch-keys/tasks/main.yml
roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2
roles/ceph-infra/tasks/configure_firewall.yml
roles/ceph-mds/tasks/create_mds_filesystems.yml
roles/ceph-mgr/tasks/main.yml
roles/ceph-mon/tasks/ceph_keys.yml
roles/ceph-mon/tasks/crush_rules.yml
roles/ceph-mon/tasks/deploy_monitors.yml
roles/ceph-mon/tasks/docker/copy_configs.yml
roles/ceph-mon/tasks/docker/main.yml
roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml
roles/ceph-osd/tasks/openstack_config.yml
roles/ceph-rbd-mirror/tasks/common.yml
roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml
roles/ceph-rbd-mirror/tasks/pre_requisite.yml
roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml
roles/ceph-rgw/tasks/main.yml
site-docker.yml.sample
site.yml.sample
tests/conftest.py
tests/functional/tests/rbd-mirror/test_rbd_mirror.py
tests/functional/tests/rgw/test_rgw.py
tox.ini

diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index f81fbd61d64b8b4ab2a39e18aef9ee6e723e0bad..0dfdd0a52839791e6516a7a21c1a35b309d0760f 100644 (file)
@@ -19,7 +19,6 @@
   become: false
   vars:
     - mgr_group_name: mgrs
-    - jewel_minor_update: False
 
   vars_prompt:
     - name: ireallymeanit
@@ -41,7 +40,6 @@
       fail:
         msg: "Please add a mgr host to your inventory."
       when:
-        - not jewel_minor_update
         - groups.get(mgr_group_name, []) | length == 0
 
 
         - not containerized_deployment
         - cephx
         - groups.get(mgr_group_name, []) | length > 0
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items: "{{ groups.get(mgr_group_name, []) }}"
 
         - cephx
         - groups.get(mgr_group_name, []) | length > 0
         - inventory_hostname == groups[mon_group_name]|last
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items: "{{ groups.get(mgr_group_name, []) }}"
 
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
     - ceph-config
-    - { role: ceph-mgr,
-        when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
-               (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
+    - ceph-mgr
 
   post_tasks:
     - name: start ceph mgr
     health_osd_check_retries: 40
     health_osd_check_delay: 30
     upgrade_ceph_packages: True
-    jewel_minor_update: False
 
   hosts:
     - "{{ osd_group_name|default('osds') }}"
       command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
       register: ceph_versions
       delegate_to: "{{ groups[mon_group_name][0] }}"
-      when:
-        - not jewel_minor_update
 
     - name: set_fact ceph_versions_osd
       set_fact:
         ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
-      when:
-        - not jewel_minor_update
 
     # length == 1 means there is a single osds versions entry
     # thus all the osds are running the same version
       when:
         - (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
         - ceph_versions_osd | string is search("ceph version 10")
-        - not jewel_minor_update
 
     - name: get num_pgs - non container
       command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
 
 
 - name: unset osd flags
-  vars:
-    - jewel_minor_update: False
 
   hosts:
     - "{{ mon_group_name|default('mons') }}"
       command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
       register: ceph_versions
       delegate_to: "{{ groups[mon_group_name][0] }}"
-      when:
-        - not jewel_minor_update
 
     - name: set_fact ceph_versions_osd
       set_fact:
         ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
-      when:
-        - not jewel_minor_update
 
     # length == 1 means there is a single osds versions entry
     # thus all the osds are running the same version
       when:
         - (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
         - ceph_versions_osd | string is search("ceph version 12")
-        - not jewel_minor_update
-
 
 - name: upgrade ceph mdss cluster
 
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
     - ceph-config
-    - { role: ceph-nfs,
-        when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
-               (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
+    - ceph-nfs
 
   post_tasks:
     - name: start nfs gateway
     - { role: ceph-common, when: not containerized_deployment }
     - { role: ceph-docker-common, when: containerized_deployment }
     - ceph-config
-    - { role: ceph-iscsi-gw,
-        when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
-               (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
+    - ceph-iscsi-gw
 
   post_tasks:
     - name: start rbd-target-gw
diff --git a/roles/ceph-defaults/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml
index ab899d648ad85d360ef379a60f5c71fe66892bb3..8c1ddf6525f869c45211f7ce908645bcac7ad0c6 100644 (file)
     - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
     - ceph_current_status['servicemap'] is defined
     - ceph_current_status['servicemap']['services'] is defined
-    - ceph_current_status['servicemap']['services']['rgw'] is defined # that's the way to cover ceph_release_num[ceph_release] >= ceph_release_num['luminous']
+    - ceph_current_status['servicemap']['services']['rgw'] is defined
diff --git a/roles/ceph-fetch-keys/tasks/main.yml b/roles/ceph-fetch-keys/tasks/main.yml
index 4990deb389f9568f1b2a21a961068d062af589d4..61f2f3a58e5c9bccdf6038760f63e33f5d495889 100644 (file)
   with_items:
     - "{{ ceph_keys.stdout_lines }}"
 
-- name: set_fact bootstrap_rbd_keyring
-  set_fact:
-    bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
-  when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
 - name: copy keys to the ansible server
   fetch:
     src: "{{ item }}"
@@ -30,4 +25,4 @@
     - "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring"
     - "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
     - "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring"
-    - "{{ bootstrap_rbd_keyring | default([]) }}"
+    - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
diff --git a/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2 b/roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2
index 73a87086b396cac0375e3807846b8f1d887f70a7..44d019bfebfddd42ab3f3cdad72fa2d225848cfe 100644 (file)
@@ -6,13 +6,10 @@ RBD_MIRROR_NAME="{{ ansible_hostname }}"
 {% if containerized_deployment %}
 DOCKER_EXEC="docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
 {% endif %}
-{% if ceph_release_num[ceph_release] < ceph_release_num['luminous'] %}
-SOCKET=/var/run/ceph/{{ cluster }}-client.admin.asok
-{% else %}
+
 # Backward compatibility
 $DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok
 $DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok
-{% endif %}
 
 # First, restart the daemon
 systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
diff --git a/roles/ceph-infra/tasks/configure_firewall.yml b/roles/ceph-infra/tasks/configure_firewall.yml
index c0e41d8238425ab09cb0e2713be4af46a1998d94..53329af346abe214516b1e31e9c6407fc981a8ec 100644 (file)
@@ -48,7 +48,6 @@
     state: enabled
   notify: restart firewalld
   when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - mgr_group_name is defined
     - mgr_group_name in group_names
     - firewalld_pkg_query.rc == 0
diff --git a/roles/ceph-mds/tasks/create_mds_filesystems.yml b/roles/ceph-mds/tasks/create_mds_filesystems.yml
index 79bc812818e082e0b35e2b70c1a73067f70cf2e3..8418a5cc70ce5393b6bb0a8b78743afab0f837e7 100644 (file)
@@ -29,7 +29,6 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
     - check_existing_cephfs.rc != 0
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
 
 - name: allow multimds
   command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it"
@@ -43,5 +42,4 @@
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.jewel
     - mds_max_mds > 1
diff --git a/roles/ceph-mgr/tasks/main.yml b/roles/ceph-mgr/tasks/main.yml
index 410fa1d9a9750e82e80fa92e2a1d6ce38c3a555c..9b5f1a41a143101b437c852462d2b619b3565e7f 100644 (file)
   command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
   register: _ceph_mgr_modules
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
 
 - name: set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict)
   set_fact:
     _ceph_mgr_modules: "{{ _ceph_mgr_modules.get('stdout', '{}') | from_json }}"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
 
 - name: set _disabled_ceph_mgr_modules fact
   set_fact:
     _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
 
 - name: disable ceph mgr enabled modules
   command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
   when:
     - item not in ceph_mgr_modules
     - not _ceph_mgr_modules.get('skipped')
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
 
 - name: add modules to ceph-mgr
   command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
   with_items: "{{ ceph_mgr_modules }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
-    - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
+    - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
\ No newline at end of file
diff --git a/roles/ceph-mon/tasks/ceph_keys.yml b/roles/ceph-mon/tasks/ceph_keys.yml
index 23b12c6a8db3906523774ae52e5952a70247beb0..1d998a0a604868cbc16e27786d4c1fc14a76e21b 100644 (file)
@@ -5,15 +5,6 @@
   check_mode: no
   when:
     - cephx
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
-- name: collect admin and bootstrap keys
-  command: ceph-create-keys --cluster {{ cluster }} -i {{ monitor_name }}
-  changed_when: false
-  check_mode: no
-  when:
-    - cephx
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
 
 # NOTE (leseb): wait for mon discovery and quorum resolution
 # the admin key is not instantaneously created so we have to wait a bit
@@ -81,7 +72,6 @@
     - cephx
     - groups.get(mgr_group_name, []) | length > 0
     - inventory_hostname == groups[mon_group_name]|last
-    - ceph_release_num[ceph_release] > ceph_release_num.jewel
   with_items: "{{ groups.get(mgr_group_name, []) }}"
 
 # once this gets backported github.com/ceph/ceph/pull/20983
 - name: set_fact bootstrap_rbd_keyring
   set_fact:
     bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
 
 - name: copy keys to the ansible server
   fetch:
diff --git a/roles/ceph-mon/tasks/crush_rules.yml b/roles/ceph-mon/tasks/crush_rules.yml
index 96b449dda7727df500602e7912c18159f548a5e6..2da7e8254a7c9b856d710d883268988437feecb3 100644 (file)
@@ -38,9 +38,9 @@
     - inventory_hostname == groups.get(mon_group_name) | last
     - not item.get('skipped', false)
 
-- name: set_fact osd_pool_default_crush_rule to osd_pool_default_crush_replicated_ruleset if release < luminous else osd_pool_default_crush_rule
+- name: set_fact osd_pool_default_crush_rule
   set_fact:
-    osd_pool_default_crush_rule: "{{ 'osd_pool_default_crush_replicated_ruleset' if ceph_release_num[ceph_release] < ceph_release_num.luminous else 'osd_pool_default_crush_rule' }}"
+    osd_pool_default_crush_rule: "osd_pool_default_crush_rule"
 
 - name: insert new default crush rule into daemon to prevent restart
   command: "{{ hostvars[item]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set {{ osd_pool_default_crush_rule }} {{ info_ceph_default_crush_rule_yaml.rule_id }}"
diff --git a/roles/ceph-mon/tasks/deploy_monitors.yml b/roles/ceph-mon/tasks/deploy_monitors.yml
index e3de5a6fed57c3968826d0c5312250d591523dfa..d792db1b46d3f67469a081e539eab4ad35834d85 100644 (file)
@@ -48,7 +48,7 @@
     mode: "0755"
     recurse: true
 
-- name: set_fact client_admin_ceph_authtool_cap >= ceph_release_num.luminous
+- name: set_fact client_admin_ceph_authtool_cap
   set_fact:
     client_admin_ceph_authtool_cap:
       mon: allow *
       mds: allow
       mgr: allow *
   when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-    - cephx
-    - admin_secret != 'admin_secret'
-
-- name: set_fact client_admin_ceph_authtool_cap < ceph_release_num.luminous
-  set_fact:
-    client_admin_ceph_authtool_cap:
-      mon: allow *
-      osd: allow *
-      mds: allow
-  when:
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
     - cephx
     - admin_secret != 'admin_secret'
 
diff --git a/roles/ceph-mon/tasks/docker/copy_configs.yml b/roles/ceph-mon/tasks/docker/copy_configs.yml
index b7407a2b3aa7bee0cb7bb2742dfb4a9a0aae1606..bcf0d3294c3a33cf9d811e7c573d556fd38e066e 100644 (file)
@@ -7,17 +7,7 @@
       - /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
       - /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
       - /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-
-- name: register rbd bootstrap key
-  set_fact:
-    bootstrap_rbd_keyring:
-      - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
-  when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
-- name: merge rbd bootstrap key to config and keys paths
-  set_fact:
-    ceph_config_keys: "{{ ceph_config_keys + bootstrap_rbd_keyring }}"
-  when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
+      - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring
 
 - name: stat for ceph config and keys
   local_action:
diff --git a/roles/ceph-mon/tasks/docker/main.yml b/roles/ceph-mon/tasks/docker/main.yml
index 5703761c7b0b7aa38a6d37040b618f9c46aba2d8..032d49bf97c76b7e843a381b4c078f9d1ea83108 100644 (file)
   when:
     - not rolling_update
     - inventory_hostname == groups[mon_group_name]|last
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
diff --git a/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml b/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml
index 11f4ede5ea8be5d227c8f85ef8d4aed15d3fa419..d29a034eba66e1ebe25acf894bb4c0336e9e6aaa 100644 (file)
@@ -5,7 +5,6 @@
   when:
     - osd_objectstore == 'bluestore'
     - not dmcrypt
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - not containerized_deployment
 
 - name: set_fact ceph_disk_cli_options 'ceph_disk_cli_options'
   when:
     - osd_objectstore == 'filestore'
     - not dmcrypt
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-    - not containerized_deployment
-
-- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }}'
-  set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }}"
-  when:
-    - osd_objectstore == 'filestore'
-    - not dmcrypt
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
     - not containerized_deployment
 
 - name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --bluestore --dmcrypt'
@@ -32,7 +21,6 @@
   when:
     - osd_objectstore == 'bluestore'
     - dmcrypt
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - not containerized_deployment
 
 - name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --filestore --dmcrypt'
   when:
     - osd_objectstore == 'filestore'
     - dmcrypt
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-    - not containerized_deployment
-
-- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --dmcrypt'
-  set_fact:
-    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
-  when:
-    - osd_objectstore == 'filestore'
-    - dmcrypt
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
     - not containerized_deployment
 
 - name: set_fact docker_env_args '-e KV_TYPE={{ kv_type }} -e KV_IP={{ kv_endpoint }} -e KV_PORT={{ kv_port }}'
diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml
index a74cdb39eea331aaba4b43ef0fdbd62bd1f2b1a5..80fb571adb9099b6c4de561dd9669f1cd94cd2cf 100644 (file)
   delegate_to: "{{ groups[mon_group_name][0] }}"
   failed_when: false
 
-- name: set_fact rule_name before luminous
-  set_fact:
-    rule_name: "replicated_ruleset"
-  when:
-    - ceph_release_num[ceph_release] < ceph_release_num['luminous']
-    - not rolling_update
-
-- name: set_fact rule_name from luminous
-  set_fact:
-    rule_name: "replicated_rule"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
-      or (ceph_release_num[ceph_release] < ceph_release_num['luminous'] and rolling_update)
-
 - name: create openstack pool(s)
   command: >
     {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
     osd pool create {{ item.0.name }}
     {{ item.0.pg_num }}
     {{ item.0.pgp_num | default(item.0.pg_num) }}
-    {{ rule_name if item.0.rule_name | default(rule_name) == '' else item.0.rule_name | default(rule_name) }}
+    {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
     {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
     {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
     {{ item.0.erasure_profile }}
@@ -58,7 +44,6 @@
   changed_when: false
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
-    - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
     - item.application is defined
 
 - name: create openstack cephx key(s)
diff --git a/roles/ceph-rbd-mirror/tasks/common.yml b/roles/ceph-rbd-mirror/tasks/common.yml
index fa191230242722a0c044f977f977d77e864a5187..3750e2d06a7b31c1f3503a2f5f7fbce996d83406 100644 (file)
@@ -1,21 +1,4 @@
 ---
-- name: set_fact copy_admin_key -  true when ceph_release_num[ceph_release] < ceph_release_num.luminous
-  set_fact:
-    copy_admin_key: True
-  when:
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
-
-- name: copy ceph admin keyring when ceph_release_num[ceph_release] < ceph_release_num.luminous
-  copy:
-    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
-    dest: "/etc/ceph/"
-    owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
-    group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
-    mode: "{{ ceph_keyring_permissions }}"
-  when:
-    - cephx
-    - copy_admin_key
-
 - name: copy rbd-mirror bootstrap key
   copy:
     src: "{{ fetch_directory }}/{{ fsid }}/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
@@ -24,5 +7,4 @@
     group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     mode: "{{ ceph_keyring_permissions }}"
   when:
-    - cephx
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+    - cephx
\ No newline at end of file
diff --git a/roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml b/roles/ceph-rbd-mirror/tasks/docker/copy_configs.yml
index 2133cbe4a181c01ac412814e62072c91c6ad4125..fe3c777ca8ca7a39cfc3ee8c9ac129003c83fb90 100644 (file)
@@ -1,15 +1,9 @@
 ---
-- name: set_fact bootstrap_rbd_keyring
-  set_fact:
-    bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
 - name: set_fact ceph_config_keys
   set_fact:
     ceph_config_keys:
       - /etc/ceph/{{ cluster }}.client.admin.keyring
-      - "{{ bootstrap_rbd_keyring | default('') }}"
+      - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring
 
 - name: stat for ceph config and keys
   local_action:
diff --git a/roles/ceph-rbd-mirror/tasks/pre_requisite.yml b/roles/ceph-rbd-mirror/tasks/pre_requisite.yml
index ddefb2a5023e738cdbf6c772bad87ca2e5895b30..29f917b8981ce25494ec26ac41b70dac82da7608 100644 (file)
@@ -8,17 +8,6 @@
   tags:
     - package-install
 
-- name: copy ceph admin key
-  copy:
-    src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
-    dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
-    owner: "{{ ceph_uid }}"
-    group: "{{ ceph_uid }}"
-    mode: "0600"
-  when:
-    - cephx
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
-
 - name: create rbd-mirror keyring
   command: ceph --cluster {{ cluster }} --name client.bootstrap-rbd --keyring /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring auth get-or-create client.rbd-mirror.{{ ansible_hostname }} mon 'profile rbd' osd 'profile rbd' -o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
   args:
@@ -26,7 +15,6 @@
   changed_when: false
   when:
     - cephx
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
 
 - name: set rbd-mirror key permissions
   file:
@@ -35,6 +23,4 @@
     group: "ceph"
     mode: "{{ ceph_keyring_permissions }}"
   when:
-    - cephx
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
+    - cephx
\ No newline at end of file
diff --git a/roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml b/roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml
index 15def3bdb8d14c9b9b925e5447cae48aea12b3d2..0269bf57e54fd5ef98f049a9454b56dccffb2bab 100644 (file)
     - ceph_rbd_mirror_systemd_overrides is defined
     - ansible_service_mgr == 'systemd'
 
-- name: start and add that the rbd mirror service to the init sequence
-  service:
-    name: "ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}"
-    state: started
-    enabled: yes
-  changed_when: false
-  when:
-    - ceph_release_num[ceph_release] < ceph_release_num.luminous
-
 - name: stop and remove the generic rbd-mirror service instance
   service:
     name: "ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}"
     state: stopped
     enabled: no
   changed_when: false
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
 
 # This task is a workaround for rbd-mirror not starting after reboot
 # The upstream fix is: https://github.com/ceph/ceph/pull/17969
     enabled: yes
   changed_when: false
   when:
-    - ceph_release_num[ceph_release] <= ceph_release_num.luminous
+    - ceph_release_num[ceph_release] == ceph_release_num.luminous
 
 - name: start and add the rbd-mirror service instance
   service:
     name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
     state: started
     enabled: yes
-  changed_when: false
-  when:
-    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+  changed_when: false
\ No newline at end of file
diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml
index 9d86b1c566e8bce7710afa50f05628e3b75633a0..88935e0f7129e15a33e2851e6ddc03c08adc7072 100644 (file)
@@ -19,7 +19,6 @@
   when:
     - rgw_zone != ""
     - rgw_multisite
-    - ceph_release_num[ceph_release] >= ceph_release_num.jewel
 
 - name: include_tasks docker/main.yml
   include_tasks: docker/main.yml
diff --git a/site-docker.yml.sample b/site-docker.yml.sample
index ebf6927cc1477fc295389596d4abdea85506b20d..b12385a062ec73c701d8e8c0f5f14d3c6c00897f 100644 (file)
     - role: ceph-docker-common
     - role: ceph-config
       tags: ['ceph_update_config']
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - role: ceph-mgr
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
   post_tasks:
     - name: set ceph manager install 'Complete'
       run_once: true
     - role: ceph-docker-common
     - role: ceph-config
       tags: ['ceph_update_config']
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - role: ceph-nfs
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
   post_tasks:
     - name: set ceph nfs install 'Complete'
       run_once: true
     - { role: ceph-defaults, tags: ['ceph_update_config'] }
     - role: ceph-handler
     - ceph-docker-common
-    - { role: ceph-config, tags: ['ceph_update_config'], when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" }
-    - { role: ceph-iscsi-gw, when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" }
+    - { role: ceph-config, tags: ['ceph_update_config'] }
+    - ceph-iscsi-gw
   post_tasks:
     - name: set ceph iscsi gw install 'Complete'
       run_once: true
diff --git a/site.yml.sample b/site.yml.sample
index 769aac56f69a0889afa4bb8b8c08dab3cf60e7ef..5ecba702c1ca3caec4d3f4fd1118ad16798f5472 100644 (file)
     - role: ceph-common
     - role: ceph-config
       tags: ['ceph_update_config']
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - role: ceph-mgr
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
   post_tasks:
     - name: set ceph manager install 'Complete'
       run_once: true
     - role: ceph-common
     - role: ceph-config
       tags: ['ceph_update_config']
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - role: ceph-nfs
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
   post_tasks:
     - name: set ceph nfs install 'Complete'
       run_once: true
     - role: ceph-common
     - role: ceph-config
       tags: ['ceph_update_config']
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
     - role: ceph-iscsi-gw
-      when:
-        - ceph_release_num[ceph_release] >= ceph_release_num.luminous
   post_tasks:
     - name: set ceph iscsi gw install 'Complete'
       run_once: true
diff --git a/tests/conftest.py b/tests/conftest.py
index 324887f69f749797f30eb25a68b9c84cdad1ee0e..03d3293212e9f6305c0d7f4914709e09d80d18b7 100644 (file)
@@ -57,22 +57,6 @@ def node(host, request):
         pytest.skip(
             "Not a valid test for non-containerized deployments or atomic hosts")  # noqa E501
 
-    if "mgrs" in group_names and ceph_stable_release == "jewel":
-        pytest.skip("mgr nodes can not be tested with ceph release jewel")
-
-    if "nfss" in group_names and ceph_stable_release == "jewel":
-        pytest.skip("nfs nodes can not be tested with ceph release jewel")
-
-    if group_names == ["iscsigws"] and ceph_stable_release == "jewel":
-        pytest.skip("iscsigws nodes can not be tested with ceph release jewel")  # noqa E501
-
-    if request.node.get_closest_marker("from_luminous") and ceph_release_num[ceph_stable_release] < ceph_release_num['luminous']:  # noqa E501
-        pytest.skip(
-            "This test is only valid for releases starting from Luminous and above")  # noqa E501
-
-    if request.node.get_closest_marker("before_luminous") and ceph_release_num[ceph_stable_release] >= ceph_release_num['luminous']:  # noqa E501
-        pytest.skip("This test is only valid for release before Luminous")
-
     journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated"
     if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test:  # noqa E501
         pytest.skip("Scenario is not using journal collocation")
diff --git a/tests/functional/tests/rbd-mirror/test_rbd_mirror.py b/tests/functional/tests/rbd-mirror/test_rbd_mirror.py
index 578b11beb81adb8223bc54e0718108f3ef16480f..7bdd1549443e17ccdbe9b20e979bfa7dd34c3280 100644 (file)
@@ -8,63 +8,28 @@ class TestRbdMirrors(object):
     def test_rbd_mirror_is_installed(self, node, host):
         assert host.package("rbd-mirror").is_installed
 
-    @pytest.mark.no_docker
-    @pytest.mark.before_luminous
-    def test_rbd_mirror_service_is_running_before_luminous(self, node, host):
-        service_name = "ceph-rbd-mirror@admin"
-        assert host.service(service_name).is_running
-
     @pytest.mark.docker
-    @pytest.mark.before_luminous
-    def test_rbd_mirror_service_is_running_docker_before_luminous(self, node, host):
+    def test_rbd_mirror_service_is_running_docker(self, node, host):
         service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
             hostname=node["vars"]["inventory_hostname"]
         )
         assert host.service(service_name).is_running
 
-    @pytest.mark.docker
-    @pytest.mark.from_luminous
-    def test_rbd_mirror_service_is_running_docker_from_luminous(self, node, host):
+    def test_rbd_mirror_service_is_running(self, node, host):
         service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
             hostname=node["vars"]["inventory_hostname"]
         )
         assert host.service(service_name).is_running
 
-    @pytest.mark.from_luminous
-    def test_rbd_mirror_service_is_running_from_luminous(self, node, host):
+    def test_rbd_mirror_service_is_enabled(self, node, host):
         service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
             hostname=node["vars"]["inventory_hostname"]
         )
-        assert host.service(service_name).is_running
-
-    @pytest.mark.no_docker
-    @pytest.mark.before_luminous
-    def test_rbd_mirror_service_is_enabled_before_luminous(self, node, host):
-        service_name = "ceph-rbd-mirror@admin"
         assert host.service(service_name).is_enabled
 
-    @pytest.mark.docker
-    @pytest.mark.before_luminous
-    def test_rbd_mirror_service_is_enabled_docker_before_luminous(self, node, host):
-        service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
-            hostname=node["vars"]["inventory_hostname"]
-        )
-        assert host.service(service_name).is_enabled
-
-    @pytest.mark.from_luminous
-    def test_rbd_mirror_service_is_enabled_from_luminous(self, node, host):
-        service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
-            hostname=node["vars"]["inventory_hostname"]
-        )
-        assert host.service(service_name).is_enabled
-
-    @pytest.mark.from_luminous
     def test_rbd_mirror_is_up(self, node, host):
-        ceph_release_num=node['ceph_release_num']
-        ceph_stable_release=node['ceph_stable_release']
         hostname=node["vars"]["inventory_hostname"]
         cluster=node["cluster_name"]
-        rolling_update=node["rolling_update"]
         daemons = []
         if node['docker']:
             docker_exec_cmd = 'docker exec ceph-rbd-mirror-{hostname}'.format(hostname=hostname)
@@ -80,10 +45,6 @@ class TestRbdMirrors(object):
         output = host.check_output(cmd)
         status = json.loads(output)
         daemon_ids = [i for i in status["servicemap"]["services"]["rbd-mirror"]["daemons"].keys() if i != "summary"]
-        if ceph_release_num[ceph_stable_release] > ceph_release_num['luminous'] or (ceph_release_num[ceph_stable_release] == ceph_release_num['luminous'] and rolling_update=='True'):
-            for daemon_id in daemon_ids:
-                daemons.append(status["servicemap"]["services"]["rbd-mirror"]["daemons"][daemon_id]["metadata"]["hostname"])
-            result = hostname in daemons
-        else:
-            result = hostname in daemon_ids
-        assert result
\ No newline at end of file
+        for daemon_id in daemon_ids:
+            daemons.append(status["servicemap"]["services"]["rbd-mirror"]["daemons"][daemon_id]["metadata"]["hostname"])
+        assert hostname in daemons
\ No newline at end of file
diff --git a/tests/functional/tests/rgw/test_rgw.py b/tests/functional/tests/rgw/test_rgw.py
index 69bd001f22fb61f8c849dee5d9c53c1b50c58d33..c940d260b30f6c92373f97ac86b17344acc4f77e 100644 (file)
@@ -22,7 +22,6 @@ class TestRGWs(object):
         )
         assert host.service(service_name).is_enabled
 
-    @pytest.mark.from_luminous
     def test_rgw_is_up(self, node, host):
         hostname=node["vars"]["inventory_hostname"]
         cluster=node["cluster_name"]
diff --git a/tox.ini b/tox.ini
index 0d1f2ddb90cc31f070f5dcf83bb35377bfd1b36b..5d8b0ffdc8d2a167f21993856254969f81088daf 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,5 @@
 [tox]
-envlist = {dev,jewel,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation}
-  {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,bluestore_lvm_osds_container,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch,lvm_osds_container,lvm_batch_container}
+envlist = {dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation,filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch}
   infra_lv_create
 
 skipsdist = True
@@ -183,11 +182,6 @@ setenv=
   shrink_osd: COPY_ADMIN_KEY = True
 
   rhcs: CEPH_STABLE_RELEASE = luminous
-  jewel: CEPH_STABLE_RELEASE = jewel
-  jewel: CEPH_DOCKER_IMAGE_TAG = latest-jewel
-  jewel: UPDATE_CEPH_STABLE_RELEASE = luminous
-  jewel: UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-luminous
-  jewel: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-jewel
   luminous: CEPH_STABLE_RELEASE = luminous
   luminous: CEPH_DOCKER_IMAGE_TAG = latest-luminous
   luminous: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-luminous