Jewel is no longer supported in ceph-ansible; the last release series to support Jewel is `stable-3.1`.
With Luminous as the minimum supported release, the Jewel-only code paths and the pre-Luminous `ceph_release_num` guards can be removed.
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
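
Most of this patch removes the per-task release guard that kept Jewel working, i.e. conditions of the form `ceph_release_num[ceph_release] >= ceph_release_num.luminous`; once Luminous is the floor, such a guard is always true and only adds noise. As a rough, illustrative sketch (not part of this patch), a single fail-fast check at the start of a play could stand in for them:

```yaml
# Illustrative only, not part of this patch: with Jewel dropped, one early
# assertion could replace the removed per-task release guards.
- name: fail on unsupported ceph release
  fail:
    msg: "This ceph-ansible version requires Luminous or newer; use stable-3.1 for Jewel."
  when: ceph_release_num[ceph_release] < ceph_release_num.luminous
```
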
become: false
vars:
- mgr_group_name: mgrs
- - jewel_minor_update: False
vars_prompt:
- name: ireallymeanit
fail:
msg: "Please add a mgr host to your inventory."
when:
- - not jewel_minor_update
- groups.get(mgr_group_name, []) | length == 0
- not containerized_deployment
- cephx
- groups.get(mgr_group_name, []) | length > 0
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ groups.get(mgr_group_name, []) }}"
- cephx
- groups.get(mgr_group_name, []) | length > 0
- inventory_hostname == groups[mon_group_name]|last
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ groups.get(mgr_group_name, []) }}"
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
- ceph-config
- - { role: ceph-mgr,
- when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
- (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
+ - ceph-mgr
post_tasks:
- name: start ceph mgr
health_osd_check_retries: 40
health_osd_check_delay: 30
upgrade_ceph_packages: True
- jewel_minor_update: False
hosts:
- "{{ osd_group_name|default('osds') }}"
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
register: ceph_versions
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - not jewel_minor_update
- name: set_fact ceph_versions_osd
set_fact:
ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - not jewel_minor_update
# length == 1 means there is a single osds versions entry
# thus all the osds are running the same version
when:
- (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
- ceph_versions_osd | string is search("ceph version 10")
- - not jewel_minor_update
- name: get num_pgs - non container
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} -s --format json"
- name: unset osd flags
- vars:
- - jewel_minor_update: False
hosts:
- "{{ mon_group_name|default('mons') }}"
command: "{{ docker_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
register: ceph_versions
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - not jewel_minor_update
- name: set_fact ceph_versions_osd
set_fact:
ceph_versions_osd: "{{ (ceph_versions.stdout|from_json).osd }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - not jewel_minor_update
# length == 1 means there is a single osds versions entry
# thus all the osds are running the same version
when:
- (ceph_versions.get('stdout', '{}')|from_json).get('osd', {}) | length == 1
- ceph_versions_osd | string is search("ceph version 12")
- - not jewel_minor_update
-
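
For context on the two version checks above: `ceph versions` prints a JSON map of daemon type to a map of version banner -> daemon count, so `(ceph_versions.stdout|from_json).osd | length == 1` means every OSD reports the same version, and the `search("ceph version 10")` / `search("ceph version 12")` tests tell Jewel (10.x) apart from Luminous (12.x). A rough sketch of the structure being parsed, with the banner strings abbreviated for illustration:

```yaml
# Approximate shape of `ceph --cluster <cluster> versions` output, shown as YAML:
mon:
  "ceph version 12.2.x (...) luminous (stable)": 3
osd:
  "ceph version 12.2.x (...) luminous (stable)": 12   # a single key here means all OSDs run one version
overall:
  "ceph version 12.2.x (...) luminous (stable)": 15
```
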
- name: upgrade ceph mdss cluster
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
- ceph-config
- - { role: ceph-nfs,
- when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
- (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
+ - ceph-nfs
post_tasks:
- name: start nfs gateway
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
- ceph-config
- - { role: ceph-iscsi-gw,
- when: "(ceph_release_num[ceph_release] >= ceph_release_num.luminous) or
- (ceph_release_num[ceph_release] < ceph_release_num.luminous and rolling_update)" }
+ - ceph-iscsi-gw
post_tasks:
- name: start rbd-target-gw
- inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
- ceph_current_status['servicemap'] is defined
- ceph_current_status['servicemap']['services'] is defined
- - ceph_current_status['servicemap']['services']['rgw'] is defined # that's the way to cover ceph_release_num[ceph_release] >= ceph_release_num['luminous']
+ - ceph_current_status['servicemap']['services']['rgw'] is defined
with_items:
- "{{ ceph_keys.stdout_lines }}"
-- name: set_fact bootstrap_rbd_keyring
- set_fact:
- bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
- when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
- name: copy keys to the ansible server
fetch:
src: "{{ item }}"
- "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring"
- "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
- "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring"
- - "{{ bootstrap_rbd_keyring | default([]) }}"
+ - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
{% if containerized_deployment %}
DOCKER_EXEC="docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
{% endif %}
-{% if ceph_release_num[ceph_release] < ceph_release_num['luminous'] %}
-SOCKET=/var/run/ceph/{{ cluster }}-client.admin.asok
-{% else %}
+
# Backward compatibility
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok
-{% endif %}
# First, restart the daemon
systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
state: enabled
notify: restart firewalld
when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- mgr_group_name is defined
- mgr_group_name in group_names
- firewalld_pkg_query.rc == 0
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- check_existing_cephfs.rc != 0
- - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
- name: allow multimds
command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- - ceph_release_num[ceph_release] >= ceph_release_num.jewel
- mds_max_mds > 1
command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
register: _ceph_mgr_modules
delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
- name: set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict)
set_fact:
_ceph_mgr_modules: "{{ _ceph_mgr_modules.get('stdout', '{}') | from_json }}"
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
- name: set _disabled_ceph_mgr_modules fact
set_fact:
_disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}"
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
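
The `_disabled_ceph_mgr_modules` expression kept above has to handle two output formats of `ceph mgr module ls`: on Luminous, `disabled_modules` is a plain list of module names, while on newer releases each entry is a dict with a `name` key plus metadata, hence the `type_debug != 'dict'` branch and the `map(attribute='name')` fallback. An illustrative, trimmed sketch of both shapes and the normalized result:

```yaml
# Illustrative, trimmed `ceph mgr module ls` output shapes:
luminous_style:
  disabled_modules: [balancer, dashboard]            # list of strings
newer_style:
  disabled_modules:                                  # list of dicts
    - { name: balancer, can_run: true, error_string: "" }
    - { name: dashboard, can_run: true, error_string: "" }
# Either way, _disabled_ceph_mgr_modules normalizes to:
normalized: [balancer, dashboard]
```
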
- name: disable ceph mgr enabled modules
command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module disable {{ item }}"
when:
- item not in ceph_mgr_modules
- not _ceph_mgr_modules.get('skipped')
- - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
- name: add modules to ceph-mgr
command: "{{ docker_exec_cmd_mgr | default('') }} ceph --cluster {{ cluster }} mgr module enable {{ item }}"
with_items: "{{ ceph_mgr_modules }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
- - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
+ - (item in _disabled_ceph_mgr_modules or _disabled_ceph_mgr_modules == [])
\ No newline at end of file
check_mode: no
when:
- cephx
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
-- name: collect admin and bootstrap keys
- command: ceph-create-keys --cluster {{ cluster }} -i {{ monitor_name }}
- changed_when: false
- check_mode: no
- when:
- - cephx
- - ceph_release_num[ceph_release] < ceph_release_num.luminous
# NOTE (leseb): wait for mon discovery and quorum resolution
# the admin key is not instantaneously created so we have to wait a bit
- cephx
- groups.get(mgr_group_name, []) | length > 0
- inventory_hostname == groups[mon_group_name]|last
- - ceph_release_num[ceph_release] > ceph_release_num.jewel
with_items: "{{ groups.get(mgr_group_name, []) }}"
# once this gets backported github.com/ceph/ceph/pull/20983
- name: set_fact bootstrap_rbd_keyring
set_fact:
bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- name: copy keys to the ansible server
fetch:
- inventory_hostname == groups.get(mon_group_name) | last
- not item.get('skipped', false)
-- name: set_fact osd_pool_default_crush_rule to osd_pool_default_crush_replicated_ruleset if release < luminous else osd_pool_default_crush_rule
+- name: set_fact osd_pool_default_crush_rule
set_fact:
- osd_pool_default_crush_rule: "{{ 'osd_pool_default_crush_replicated_ruleset' if ceph_release_num[ceph_release] < ceph_release_num.luminous else 'osd_pool_default_crush_rule' }}"
+ osd_pool_default_crush_rule: "osd_pool_default_crush_rule"
- name: insert new default crush rule into daemon to prevent restart
command: "{{ hostvars[item]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[item]['monitor_name'] }} config set {{ osd_pool_default_crush_rule }} {{ info_ceph_default_crush_rule_yaml.rule_id }}"
mode: "0755"
recurse: true
-- name: set_fact client_admin_ceph_authtool_cap >= ceph_release_num.luminous
+- name: set_fact client_admin_ceph_authtool_cap
set_fact:
client_admin_ceph_authtool_cap:
mon: allow *
mds: allow
mgr: allow *
when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- - cephx
- - admin_secret != 'admin_secret'
-
-- name: set_fact client_admin_ceph_authtool_cap < ceph_release_num.luminous
- set_fact:
- client_admin_ceph_authtool_cap:
- mon: allow *
- osd: allow *
- mds: allow
- when:
- - ceph_release_num[ceph_release] < ceph_release_num.luminous
- cephx
- admin_secret != 'admin_secret'
- /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-
-- name: register rbd bootstrap key
- set_fact:
- bootstrap_rbd_keyring:
- - "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
- when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
-- name: merge rbd bootstrap key to config and keys paths
- set_fact:
- ceph_config_keys: "{{ ceph_config_keys + bootstrap_rbd_keyring }}"
- when: ceph_release_num[ceph_release] >= ceph_release_num.luminous
+ - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring
- name: stat for ceph config and keys
local_action:
when:
- not rolling_update
- inventory_hostname == groups[mon_group_name]|last
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
when:
- osd_objectstore == 'bluestore'
- not dmcrypt
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- not containerized_deployment
- name: set_fact ceph_disk_cli_options 'ceph_disk_cli_options'
when:
- osd_objectstore == 'filestore'
- not dmcrypt
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- - not containerized_deployment
-
-- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }}'
- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }}"
- when:
- - osd_objectstore == 'filestore'
- - not dmcrypt
- - ceph_release_num[ceph_release] < ceph_release_num.luminous
- not containerized_deployment
- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --bluestore --dmcrypt'
when:
- osd_objectstore == 'bluestore'
- dmcrypt
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- not containerized_deployment
- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --filestore --dmcrypt'
when:
- osd_objectstore == 'filestore'
- dmcrypt
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- - not containerized_deployment
-
-- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --dmcrypt'
- set_fact:
- ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
- when:
- - osd_objectstore == 'filestore'
- - dmcrypt
- - ceph_release_num[ceph_release] < ceph_release_num.luminous
- not containerized_deployment
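
With the pre-Luminous fallbacks removed, `ceph_disk_cli_options` always carries an explicit objectstore flag (the dropped tasks passed no `--bluestore`/`--filestore` flag because pre-Luminous `ceph-disk` only knew filestore). For illustration, the values implied by the task names above (non-containerized case) look like this; the non-dmcrypt values are an assumption based on the same naming pattern:

```yaml
# Illustrative only: ceph_disk_cli_options per objectstore/dmcrypt combination,
# inferred from the task names above (non-containerized deployments).
bluestore: "--cluster {{ cluster }} --bluestore"
filestore: "--cluster {{ cluster }} --filestore"
bluestore_dmcrypt: "--cluster {{ cluster }} --bluestore --dmcrypt"
filestore_dmcrypt: "--cluster {{ cluster }} --filestore --dmcrypt"
```
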
- name: set_fact docker_env_args '-e KV_TYPE={{ kv_type }} -e KV_IP={{ kv_endpoint }} -e KV_PORT={{ kv_port }}'
delegate_to: "{{ groups[mon_group_name][0] }}"
failed_when: false
-- name: set_fact rule_name before luminous
- set_fact:
- rule_name: "replicated_ruleset"
- when:
- - ceph_release_num[ceph_release] < ceph_release_num['luminous']
- - not rolling_update
-
-- name: set_fact rule_name from luminous
- set_fact:
- rule_name: "replicated_rule"
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
- or (ceph_release_num[ceph_release] < ceph_release_num['luminous'] and rolling_update)
-
- name: create openstack pool(s)
command: >
{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
{{ item.0.pg_num }}
{{ item.0.pgp_num | default(item.0.pg_num) }}
- {{ rule_name if item.0.rule_name | default(rule_name) == '' else item.0.rule_name | default(rule_name) }}
+ {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
{{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
{%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
{{ item.0.erasure_profile }}
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
- item.application is defined
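
Since Luminous renamed the default CRUSH rule from `replicated_ruleset` to `replicated_rule` (see the removed `rule_name` facts above), the pool-creation command can now hard-code `replicated_rule` whenever an `openstack_pools` entry leaves `rule_name` unset or empty. Illustrative entries and the rule each resolves to:

```yaml
# Illustrative openstack_pools entries and the CRUSH rule the command picks:
openstack_pools:
  - { name: images, pg_num: 32, rule_name: "", application: rbd }            # -> replicated_rule
  - { name: volumes, pg_num: 64, application: rbd }                          # -> replicated_rule (rule_name unset)
  - { name: backups, pg_num: 32, rule_name: custom_rule, application: rbd }  # -> custom_rule
```
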
- name: create openstack cephx key(s)
---
-- name: set_fact copy_admin_key - true when ceph_release_num[ceph_release] < ceph_release_num.luminous
- set_fact:
- copy_admin_key: True
- when:
- - ceph_release_num[ceph_release] < ceph_release_num.luminous
-
-- name: copy ceph admin keyring when ceph_release_num[ceph_release] < ceph_release_num.luminous
- copy:
- src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
- dest: "/etc/ceph/"
- owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
- group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
- mode: "{{ ceph_keyring_permissions }}"
- when:
- - cephx
- - copy_admin_key
-
- name: copy rbd-mirror bootstrap key
copy:
src: "{{ fetch_directory }}/{{ fsid }}/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
when:
- - cephx
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+ - cephx
\ No newline at end of file
---
-- name: set_fact bootstrap_rbd_keyring
- set_fact:
- bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
- name: set_fact ceph_config_keys
set_fact:
ceph_config_keys:
- /etc/ceph/{{ cluster }}.client.admin.keyring
- - "{{ bootstrap_rbd_keyring | default('') }}"
+ - /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring
- name: stat for ceph config and keys
local_action:
tags:
- package-install
-- name: copy ceph admin key
- copy:
- src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.admin.keyring"
- dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
- owner: "{{ ceph_uid }}"
- group: "{{ ceph_uid }}"
- mode: "0600"
- when:
- - cephx
- - ceph_release_num[ceph_release] < ceph_release_num.luminous
-
- name: create rbd-mirror keyring
command: ceph --cluster {{ cluster }} --name client.bootstrap-rbd --keyring /var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring auth get-or-create client.rbd-mirror.{{ ansible_hostname }} mon 'profile rbd' osd 'profile rbd' -o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
args:
changed_when: false
when:
- cephx
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- name: set rbd-mirror key permissions
file:
group: "ceph"
mode: "{{ ceph_keyring_permissions }}"
when:
- - cephx
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-
+ - cephx
\ No newline at end of file
- ceph_rbd_mirror_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
-- name: start and add that the rbd mirror service to the init sequence
- service:
- name: "ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}"
- state: started
- enabled: yes
- changed_when: false
- when:
- - ceph_release_num[ceph_release] < ceph_release_num.luminous
-
- name: stop and remove the generic rbd-mirror service instance
service:
name: "ceph-rbd-mirror@{{ ceph_rbd_mirror_local_user }}"
state: stopped
enabled: no
changed_when: false
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
# This task is a workaround for rbd-mirror not starting after reboot
# The upstream fix is: https://github.com/ceph/ceph/pull/17969
enabled: yes
changed_when: false
when:
- - ceph_release_num[ceph_release] <= ceph_release_num.luminous
+ - ceph_release_num[ceph_release] == ceph_release_num.luminous
- name: start and add the rbd-mirror service instance
service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
state: started
enabled: yes
- changed_when: false
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+ changed_when: false
\ No newline at end of file
when:
- rgw_zone != ""
- rgw_multisite
- - ceph_release_num[ceph_release] >= ceph_release_num.jewel
- name: include_tasks docker/main.yml
include_tasks: docker/main.yml
- role: ceph-docker-common
- role: ceph-config
tags: ['ceph_update_config']
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- role: ceph-mgr
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
post_tasks:
- name: set ceph manager install 'Complete'
run_once: true
- role: ceph-docker-common
- role: ceph-config
tags: ['ceph_update_config']
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- role: ceph-nfs
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
post_tasks:
- name: set ceph nfs install 'Complete'
run_once: true
- { role: ceph-defaults, tags: ['ceph_update_config'] }
- role: ceph-handler
- ceph-docker-common
- - { role: ceph-config, tags: ['ceph_update_config'], when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" }
- - { role: ceph-iscsi-gw, when: "ceph_release_num[ceph_release] >= ceph_release_num.luminous" }
+ - { role: ceph-config, tags: ['ceph_update_config'] }
+ - ceph-iscsi-gw
post_tasks:
- name: set ceph iscsi gw install 'Complete'
run_once: true
- role: ceph-common
- role: ceph-config
tags: ['ceph_update_config']
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- role: ceph-mgr
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
post_tasks:
- name: set ceph manager install 'Complete'
run_once: true
- role: ceph-common
- role: ceph-config
tags: ['ceph_update_config']
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- role: ceph-nfs
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
post_tasks:
- name: set ceph nfs install 'Complete'
run_once: true
- role: ceph-common
- role: ceph-config
tags: ['ceph_update_config']
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
- role: ceph-iscsi-gw
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num.luminous
post_tasks:
- name: set ceph iscsi gw install 'Complete'
run_once: true
pytest.skip(
"Not a valid test for non-containerized deployments or atomic hosts") # noqa E501
- if "mgrs" in group_names and ceph_stable_release == "jewel":
- pytest.skip("mgr nodes can not be tested with ceph release jewel")
-
- if "nfss" in group_names and ceph_stable_release == "jewel":
- pytest.skip("nfs nodes can not be tested with ceph release jewel")
-
- if group_names == ["iscsigws"] and ceph_stable_release == "jewel":
- pytest.skip("iscsigws nodes can not be tested with ceph release jewel") # noqa E501
-
- if request.node.get_closest_marker("from_luminous") and ceph_release_num[ceph_stable_release] < ceph_release_num['luminous']: # noqa E501
- pytest.skip(
- "This test is only valid for releases starting from Luminous and above") # noqa E501
-
- if request.node.get_closest_marker("before_luminous") and ceph_release_num[ceph_stable_release] >= ceph_release_num['luminous']: # noqa E501
- pytest.skip("This test is only valid for release before Luminous")
-
journal_collocation_test = ansible_vars.get("osd_scenario") == "collocated"
if request.node.get_closest_marker("journal_collocation") and not journal_collocation_test: # noqa E501
pytest.skip("Scenario is not using journal collocation")
def test_rbd_mirror_is_installed(self, node, host):
assert host.package("rbd-mirror").is_installed
- @pytest.mark.no_docker
- @pytest.mark.before_luminous
- def test_rbd_mirror_service_is_running_before_luminous(self, node, host):
- service_name = "ceph-rbd-mirror@admin"
- assert host.service(service_name).is_running
-
@pytest.mark.docker
- @pytest.mark.before_luminous
- def test_rbd_mirror_service_is_running_docker_before_luminous(self, node, host):
+ def test_rbd_mirror_service_is_running_docker(self, node, host):
service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
assert host.service(service_name).is_running
- @pytest.mark.docker
- @pytest.mark.from_luminous
- def test_rbd_mirror_service_is_running_docker_from_luminous(self, node, host):
+ def test_rbd_mirror_service_is_running(self, node, host):
service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
assert host.service(service_name).is_running
- @pytest.mark.from_luminous
- def test_rbd_mirror_service_is_running_from_luminous(self, node, host):
+ def test_rbd_mirror_service_is_enabled(self, node, host):
service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
hostname=node["vars"]["inventory_hostname"]
)
- assert host.service(service_name).is_running
-
- @pytest.mark.no_docker
- @pytest.mark.before_luminous
- def test_rbd_mirror_service_is_enabled_before_luminous(self, node, host):
- service_name = "ceph-rbd-mirror@admin"
assert host.service(service_name).is_enabled
- @pytest.mark.docker
- @pytest.mark.before_luminous
- def test_rbd_mirror_service_is_enabled_docker_before_luminous(self, node, host):
- service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
- hostname=node["vars"]["inventory_hostname"]
- )
- assert host.service(service_name).is_enabled
-
- @pytest.mark.from_luminous
- def test_rbd_mirror_service_is_enabled_from_luminous(self, node, host):
- service_name = "ceph-rbd-mirror@rbd-mirror.{hostname}".format(
- hostname=node["vars"]["inventory_hostname"]
- )
- assert host.service(service_name).is_enabled
-
- @pytest.mark.from_luminous
def test_rbd_mirror_is_up(self, node, host):
- ceph_release_num=node['ceph_release_num']
- ceph_stable_release=node['ceph_stable_release']
hostname=node["vars"]["inventory_hostname"]
cluster=node["cluster_name"]
- rolling_update=node["rolling_update"]
daemons = []
if node['docker']:
docker_exec_cmd = 'docker exec ceph-rbd-mirror-{hostname}'.format(hostname=hostname)
output = host.check_output(cmd)
status = json.loads(output)
daemon_ids = [i for i in status["servicemap"]["services"]["rbd-mirror"]["daemons"].keys() if i != "summary"]
- if ceph_release_num[ceph_stable_release] > ceph_release_num['luminous'] or (ceph_release_num[ceph_stable_release] == ceph_release_num['luminous'] and rolling_update=='True'):
- for daemon_id in daemon_ids:
- daemons.append(status["servicemap"]["services"]["rbd-mirror"]["daemons"][daemon_id]["metadata"]["hostname"])
- result = hostname in daemons
- else:
- result = hostname in daemon_ids
- assert result
\ No newline at end of file
+ for daemon_id in daemon_ids:
+ daemons.append(status["servicemap"]["services"]["rbd-mirror"]["daemons"][daemon_id]["metadata"]["hostname"])
+ assert hostname in daemons
\ No newline at end of file
)
assert host.service(service_name).is_enabled
- @pytest.mark.from_luminous
def test_rgw_is_up(self, node, host):
hostname=node["vars"]["inventory_hostname"]
cluster=node["cluster_name"]
[tox]
-envlist = {dev,jewel,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation}
- {dev,luminous,mimic,rhcs}-{filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,bluestore_lvm_osds_container,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch,lvm_osds_container,lvm_batch_container}
+envlist = {dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,docker_cluster,update_cluster,cluster,update_docker_cluster,switch_to_containers,purge_filestore_osds_container,purge_filestore_osds_non_container,purge_cluster_non_container,purge_cluster_container,ooo_collocation,filestore_osds_non_container,filestore_osds_container,bluestore_osds_container,bluestore_osds_non_container,bluestore_lvm_osds,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,purge_bluestore_osds_non_container,purge_bluestore_osds_container,lvm_batch}
infra_lv_create
skipsdist = True
shrink_osd: COPY_ADMIN_KEY = True
rhcs: CEPH_STABLE_RELEASE = luminous
- jewel: CEPH_STABLE_RELEASE = jewel
- jewel: CEPH_DOCKER_IMAGE_TAG = latest-jewel
- jewel: UPDATE_CEPH_STABLE_RELEASE = luminous
- jewel: UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-luminous
- jewel: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-jewel
luminous: CEPH_STABLE_RELEASE = luminous
luminous: CEPH_DOCKER_IMAGE_TAG = latest-luminous
luminous: CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-luminous