---
- name: include pre_requisite.yml
include_tasks: pre_requisite.yml
+ when: groups.get(mon_group_name, []) | length > 0
- name: include create_users_keys.yml
include_tasks: create_users_keys.yml
cluster_interface = "ens7"
subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
- num_mons = len(ansible_vars["groups"]["mons"])
+ num_mons = len(ansible_vars["groups"].get('mons', []))
if osd_auto_discovery:
num_osds = 3
else:
- block:
- name: set_fact group_vars_path
set_fact:
- group_vars_path: "{{ change_dir + '/hosts' if 'ooo-collocation' in change_dir.split('/') else change_dir + '/group_vars' }}"
+ group_vars_path: "{{ change_dir + '/hosts' if 'ooo-collocation' in change_dir.split('/') else change_dir + '/inventory/group_vars' if 'external_clients' in change_dir.split('/') else change_dir + '/group_vars' }}"
- block:
- name: change ceph_repository to 'dev'
+++ /dev/null
-[clients]
-client0
-client1
\ No newline at end of file
+++ /dev/null
----
-# this is only here to let the CI tests know
-# that this scenario is using docker
-docker: True
-
-containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_num_instances: 2
-ceph_mon_docker_subnet: "{{ public_network }}"
-ceph_docker_on_openstack: False
-public_network: "192.168.31.0/24"
-cluster_network: "192.168.32.0/24"
-rgw_override_bucket_index_max_shards: 16
-rgw_bucket_default_quota_max_objects: 1638400
-openstack_config: True
-dashboard_enabled: false
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- mon_allow_pool_size_one: true
- mon_warn_on_pool_no_redundancy: false
- osd_pool_default_size: 1
-handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
-rgw_create_pools:
- foo:
- pg_num: 16
- bar:
- pg_num: 16
-ceph_osd_docker_run_script_path: /var/tmp
-osd_objectstore: "bluestore"
-lvm_volumes:
- - data: data-lv1
- data_vg: test_group
- - data: data-lv2
- data_vg: test_group
- db: journal1
- db_vg: journals
-fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
-generate_fsid: false
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-mon1
-mon2
-
-[mgrs]
-mon0
--- /dev/null
+[clients]
+client0
+client1
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+radosgw_num_instances: 2
+ceph_mon_docker_subnet: "{{ public_network }}"
+ceph_docker_on_openstack: False
+public_network: "192.168.31.0/24"
+cluster_network: "192.168.32.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+openstack_config: True
+dashboard_enabled: false
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ bar:
+ pg_num: 16
+ceph_osd_docker_run_script_path: /var/tmp
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
+generate_fsid: false
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: True
+user_config: True
+test:
+ name: "test"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+ size: "{{ osd_pool_default_size }}"
+test2:
+ name: "test2"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+ size: "{{ osd_pool_default_size }}"
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[mgrs]
+mon0
+++ /dev/null
-[clients]
-client0
-client1
\ No newline at end of file
+++ /dev/null
----
-containerized_deployment: False
-ceph_origin: repository
-ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-ceph_mon_docker_subnet: "{{ public_network }}"
-ceph_docker_on_openstack: False
-openstack_config: True
-dashboard_enabled: False
-public_network: "192.168.31.0/24"
-cluster_network: "192.168.32.0/24"
-rgw_override_bucket_index_max_shards: 16
-rgw_bucket_default_quota_max_objects: 1638400
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- mon_allow_pool_size_one: true
- mon_warn_on_pool_no_redundancy: false
- osd_pool_default_size: 1
-handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
-ceph_osd_docker_run_script_path: /var/tmp
-osd_objectstore: "bluestore"
-lvm_volumes:
- - data: data-lv1
- data_vg: test_group
- - data: data-lv2
- data_vg: test_group
- db: journal1
- db_vg: journals
-rgw_create_pools:
- foo:
- pg_num: 16
- bar:
- pg_num: 16
-fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
-generate_fsid: false
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-mon1
-mon2
-
-[mgrs]
-mon0
+++ /dev/null
-hosts
\ No newline at end of file
--- /dev/null
+[clients]
+client0
+client1
\ No newline at end of file
--- /dev/null
+---
+containerized_deployment: False
+ceph_origin: repository
+ceph_repository: community
+monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+ceph_docker_on_openstack: False
+openstack_config: True
+dashboard_enabled: False
+public_network: "192.168.31.0/24"
+cluster_network: "192.168.32.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ mon_allow_pool_size_one: true
+ mon_warn_on_pool_no_redundancy: false
+ osd_pool_default_size: 1
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+ceph_osd_docker_run_script_path: /var/tmp
+osd_objectstore: "bluestore"
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+rgw_create_pools:
+ foo:
+ pg_num: 16
+ bar:
+ pg_num: 16
+fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
+generate_fsid: false
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: True
+user_config: True
+test:
+ name: "test"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+ size: "{{ osd_pool_default_size }}"
+test2:
+ name: "test2"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+ size: "{{ osd_pool_default_size }}"
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
\ No newline at end of file
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[mgrs]
+mon0
--- /dev/null
+hosts
\ No newline at end of file
--- /dev/null
+---
+- hosts: clients
+ gather_facts: false
+ become: yes
+ tasks:
+
+ - name: get keys from monitors
+ command: "{{ 'podman exec ceph-mon-mon0' if containerized_deployment | bool else '' }} ceph --cluster ceph auth get client.admin"
+ register: _key
+ delegate_to: "{{ groups.get('mons')[0] }}"
+ run_once: true
+
+ - name: create /etc/ceph
+ file:
+ path: /etc/ceph
+ state: directory
+ owner: 167
+ group: 167
+ mode: "0755"
+
+ - name: copy ceph key(s) if needed
+ copy:
+ dest: "/etc/ceph/ceph.client.admin.keyring"
+ content: "{{ _key.stdout + '\n' }}"
+ owner: 167
+ group: 167
+ mode: "0600"
# Set the vagrant box image to use
centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/8
- ubuntu: CEPH_ANSIBLE_VAGRANT_BOX = guits/ubuntu-bionic64
- # Set the ansible inventory host file to be used according to which distrib we are running on
- ubuntu: _INVENTORY = hosts-ubuntu
- INVENTORY = {env:_INVENTORY:hosts}
container: CONTAINER_DIR = /container
container: PLAYBOOK = site-container.yml.sample
non_container: PLAYBOOK = site.yml.sample
bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
+ ansible-playbook -vv -i {changedir}/inventory {toxinidir}/tests/functional/setup.yml
# configure lvm
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
+ ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/tests/functional/lvm_setup.yml
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
+ ansible-playbook -vv -i {changedir}/inventory/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --limit 'all:!clients' --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
ceph_docker_image_tag=latest-octopus \
"
- ansible-playbook -vv -i {changedir}/external_clients-hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
+ ansible-playbook -vv -i {changedir}/inventory {toxinidir}/tests/functional/external_clients_admin_key.yml
+
+ ansible-playbook -vv -i {changedir}/inventory/external_clients-hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
fsid=40358a87-ab6e-4bdc-83db-1d909147861c \
ceph_docker_image_tag=latest-octopus \
"
- bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
+ bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/inventory/external_clients-hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests/test_install.py::TestCephConf"
vagrant destroy --force