--- /dev/null
+../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+{
+ "ceph_conf_overrides": {
+ "global": {
+ "osd_pool_default_pg_num": 12,
+ "osd_pool_default_size": 1,
+ "mon_warn_on_pool_no_redundancy": false
+ }
+ },
+ "cephfs_pools": [
+ {
+ "name": "cephfs_data",
+ "pg_num": 8,
+ "pgp_num": 8,
+ "rule_name": "replicated_rule",
+ "type": 1,
+ "erasure_profile": "",
+ "expected_num_objects": "",
+ "application": "cephfs",
+ "size": 3,
+ "min_size": 0
+ },
+ {
+ "name": "cephfs_metadata",
+ "pg_num": 8,
+ "pgp_num": 8,
+ "rule_name": "replicated_rule",
+ "type": 1,
+ "erasure_profile": "",
+ "expected_num_objects": "",
+ "application": "cephfs",
+ "size": 3,
+ "min_size": 0
+ }
+ ],
+ "ceph_mon_docker_memory_limit": "2g"
+}
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: true
+
+containerized_deployment: true
+monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+ceph_docker_on_openstack: false
+public_network: "192.168.58.0/24"
+cluster_network: "192.168.59.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+# CI-only sizing: the scenario runs a single OSD, so replication is
+# disabled (size 1) and the no-redundancy warning is muted.
+ceph_conf_overrides:
+  global:
+    mon_warn_on_pool_no_redundancy: false
+    osd_pool_default_size: 1
+openstack_config: false
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: 1
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: 1
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
--- /dev/null
+---
+user_config: true
+copy_admin_key: true
+# two test pools created by the client role; pg counts come from the
+# deployment-wide osd_pool_default_pg_num
+test:
+  name: "test"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+test2:
+  name: "test2"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"
--- /dev/null
+---
+generate_crt: true
--- /dev/null
+---
+create_crush_tree: false
+crush_rule_config: false
+# rule referenced by the pools' rule_name ("HDD") in the other vars files
+crush_rule_hdd:
+  name: HDD
+  root: default
+  type: host
+  class: hdd
+  default: true
+crush_rules:
+  - "{{ crush_rule_hdd }}"
--- /dev/null
+---
+ceph_osd_docker_run_script_path: /var/tmp
+osd_objectstore: "bluestore"
+# LVs consumed by ceph-volume: one plain data LV, and one data LV with a
+# separate db LV — presumably pre-created by tests/functional/lvm_setup.yml
+# (run by this scenario's tox commands); verify names match that playbook
+lvm_volumes:
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: true
+# pools created for the RGW tests
+rgw_create_pools:
+  foo:
+    pg_num: 16
+  bar:
+    pg_num: 16
--- /dev/null
+# Ansible inventory for the docker-to-podman CI scenario:
+# a single monitor VM (also grafana-server and mgr) plus one OSD VM.
+[mons]
+mon0
+
+[osds]
+osd0
+
+[grafana-server]
+mon0
+
+[mgrs]
+mon0
+
+#[all:vars]
+#ansible_python_interpreter=/usr/bin/python3
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: true
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.58
+cluster_subnet: 192.168.59
+
+# MEMORY
+# set 1024 for CentOS
+memory: 2048
+
+vagrant_box: centos/7
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for CentOS/7, /home/{user}/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
--- /dev/null
+# CI scenario: deploy a containerized cluster with docker, then run the
+# docker-to-podman playbook to migrate the daemons, and re-run the
+# functional tests against the migrated cluster.
+[tox]
+envlist = centos-container-docker_to_podman
+
+skipsdist = True
+
+[testenv]
+whitelist_externals =
+    vagrant
+    bash
+    pip
+    sleep
+    rm
+passenv=*
+sitepackages=True
+setenv=
+  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
+  ANSIBLE_CONFIG = {toxinidir}/ansible.cfg
+  ANSIBLE_CALLBACK_WHITELIST = profile_tasks
+  ANSIBLE_KEEP_REMOTE_FILES = 1
+  ANSIBLE_CACHE_PLUGIN = memory
+  ANSIBLE_GATHERING = implicit
+  # only available for ansible >= 2.5
+  ANSIBLE_STDOUT_CALLBACK = yaml
+  # Set the vagrant box image to use
+  CEPH_ANSIBLE_VAGRANT_BOX = centos/7
+
+deps= -r{toxinidir}/tests/requirements.txt
+changedir= {toxinidir}/tests/functional/docker2podman
+
+commands=
+  bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}
+  bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}
+
+  # configure lvm
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/setup.yml
+
+  # initial deployment with docker as the container engine
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/site-container.yml.sample --extra-vars "\
+      delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
+      fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
+      ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
+      ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
+      ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-nautilus} \
+  "
+
+  # migrate the running daemons from docker to podman
+  ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/docker-to-podman.yml
+
+  py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+
+  vagrant destroy -f