* ``UPDATE_CEPH_STABLE_RELEASE``: (default: ``kraken``) This would configure the ``ceph-ansible`` variable ``ceph_stable_release`` during an ``update``
scenario. This is set automatically when using the ``jewel-*`` or ``kraken-*`` testing scenarios.
-* ``CEPH_DOCKER_REGISTRY``: (default: ``docker.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
+* ``CEPH_DOCKER_REGISTRY``: (default: ``quay.ceph.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
-* ``CEPH_DOCKER_IMAGE``: (default: ``ceph/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
+* ``CEPH_DOCKER_IMAGE``: (default: ``ceph-ci/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
* ``CEPH_DOCKER_IMAGE_TAG``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image_name``.
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-dashboard_enabled: False
\ No newline at end of file
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-dashboard_enabled: False
\ No newline at end of file
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-dashboard_enabled: False
\ No newline at end of file
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-dashboard_enabled: False
\ No newline at end of file
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-dashboard_enabled: False
\ No newline at end of file
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
osd_pool_default_size: 1
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
-dashboard_enabled: False
\ No newline at end of file
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
- data: data-lv2
data_vg: test_group
db: journal1
- db_vg: journals
\ No newline at end of file
+ db_vg: journals
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
handler_health_osd_check_delay: 10
mds_max_mds: 2
dashboard_admin_password: $sX!cD$rYU6qR^B!
-grafana_admin_password: +xFRe+RES@7vg24n
\ No newline at end of file
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
\ No newline at end of file
handler_health_osd_check_delay: 10
mds_max_mds: 2
dashboard_admin_password: $sX!cD$rYU6qR^B!
-grafana_admin_password: +xFRe+RES@7vg24n
\ No newline at end of file
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
\ No newline at end of file
public_network: "192.168.30.0/24"
cluster_network: "192.168.31.0/24"
dashboard_admin_password: $sX!cD$rYU6qR^B!
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
dashboard_admin_user_ro: true
-grafana_admin_password: +xFRe+RES@7vg24n
\ No newline at end of file
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
\ No newline at end of file
handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
dashboard_admin_user_ro: true
-grafana_admin_password: +xFRe+RES@7vg24n
\ No newline at end of file
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
\ No newline at end of file
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
-grafana_admin_password: +xFRe+RES@7vg24n
\ No newline at end of file
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
db: journal1
db_vg: journals
fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
-generate_fsid: false
\ No newline at end of file
+generate_fsid: false
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
osd_pool_default_size: 1
dashboard_enabled: False
handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
\ No newline at end of file
+handler_health_osd_check_delay: 10
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
osd_pool_default_size: 1
dashboard_enabled: False
handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
\ No newline at end of file
+handler_health_osd_check_delay: 10
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
osd_pool_default_size: 1
dashboard_enabled: False
handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
\ No newline at end of file
+handler_health_osd_check_delay: 10
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
application: rbd
openstack_pools:
- "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
\ No newline at end of file
+ - "{{ openstack_cinder_pool }}"
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
rgw_keystone_admin_user: swift, rgw_keystone_api_version: 3, rgw_keystone_implicit_tenants: 'true',
rgw_keystone_url: 'http://192.168.95.10:5000', rgw_s3_auth_use_keystone: 'true', rgw_keystone_revocation_interval: 0}
cluster: mycluster
- ceph_docker_image: ceph/daemon
- ceph_docker_image_tag: latest-master
- ceph_docker_registry: docker.io
+ ceph_docker_image: ceph-ci/daemon
+ ceph_docker_image_tag: latest-octopus
+ ceph_docker_registry: quay.ceph.io
cephfs_data_pool:
name: 'manila_data'
pg_num: "{{ osd_pool_default_pg_num }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
-grafana_admin_password: +xFRe+RES@7vg24n
\ No newline at end of file
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-dashboard_enabled: False
\ No newline at end of file
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-dashboard_enabled: False
\ No newline at end of file
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
osd_pool_default_size: 1
openstack_config: False
dashboard_enabled: False
-copy_admin_key: True
\ No newline at end of file
+copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: False
-dashboard_enabled: False
\ No newline at end of file
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
openstack_config: False
-dashboard_enabled: False
\ No newline at end of file
+dashboard_enabled: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
osd_pool_default_size: 1
openstack_config: False
dashboard_enabled: False
-copy_admin_key: True
\ No newline at end of file
+copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
osd_pool_default_size: 1
openstack_config: False
dashboard_enabled: False
-copy_admin_key: True
\ No newline at end of file
+copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
osd_pool_default_size: 1
openstack_config: False
dashboard_enabled: False
-copy_admin_key: True
\ No newline at end of file
+copy_admin_key: True
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-octopus
\ No newline at end of file
fake_args = ['arg']
fake_user = "fake-user"
fake_key = "/tmp/my-key"
- fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-octopus"
expected_command_list = ['docker',
'run',
'--rm',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
- 'docker.io/ceph/daemon:latest-luminous',
+ 'quay.ceph.io/ceph-ci/daemon:latest-octopus',
'-n',
"fake-user",
'-k',
fake_dest = "/fake/ceph"
fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
- fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-octopus"
expected_command_list = ['docker',
'run',
'--rm',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-authtool',
- 'docker.io/ceph/daemon:latest-luminous',
+ 'quay.ceph.io/ceph-ci/daemon:latest-octopus',
'--create-keyring',
fake_file_destination,
'--name',
fake_import_key = True
fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
- fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-octopus"
expected_command_list = [
['docker', # noqa E128
'run',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-authtool',
- 'docker.io/ceph/daemon:latest-luminous',
+ 'quay.ceph.io/ceph-ci/daemon:latest-octopus',
'--create-keyring', fake_file_destination,
'--name', fake_name,
'--add-key', fake_secret,
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
- 'docker.io/ceph/daemon:latest-luminous',
+ 'quay.ceph.io/ceph-ci/daemon:latest-octopus',
'-n', 'client.admin',
'-k', '/etc/ceph/fake.client.admin.keyring',
'--cluster', fake_cluster,
fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
# create_key passes (one for ceph-authtool and one for itself) itw own array so the expected result is an array within an array # noqa E501
- fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-octopus"
expected_command_list = [['docker', # noqa E128
'run',
'--rm',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-authtool',
- 'docker.io/ceph/daemon:latest-luminous',
+ 'quay.ceph.io/ceph-ci/daemon:latest-octopus',
'--create-keyring',
fake_file_destination,
'--name',
def test_delete_key_container(self):
fake_cluster = "fake"
fake_name = "client.fake"
- fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-octopus"
expected_command_list = [['docker', # noqa E128
'run',
'--rm',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
- 'docker.io/ceph/daemon:latest-luminous',
+ 'quay.ceph.io/ceph-ci/daemon:latest-octopus',
'-n', 'client.admin',
'-k', '/etc/ceph/fake.client.admin.keyring',
'--cluster', fake_cluster,
fake_user = "fake-user"
fake_key = "/tmp/my-key"
fake_output_format = "json"
- fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-octopus"
expected_command_list = [['docker', # noqa E128
'run',
'--rm',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
- 'docker.io/ceph/daemon:latest-luminous',
+ 'quay.ceph.io/ceph-ci/daemon:latest-octopus',
'-n', "fake-user",
'-k', "/tmp/my-key",
'--cluster', fake_cluster,
def test_get_key_container(self):
fake_cluster = "fake"
fake_name = "client.fake"
- fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-octopus"
fake_dest = "/fake/ceph"
fake_keyring_filename = fake_cluster + "." + fake_name + ".keyring"
fake_file_destination = os.path.join(fake_dest, fake_keyring_filename)
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
- 'docker.io/ceph/daemon:latest-luminous',
+ 'quay.ceph.io/ceph-ci/daemon:latest-octopus',
'-n', "client.admin",
'-k', "/etc/ceph/fake.client.admin.keyring", # noqa E501
'--cluster', fake_cluster,
fake_user = "mon."
fake_keyring_dirname = fake_cluster + "-" + fake_hostname
fake_key = os.path.join("/var/lib/ceph/mon/", fake_keyring_dirname, 'keyring') # noqa E501
- fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-octopus"
expected_command_list = [['docker', # noqa E128
'run',
'--rm',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
- 'docker.io/ceph/daemon:latest-luminous',
+ 'quay.ceph.io/ceph-ci/daemon:latest-octopus',
'-n', "mon.",
'-k', "/var/lib/ceph/mon/fake-mon01/keyring", # noqa E501
'--cluster', fake_cluster,
fake_cluster = "fake"
fake_user = "fake-user"
fake_key = "/tmp/my-key"
- fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-octopus"
expected_command_list = [['docker', # noqa E128
'run',
'--rm',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph',
- 'docker.io/ceph/daemon:latest-luminous',
+ 'quay.ceph.io/ceph-ci/daemon:latest-octopus',
'-n', "fake-user",
'-k', "/tmp/my-key",
'--cluster', fake_cluster,
fake_user_key = '/etc/ceph/ceph.client.admin.keyring'
fake_pool_name = 'foo'
fake_cluster_name = 'ceph'
-fake_container_image_name = 'docker.io/ceph/daemon:latest-luminous'
+fake_container_image_name = 'quay.ceph.io/ceph-ci/daemon:latest-octopus'
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'podman'})
def test_container_exec(self):
fake_binary = "ceph-volume"
- fake_container_image = "docker.io/ceph/daemon:latest"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image]
result = ceph_volume.container_exec(fake_binary, fake_container_image)
assert result == expected_command_list
def test_zap_osd_container(self):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda'}
- fake_container_image = "docker.io/ceph/daemon:latest"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
'--cluster',
'ceph',
def test_list_osd_container(self):
fake_module = MagicMock()
fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
- fake_container_image = "docker.io/ceph/daemon:latest"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
'--cluster',
'ceph',
def test_list_storage_inventory_container(self):
fake_module = MagicMock()
- fake_container_image = "docker.io/ceph/daemon:latest"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
'--cluster',
'ceph',
'cluster': 'ceph', }
fake_action = "create"
- fake_container_image = "docker.io/ceph/daemon:latest"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
'--cluster',
'ceph',
'cluster': 'ceph', }
fake_action = "prepare"
- fake_container_image = "docker.io/ceph/daemon:latest"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
'--cluster',
'ceph',
'cluster': 'ceph',
'batch_devices': ["/dev/sda", "/dev/sdb"]}
- fake_container_image = "docker.io/ceph/daemon:latest"
+ fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest"
expected_command_list = container_cmd + [fake_container_image,
'--cluster',
'ceph',
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/cephadm.yml --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/ceph} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:v15.2} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ansible-playbook -vv -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/docker-to-podman.yml --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image=ceph/daemon \
- ceph_docker_image_tag=latest-octopus \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
external_cluster_mon_ips=192.168.31.10,192.168.31.11,192.168.31.12 \
generate_fsid=false \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image=ceph/daemon \
- ceph_docker_image_tag=latest-octopus \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
bash -c "CEPH_STABLE_RELEASE={env:CEPH_STABLE_RELEASE:octopus} py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=0 \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=1 \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=2 \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=3 \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=4 \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=5 \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=6 \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=7 \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
[shrink-osd-multiple]
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill=0,1,2,3,4,5,6,7 \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
[testenv]
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:UPDATE_CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:UPDATE_CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:UPDATE_CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
# can be redployed to.
[purge]
commands=
- ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml --extra-vars "\
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
- "
+ ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\
ireallymeanit=yes \
remove_packages=yes \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
# re-setup lvm
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
remove_packages=yes \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
# test that the cluster can be redeployed in a healthy state
py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/shrink-osd.yml --extra-vars "\
ireallymeanit=yes \
osd_to_kill={env:OSD_TO_KILL:0} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
[shrink-mgr]
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
ireallymeanit=yes \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/secondary/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
"
- ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest}"
- ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest}"
+ ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rgw_multisite.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest}"
+ ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/rgw_multisite.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.ceph.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph-ci/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest}"
bash -c "cd {changedir}/secondary && vagrant destroy --force"
# clean rule after the scenario is complete
ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1 state=absent'
ireallymeanit=yes \
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
"
[testenv]
# configure lvm
!lvm_batch-!lvm_auto_discovery-!ooo_collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml
- rhcs: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
+ rhcs: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml
delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \
fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} \
ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} \
- ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} \
- ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} \
- ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-octopus} \
ceph_docker_registry_auth=True \
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \
ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \
all_daemons: py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
# handlers/idempotency test
- all_daemons,all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-octopus}" --extra-vars @ceph-override.json
+ all_daemons,all_in_one: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} fetch_directory={env:FETCH_DIRECTORY:{changedir}/fetch} ceph_stable_release={env:CEPH_STABLE_RELEASE:octopus} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-octopus}" --extra-vars @ceph-override.json
purge: {[purge]commands}
switch_to_containers: {[switch-to-containers]commands}