ceph_stable_release: jewel
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
- monitor_interface: ens6
+ monitor_interface: eth1
devices:
- '/dev/sda'
- '/dev/sdb'
The ``node`` fixture contains a few useful pieces of information about the node
where the test is being executed; this information is captured once, before tests run:
-* ``address``: The IP for the ``ens6`` interface
+* ``address``: The IP for the ``eth1`` interface
* ``subnet``: The subnet that ``address`` belongs to
* ``vars``: all the Ansible vars set for the current run
* ``osd_ids``: a list of all the OSD IDs
container_binary = ""
osd_ids = []
osds = []
- public_interface = "ens6"
- cluster_interface = "ens7"
ansible_vars = host.ansible.get_variables()
ansible_facts = host.ansible("setup")
group_names = ansible_vars["group_names"]
fsid = ansible_vars.get("fsid")
+ ansible_distribution = ansible_facts["ansible_facts"]["ansible_distribution"]
+
subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
num_mons = len(ansible_vars["groups"]["mons"])
if osd_auto_discovery:
osds_per_device = ansible_vars.get("osds_per_device", 1)
num_osds = num_osds * osds_per_device
+ if ansible_distribution == "RedHat":
+ public_interface = "ens6"
+ cluster_interface = "ens7"
+ else:
+ public_interface = "eth1"
+ cluster_interface = "eth2"
+
# If number of devices doesn't map to number of OSDs, allow tests to define
# that custom number, defaulting it to ``num_devices``
num_osds = ansible_vars.get('num_osds', num_osds)
cluster: ceph
public_network: "192.168.55.0/24"
cluster_network: "192.168.56.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
cluster: ceph
public_network: "192.168.53.0/24"
cluster_network: "192.168.54.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
docker: True
containerized_deployment: True
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.17.0/24"
[mons]
mon0 monitor_address=192.168.17.10
-mon1 monitor_interface=ens6
+mon1 monitor_interface=eth1
mon2 monitor_address=192.168.17.12
[osds]
[mons]
mon0 monitor_address=192.168.17.10
-mon1 monitor_interface=ens6
+mon1 monitor_interface=eth1
mon2 monitor_address=192.168.17.12
[osds]
ceph_repository: community
public_network: "192.168.1.0/24"
cluster_network: "192.168.2.0/24"
-radosgw_interface: ens6
+radosgw_interface: eth1
ceph_conf_overrides:
global:
osd_pool_default_size: 1
[mons]
mon0 monitor_address=192.168.1.10
-mon1 monitor_interface=ens6
+mon1 monitor_interface=eth1
mon2 monitor_address=192.168.1.12
[osds]
[mons]
mon0 monitor_address=192.168.1.10
-mon1 monitor_interface=ens6
+mon1 monitor_interface=eth1
mon2 monitor_address=192.168.1.12
[osds]
[mons]
mon0 monitor_address=192.168.1.10
-mon1 monitor_interface=ens6
+mon1 monitor_interface=eth1
mon2 monitor_address=192.168.1.12
[osds]
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
containerized_deployment: True
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: ens6
+monitor_interface: eth1
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
ceph_repository: community
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: ens6
+monitor_interface: eth1
osd_objectstore: "bluestore"
osd_scenario: lvm
copy_admin_key: true
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
docker: True
containerized_deployment: True
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
radosgw_num_instances: 2
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
---
containerized_deployment: False
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.15.0/24"
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
crush_device_class: test
memory: 2048
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
osd_objectstore: "bluestore"
crush_device_class: test
osd_scenario: lvm
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
crush_device_class: test
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
osd_objectstore: "bluestore"
crush_device_class: test
osd_scenario: lvm
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
ceph_repository: community
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
journal_size: 100
osd_objectstore: "filestore"
copy_admin_key: true
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
ceph_repository: community
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
journal_size: 100
osd_objectstore: "filestore"
copy_admin_key: true
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
docker: True
containerized_deployment: True
-monitor_interface: "ens6"
-radosgw_interface: "ens6"
+monitor_interface: "{{ 'ens6' if ansible_distribution == 'RedHat' else 'eth1' }}"
+radosgw_interface: "{{ 'ens6' if ansible_distribution == 'RedHat' else 'eth1' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.30.0/24"
cluster: ceph
public_network: "192.168.105.0/24"
cluster_network: "192.168.106.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
cluster: ceph
public_network: "192.168.107.0/24"
cluster_network: "192.168.108.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
cluster: ceph
public_network: "192.168.101.0/24"
cluster_network: "192.168.102.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
cluster: ceph
public_network: "192.168.103.0/24"
cluster_network: "192.168.104.0/24"
-monitor_interface: ens6
-radosgw_interface: ens6
+monitor_interface: eth1
+radosgw_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
- name: set MTU on eth0
command: "ifconfig eth0 mtu 1400 up"
- - name: set MTU on ens6
- command: "ifconfig ens6 mtu 1400 up"
+ - name: set MTU on eth1
+ command: "ifconfig eth1 mtu 1400 up"
- name: install docker
package:
docker: True
containerized_deployment: True
-monitor_interface: ens6
+monitor_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.17.0/24"
[mons]
mon0 monitor_address=192.168.1.10
-mon1 monitor_interface=ens6
+mon1 monitor_interface=eth1
mon2 monitor_address=192.168.1.12
[osds]
[mons]
mon0 monitor_address=192.168.1.10
-mon1 monitor_interface=ens6
+mon1 monitor_interface=eth1
mon2 monitor_address=192.168.1.12
[osds]
[mons]
mon0 monitor_address=192.168.1.10
-mon1 monitor_interface=ens6
+mon1 monitor_interface=eth1
mon2 monitor_address=192.168.1.12
[osds]
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
docker: True
containerized_deployment: True
-monitor_interface: ens6
+monitor_interface: eth1
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.73.0/24"
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
@pytest.mark.no_docker
def test_rgw_http_endpoint(self, node, host):
- # rgw frontends ip_addr is configured on ens6
- ip_addr = host.interface("ens6").addresses[0]
+ # rgw frontends ip_addr is configured on eth1
+ ip_addr = host.interface("eth1").addresses[0]
for i in range(int(node["radosgw_num_instances"])):
assert host.socket(
"tcp://{ip_addr}:{port}".format(ip_addr=ip_addr,
memory: 1024
# Ethernet interface name
-# use ens6 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'ens6'
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
# Disks
# For Xenial use disks: [ '/dev/sdb', '/dev/sdc' ]