git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
ceph-osd: fix autodetection activation 1803/head
authorSébastien Han <seb@redhat.com>
Thu, 24 Aug 2017 09:17:56 +0000 (11:17 +0200)
committerGuillaume Abrioux <gabrioux@redhat.com>
Thu, 7 Sep 2017 15:47:37 +0000 (17:47 +0200)
Prior to this patch, this activation sequence for autodetection was
always skipped because we were asking to activate on a device without
partitions, which doesn't make sense.

We also fix the way we look up a device: since the data partition is
always numbered 1, we take the min element of the dict.

Closes: https://github.com/ceph/ceph-ansible/issues/1782
Signed-off-by: Sébastien Han <seb@redhat.com>
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
18 files changed:
roles/ceph-osd/tasks/activate_osds.yml
roles/ceph-osd/tasks/check_devices.yml
roles/ceph-osd/tasks/check_devices_auto.yml [deleted file]
roles/ceph-osd/tasks/main.yml
roles/ceph-osd/tasks/scenarios/collocated.yml
tests/conftest.py
tests/functional/centos/7/jrn-col-auto-dm/Vagrantfile [new symlink]
tests/functional/centos/7/jrn-col-auto-dm/group_vars/all [new file with mode: 0644]
tests/functional/centos/7/jrn-col-auto-dm/group_vars/osds [new file with mode: 0644]
tests/functional/centos/7/jrn-col-auto-dm/hosts [new file with mode: 0644]
tests/functional/centos/7/jrn-col-auto-dm/vagrant_variables.yml [new file with mode: 0644]
tests/functional/centos/7/jrn-col-auto/Vagrantfile [new symlink]
tests/functional/centos/7/jrn-col-auto/group_vars/all [new file with mode: 0644]
tests/functional/centos/7/jrn-col-auto/group_vars/osds [new file with mode: 0644]
tests/functional/centos/7/jrn-col-auto/hosts [new file with mode: 0644]
tests/functional/centos/7/jrn-col-auto/vagrant_variables.yml [new file with mode: 0644]
tests/functional/tests/osd/test_journal_collocation.py
tox.ini

index f420dfdafa9ab96d4d21d8dda576f2d49501ba9e..13a53e0fd8d7985a019e4905db9935927c794584 100644 (file)
@@ -2,18 +2,6 @@
 # NOTE (leseb) : this task is for disk devices only because of the explicit use of the first
 # partition.
 
-- name: automatically activate osd disk(s) without partitions
-  command: ceph-disk activate "/dev/{{ item.key | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1"
-  ignore_errors: true
-  with_dict: "{{ ansible_devices }}"
-  when:
-    - ansible_devices is defined
-    - item.value.removable == "0"
-    - item.value.partitions|count == 0
-    - item.value.holders|count == 0
-    - osd_scenario == 'collocated'
-    - osd_auto_discovery
-
 - name: activate osd(s) when device is a disk
   command: ceph-disk activate {{ item.1 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
   with_together:
   register: activate_osd_disk
   when:
     - not item.0.get("skipped")
-    - item.0.get("rc", 0) != 0
+    - item.0.get("rc", 0) != "0"
     - not osd_auto_discovery
-    - osd_scenario == 'non-collocated'
-
-- name: automatically activate osd disk(s) without partitions (dmcrypt)
-  command: ceph-disk activate --dmcrypt "/dev/{{ item.key }}"
-  ignore_errors: true
-  with_dict: "{{ ansible_devices }}"
-  when:
-    - ansible_devices is defined
-    - item.value.removable == "0"
-    - item.value.partitions|count == 0
-    - item.value.holders|count == 0
-    - osd_auto_discovery
-    - dmcrypt
-    - osd_scenario == 'collocated'
 
 - name: activate osd(s) when device is a disk (dmcrypt)
   command: ceph-disk activate --dmcrypt {{ item.1 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
   register: activate_osd_disk_dmcrypt
   when:
     - not item.0.get("skipped")
-    - item.0.get("rc", 0) != 0
+    - item.0.get("rc", 0) != "0"
     - not osd_auto_discovery
     - dmcrypt
-    - osd_scenario == 'non-collocated'
 
 # NOTE (leseb): we must do this because of
 # https://github.com/ansible/ansible/issues/4297
@@ -79,5 +52,5 @@
   failed_when: false
   when:
     - not item.0.get("skipped")
-    - item.0.get("rc", 0) == 0
+    - item.0.get("rc", 0) == "0"
     - not osd_auto_discovery
index 36bdfa092db4b26326d23ad4cec0519547a0f158..c6833e1fbb2266dd0f42450e6267c64128aac97a 100644 (file)
 # for SSD journals.
 
 - include: ./check_devices_static.yml
-  when:
-    - not osd_auto_discovery
-  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
-  static: False
-
-- include: ./check_devices_auto.yml
-  when:
-    - osd_auto_discovery
   # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
   static: False
 
diff --git a/roles/ceph-osd/tasks/check_devices_auto.yml b/roles/ceph-osd/tasks/check_devices_auto.yml
deleted file mode 100644 (file)
index 7240b67..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
----
-- name: check if the device is a partition (autodiscover disks)
-  shell: "readlink -f /dev/{{ item.key }} | egrep '/dev/([hsv]d[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}|fio[a-z]{1,2}[0-9]{1,2}$'"
-  with_dict: "{{ ansible_devices }}"
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: ispartition_results
-  when:
-    - ansible_devices is defined
-    - item.value.removable == 0
-
-- name: check if any of the raw partitions are mounted
-  shell: "mount |grep -sq '^/dev/{{ item.key }} '"
-  args:
-    warn: false
-  ignore_errors: yes
-  with_dict: "{{ ansible_devices }}"
-  register: mount_cmd
-  changed_when: false
-  always_run: true
-  when:
-    - ansible_devices is defined
-    - item.value.removable == 0
-    - item.value.partitions|count == 0
-    - item.value.holders|count == 0
-
-- name: fail if any of the raw partitions are mounted
-  fail:
-    msg: "OSD device autodetection failed because one or more raw partitions is mounted on the host."
-  with_items: "{{ mount_cmd.results }}"
-  when:
-    - not item.get("skipped")
-    - item.rc == 0
-
-- name: check the partition status of the osd disks (autodiscover disks)
-  shell: "parted --script /dev/{{ item.key }} print > /dev/null 2>&1"
-  with_dict: "{{ ansible_devices }}"
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: osd_partition_status_results
-  when:
-    - ansible_devices is defined
-    - item.value.removable == 0
-    - item.value.partitions|count == 0
-    - item.value.holders|count == 0
-
-- name: fix partitions gpt header or labels of the osd disks (autodiscover disks)
-  shell: "sgdisk --zap-all --clear --mbrtogpt -- '/dev/{{ item.0.item.key }}' || sgdisk --zap-all --clear --mbrtogpt -- '/dev/{{ item.0.item.key }}'"
-  with_together:
-    - "{{ osd_partition_status_results.results }}"
-    - "{{ ansible_devices }}"
-  changed_when: false
-  when:
-    - ansible_devices is defined
-    - not item.0.get("skipped")
-    - item.0.get("rc", 0) != 0
-    - item.1.value.removable == 0
-    - item.1.value.partitions|count == 0
-    - item.1.value.holders|count == 0
-    - not containerized_deployment
-
-- name: create gpt disk label
-  command: parted --script {{ item.1 }} mklabel gpt
-  with_together:
-    - "{{ osd_partition_status_results.results }}"
-    - "{{ ansible_devices }}"
-  changed_when: false
-  when:
-    - ansible_devices is defined
-    - not item.0.get("skipped")
-    - item.0.get("rc", 0) != 0
-    - item.1.value.removable == 0
-    - item.1.value.partitions|count == 0
-    - item.1.value.holders|count == 0
-    - containerized_deployment
-
-- name: check if a partition named 'ceph' exists (autodiscover disks)
-  shell: "parted --script /dev/{{ item.key }} print | egrep -sq '^ 1.*ceph'"
-  with_dict: "{{ ansible_devices }}"
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: parted_results
-  when:
-    - ansible_devices is defined
-    - item.value.removable == 0
index 457d23e93af33c951f79a36f49959162d55befdb..c74487dd2ed2da2ead8228edab219fe83d2c3e2a 100644 (file)
@@ -8,6 +8,17 @@
 
 - include: ceph_disk_cli_options_facts.yml
 
+- name: generate device list when osd_auto_discovery
+  set_fact:
+    devices: "{{ devices | default([]) + [ item.key | regex_replace('^', '/dev/') ] }}"
+  with_dict: "{{ ansible_devices }}"
+  when:
+    - ansible_devices is defined
+    - item.value.removable == "0"
+    - item.value.partitions|count == 0
+    - item.value.holders|count == 0
+    - osd_auto_discovery
+
 - include: check_devices.yml
 
 - include: ./scenarios/collocated.yml
index 7dea12ddf642ea997631a2606ca68c4096adddb4..f62f83ac8d56c59acf6a41a39394f46ec069fa21 100644 (file)
     - osd_auto_discovery
     - containerized_deployment
 
-# NOTE (alahouze): if the device is a partition, the parted command below has
-# failed, this is why we check if the device is a partition too.
-- name: automatic prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) without partitions with collocated osd data and journal
-  command: "ceph-disk prepare {{ ceph_disk_cli_options }} /dev/{{ item.key }}"
-  register: prepared_osds
-  with_dict: "{{ ansible_devices }}"
-  when:
-    - ansible_devices is defined
-    - item.value.removable == "0"
-    - item.value.partitions|count == 0
-    - item.value.holders|count == 0
-    - osd_auto_discovery
-    - not containerized_deployment
-
 - name: manually prepare ceph "{{ osd_objectstore }}" non-containerized osd disk(s) with collocated osd data and journal
   command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }}"
   with_together:
@@ -80,7 +66,6 @@
     - not item.1.get("skipped")
     - item.0.get("rc", 0) != 0
     - item.1.get("rc", 0) != 0
-    - not osd_auto_discovery
     - not containerized_deployment
 
 - include: ../activate_osds.yml
index c5fc80c0394fb898a140442888bc29ba56f568a8..795472815b848ab056c9d5c70c050aa3cd44624c 100644 (file)
@@ -19,6 +19,7 @@ def node(host, request):
     ceph_stable_release = os.environ.get("CEPH_STABLE_RELEASE", "kraken")
     node_type = ansible_vars["group_names"][0]
     docker = ansible_vars.get("docker")
+    osd_auto_discovery = ansible_vars.get("osd_auto_discovery")
     lvm_scenario = ansible_vars.get("osd_scenario") == 'lvm'
     if not request.node.get_marker(node_type) and not request.node.get_marker('all'):
         pytest.skip("Not a valid test for node type: %s" % node_type)
@@ -50,7 +51,10 @@ def node(host, request):
     address = host.interface("eth1").addresses[0]
     subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
     num_mons = len(ansible_vars["groups"]["mons"])
-    num_devices = len(ansible_vars.get("devices", []))
+    if osd_auto_discovery:
+        num_devices = 3
+    else:
+        num_devices = len(ansible_vars.get("devices", []))
     if not num_devices:
         num_devices = len(ansible_vars.get("lvm_volumes", []))
     num_osd_hosts = len(ansible_vars["groups"]["osds"])
diff --git a/tests/functional/centos/7/jrn-col-auto-dm/Vagrantfile b/tests/functional/centos/7/jrn-col-auto-dm/Vagrantfile
new file mode 120000 (symlink)
index 0000000..dfd7436
--- /dev/null
@@ -0,0 +1 @@
+../../../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/centos/7/jrn-col-auto-dm/group_vars/all b/tests/functional/centos/7/jrn-col-auto-dm/group_vars/all
new file mode 100644 (file)
index 0000000..f091491
--- /dev/null
@@ -0,0 +1,21 @@
+---
+
+ceph_origin: repository
+ceph_repository: community
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: filestore
+dmcrypt: true
+osd_auto_discovery: true
+osd_scenario: collocated
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
diff --git a/tests/functional/centos/7/jrn-col-auto-dm/group_vars/osds b/tests/functional/centos/7/jrn-col-auto-dm/group_vars/osds
new file mode 100644 (file)
index 0000000..ed9b23a
--- /dev/null
@@ -0,0 +1,4 @@
+---
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
diff --git a/tests/functional/centos/7/jrn-col-auto-dm/hosts b/tests/functional/centos/7/jrn-col-auto-dm/hosts
new file mode 100644 (file)
index 0000000..f6a265a
--- /dev/null
@@ -0,0 +1,5 @@
+[mons]
+mon0
+
+[osds]
+osd0
diff --git a/tests/functional/centos/7/jrn-col-auto-dm/vagrant_variables.yml b/tests/functional/centos/7/jrn-col-auto-dm/vagrant_variables.yml
new file mode 100644 (file)
index 0000000..0dcf9d6
--- /dev/null
@@ -0,0 +1,69 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location.  vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
diff --git a/tests/functional/centos/7/jrn-col-auto/Vagrantfile b/tests/functional/centos/7/jrn-col-auto/Vagrantfile
new file mode 120000 (symlink)
index 0000000..dfd7436
--- /dev/null
@@ -0,0 +1 @@
+../../../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/centos/7/jrn-col-auto/group_vars/all b/tests/functional/centos/7/jrn-col-auto/group_vars/all
new file mode 100644 (file)
index 0000000..f83598c
--- /dev/null
@@ -0,0 +1,20 @@
+---
+
+ceph_origin: repository
+ceph_repository: community
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+radosgw_interface: eth1
+journal_size: 100
+osd_objectstore: filestore
+osd_auto_discovery: true
+osd_scenario: collocated
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
diff --git a/tests/functional/centos/7/jrn-col-auto/group_vars/osds b/tests/functional/centos/7/jrn-col-auto/group_vars/osds
new file mode 100644 (file)
index 0000000..ed9b23a
--- /dev/null
@@ -0,0 +1,4 @@
+---
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
diff --git a/tests/functional/centos/7/jrn-col-auto/hosts b/tests/functional/centos/7/jrn-col-auto/hosts
new file mode 100644 (file)
index 0000000..f6a265a
--- /dev/null
@@ -0,0 +1,5 @@
+[mons]
+mon0
+
+[osds]
+osd0
diff --git a/tests/functional/centos/7/jrn-col-auto/vagrant_variables.yml b/tests/functional/centos/7/jrn-col-auto/vagrant_variables.yml
new file mode 100644 (file)
index 0000000..0dcf9d6
--- /dev/null
@@ -0,0 +1,69 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location.  vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
index 3c6d4d2349a327df0f4413068504a03089f89e07..f651de4537e086934d77e13547d86906beec85bf 100644 (file)
@@ -3,5 +3,8 @@ class TestOSD(object):
 
     def test_osds_are_all_collocated(self, node, host):
         # TODO: figure out way to paramaterize node['vars']['devices'] for this test
+        osd_auto_discovery = node["vars"].get('osd_auto_discovery', False)
+        if osd_auto_discovery:
+            node["vars"]["devices"] = ["/dev/sda", "/dev/sdb", "/dev/sdc"] # Hardcoded since we can't retrieve the devices list generated during playbook run
         for device in node["vars"]["devices"]:
             assert host.check_output("sudo blkid -s PARTLABEL -o value %s2" % device) in ["ceph journal", "ceph block"]
diff --git a/tox.ini b/tox.ini
index aa4cf3a158a969ae89e8cb829a0dc67b53671f0e..94d1232f4d04252843602eb66cc0035a1a1a541e 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
 envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster,switch_to_containers}
-  {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd}
+  {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_cluster,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,journal_collocation_auto,journal_collocation_auto_dmcrypt}
 
 skipsdist = True
 
@@ -152,6 +152,10 @@ changedir=
   xenial_cluster: {toxinidir}/tests/functional/ubuntu/16.04/cluster
   # tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario
   journal_collocation: {toxinidir}/tests/functional/centos/7/jrn-col
+  # tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario with disk autodiscovery
+  journal_collocation_auto: {toxinidir}/tests/functional/centos/7/jrn-col-auto
+  # tests a 1 mon 1 osd centos7 cluster using collocated OSD scenario with encrypted disk autodiscovery
+  journal_collocation_auto_dmcrypt: {toxinidir}/tests/functional/centos/7/jrn-col-auto-dm
   # tests a 1 mon 1 osd centos7 cluster using dmcrypt non-collocated OSD scenario
   dmcrypt_journal: {toxinidir}/tests/functional/centos/7/crypt-ded-jrn
   # tests a 1 mon 1 osd centos7 cluster using dmcrypt collocated OSD scenario