]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Add ipv6 libvirt support scenario in vagrant
authorTeoman ONAY <tonay@ibm.com>
Wed, 7 Jun 2023 15:10:52 +0000 (17:10 +0200)
committerTeoman ONAY <tonay@redhat.com>
Thu, 29 Jun 2023 13:02:26 +0000 (15:02 +0200)
Addition of ipv6 support in vagrant/libvirt and an all_daemons_ipv6 scenario.
Some typo fixes

Signed-off-by: Teoman ONAY <tonay@ibm.com>
(cherry picked from commit 8f3bdd855989b9d89cc89d764647cdeb97adaf8e)

24 files changed:
Vagrantfile
tests/functional/all_daemons_ipv6/Vagrantfile [new symlink]
tests/functional/all_daemons_ipv6/ceph-override.json [new file with mode: 0644]
tests/functional/all_daemons_ipv6/container/Vagrantfile [new symlink]
tests/functional/all_daemons_ipv6/container/ceph-override.json [new symlink]
tests/functional/all_daemons_ipv6/container/group_vars/all [new file with mode: 0644]
tests/functional/all_daemons_ipv6/container/group_vars/clients [new file with mode: 0644]
tests/functional/all_daemons_ipv6/container/group_vars/iscsigws [new file with mode: 0644]
tests/functional/all_daemons_ipv6/container/group_vars/mons [new file with mode: 0644]
tests/functional/all_daemons_ipv6/container/group_vars/osds [new file with mode: 0644]
tests/functional/all_daemons_ipv6/container/group_vars/rgws [new file with mode: 0644]
tests/functional/all_daemons_ipv6/container/hosts [new file with mode: 0644]
tests/functional/all_daemons_ipv6/container/vagrant_variables.yml [new file with mode: 0644]
tests/functional/all_daemons_ipv6/group_vars/all [new file with mode: 0644]
tests/functional/all_daemons_ipv6/group_vars/clients [new file with mode: 0644]
tests/functional/all_daemons_ipv6/group_vars/iscsigws [new file with mode: 0644]
tests/functional/all_daemons_ipv6/group_vars/mons [new file with mode: 0644]
tests/functional/all_daemons_ipv6/group_vars/nfss [new file with mode: 0644]
tests/functional/all_daemons_ipv6/group_vars/osds [new file with mode: 0644]
tests/functional/all_daemons_ipv6/group_vars/rgws [new file with mode: 0644]
tests/functional/all_daemons_ipv6/hosts [new file with mode: 0644]
tests/functional/all_daemons_ipv6/vagrant_variables.yml [new file with mode: 0644]
tests/requirements.txt
tox.ini

index 72e891f1b4446cd14472e7f0cfa89e7e09a24767..4d2f09884fdaf1b3d3afdb8bc2fbd54adbad0508 100644 (file)
@@ -2,6 +2,7 @@
 # vi: set ft=ruby :
 
 require 'yaml'
+require 'resolv'
 VAGRANTFILE_API_VERSION = '2'
 
 if File.file?(File.join(File.dirname(__FILE__), 'vagrant_variables.yml')) then
@@ -40,6 +41,8 @@ DEBUG           = settings['debug']
 ASSIGN_STATIC_IP = !(BOX == 'openstack' or BOX == 'linode')
 DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false)
 
+"#{PUBLIC_SUBNET}" =~ Resolv::IPv6::Regex ? IPV6 = true : IPV6 = false
+
 $last_ip_pub_digit   = 9
 $last_ip_cluster_digit = 9
 
@@ -69,18 +72,26 @@ ansible_provision = proc do |ansible|
     'monitoring'   => (0..GRAFANA - 1).map { |j| "#{LABEL_PREFIX}grafana#{j}" }
   }
 
-  ansible.extra_vars = {
-      cluster_network: "#{CLUSTER_SUBNET}.0/24",
-      journal_size: 100,
-      public_network: "#{PUBLIC_SUBNET}.0/24",
-  }
+  if IPV6 then
+    ansible.extra_vars = {
+        cluster_network: "#{CLUSTER_SUBNET}/64",
+        journal_size: 100,
+        public_network: "#{PUBLIC_SUBNET}/64",
+    }
+  else
+    ansible.extra_vars = {
+        cluster_network: "#{CLUSTER_SUBNET}.0/24",
+        journal_size: 100,
+        public_network: "#{PUBLIC_SUBNET}.0/24",
+    }
+  end
 
   # In a production deployment, these should be secret
   if DOCKER then
     ansible.extra_vars = ansible.extra_vars.merge({
       containerized_deployment: 'true',
       monitor_interface: ETH,
-      ceph_mon_docker_subnet: "#{PUBLIC_SUBNET}.0/24",
+      ceph_mon_docker_subnet: ansible.extra_vars[:public_network],
       devices: settings['disks'],
       radosgw_interface: ETH,
       generate_fsid: 'true',
@@ -190,12 +201,13 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   (0..NMONS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
       mon.vm.hostname = "#{LABEL_PREFIX}mon#{i}"
-      if ASSIGN_STATIC_IP
-        mon.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+      if ASSIGN_STATIC_IP && !IPV6
+         mon.vm.network :private_network,
+         :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
       end
+
       # Virtualbox
-      mon.vm.provider :virtualbox do |vb|
+      mon.vm.provider :virtualbox do |vb,override|
         vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
       end
 
@@ -205,9 +217,19 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end
 
       # Libvirt
-      mon.vm.provider :libvirt do |lv|
+      mon.vm.provider :libvirt do |lv,override|
         lv.memory = MEMORY
         lv.random_hostname = true
+       if IPV6 then
+         override.vm.network :private_network,
+         :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+         :libvirt__ipv6_prefix => "64",
+         :libvirt__dhcp_enabled => false,
+         :libvirt__forward_mode => "veryisolated",
+         :libvirt__network_name => "ipv6-public-network",
+         :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+         :netmask => "64"
+       end
       end
 
       # Parallels
@@ -225,9 +247,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   (0..GRAFANA - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}grafana#{i}" do |grf|
       grf.vm.hostname = "#{LABEL_PREFIX}grafana#{i}"
-      if ASSIGN_STATIC_IP
-        grf.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+      if ASSIGN_STATIC_IP && !IPV6
+         grf.vm.network :private_network,
+         :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
       end
       # Virtualbox
       grf.vm.provider :virtualbox do |vb|
@@ -240,9 +262,19 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end
 
       # Libvirt
-      grf.vm.provider :libvirt do |lv|
+      grf.vm.provider :libvirt do |lv,override|
         lv.memory = MEMORY
         lv.random_hostname = true
+       if IPV6 then
+         override.vm.network :private_network,
+         :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+         :libvirt__ipv6_prefix => "64",
+         :libvirt__dhcp_enabled => false,
+         :libvirt__forward_mode => "veryisolated",
+         :libvirt__network_name => "ipv6-public-network",
+         :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+         :netmask => "64"
+       end
       end
 
       # Parallels
@@ -260,9 +292,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   (0..MGRS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}mgr#{i}" do |mgr|
       mgr.vm.hostname = "#{LABEL_PREFIX}mgr#{i}"
-      if ASSIGN_STATIC_IP
-        mgr.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+      if ASSIGN_STATIC_IP && !IPV6
+         mgr.vm.network :private_network,
+         :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
       end
       # Virtualbox
       mgr.vm.provider :virtualbox do |vb|
@@ -275,9 +307,19 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end
 
       # Libvirt
-      mgr.vm.provider :libvirt do |lv|
+      mgr.vm.provider :libvirt do |lv,override|
         lv.memory = MEMORY
         lv.random_hostname = true
+       if IPV6 then
+         override.vm.network :private_network,
+         :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+         :libvirt__ipv6_prefix => "64",
+         :libvirt__dhcp_enabled => false,
+         :libvirt__forward_mode => "veryisolated",
+         :libvirt__network_name => "ipv6-public-network",
+         :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+         :netmask => "64"
+       end
       end
 
       # Parallels
@@ -296,9 +338,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
       client.vm.box = CLIENT_BOX
       client.vm.hostname = "#{LABEL_PREFIX}client#{i}"
-      if ASSIGN_STATIC_IP
-        client.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+      if ASSIGN_STATIC_IP && !IPV6
+         client.vm.network :private_network,
+         :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
       end
       # Virtualbox
       client.vm.provider :virtualbox do |vb|
@@ -311,9 +353,19 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end
 
       # Libvirt
-      client.vm.provider :libvirt do |lv|
+      client.vm.provider :libvirt do |lv,override|
         lv.memory = MEMORY
         lv.random_hostname = true
+       if IPV6 then
+         override.vm.network :private_network,
+         :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+         :libvirt__ipv6_prefix => "64",
+         :libvirt__dhcp_enabled => false,
+         :libvirt__forward_mode => "veryisolated",
+         :libvirt__network_name => "ipv6-public-network",
+         :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+         :netmask => "64"
+       end
       end
 
       # Parallels
@@ -331,9 +383,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   (0..NRGWS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
       rgw.vm.hostname = "#{LABEL_PREFIX}rgw#{i}"
-      if ASSIGN_STATIC_IP
-        rgw.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+      if ASSIGN_STATIC_IP && !IPV6
+         rgw.vm.network :private_network,
+         :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
       end
 
       # Virtualbox
@@ -347,9 +399,19 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end
 
       # Libvirt
-      rgw.vm.provider :libvirt do |lv|
+      rgw.vm.provider :libvirt do |lv,override|
         lv.memory = MEMORY
         lv.random_hostname = true
+       if IPV6 then
+         override.vm.network :private_network,
+         :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+         :libvirt__ipv6_prefix => "64",
+         :libvirt__dhcp_enabled => false,
+         :libvirt__forward_mode => "veryisolated",
+         :libvirt__network_name => "ipv6-public-network",
+         :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+         :netmask => "64"
+       end
       end
 
       # Parallels
@@ -367,9 +429,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   (0..NNFSS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}nfs#{i}" do |nfs|
       nfs.vm.hostname = "#{LABEL_PREFIX}nfs#{i}"
-      if ASSIGN_STATIC_IP
-        nfs.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+      if ASSIGN_STATIC_IP && !IPV6
+          nfs.vm.network :private_network,
+         :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
       end
 
       # Virtualbox
@@ -383,9 +445,19 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end
 
       # Libvirt
-      nfs.vm.provider :libvirt do |lv|
+      nfs.vm.provider :libvirt do |lv,override|
         lv.memory = MEMORY
         lv.random_hostname = true
+       if IPV6 then
+         override.vm.network :private_network,
+         :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+         :libvirt__ipv6_prefix => "64",
+         :libvirt__dhcp_enabled => false,
+         :libvirt__forward_mode => "veryisolated",
+         :libvirt__network_name => "ipv6-public-network",
+         :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+         :netmask => "64"
+       end
       end
 
       # Parallels
@@ -403,9 +475,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   (0..NMDSS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
       mds.vm.hostname = "#{LABEL_PREFIX}mds#{i}"
-      if ASSIGN_STATIC_IP
-        mds.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+      if ASSIGN_STATIC_IP && !IPV6
+         mds.vm.network :private_network,
+          :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
       end
       # Virtualbox
       mds.vm.provider :virtualbox do |vb|
@@ -418,9 +490,19 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end
 
       # Libvirt
-      mds.vm.provider :libvirt do |lv|
+      mds.vm.provider :libvirt do |lv,override|
         lv.memory = MEMORY
         lv.random_hostname = true
+       if IPV6 then
+         override.vm.network :private_network,
+         :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+         :libvirt__ipv6_prefix => "64",
+         :libvirt__dhcp_enabled => false,
+         :libvirt__forward_mode => "veryisolated",
+         :libvirt__network_name => "ipv6-public-network",
+         :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+         :netmask => "64"
+       end
       end
       # Parallels
       mds.vm.provider "parallels" do |prl|
@@ -437,9 +519,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   (0..NRBD_MIRRORS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}rbd-mirror#{i}" do |rbd_mirror|
       rbd_mirror.vm.hostname = "#{LABEL_PREFIX}rbd-mirror#{i}"
-      if ASSIGN_STATIC_IP
-        rbd_mirror.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+      if ASSIGN_STATIC_IP && !IPV6
+         rbd_mirror.vm.network :private_network,
+          :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
       end
       # Virtualbox
       rbd_mirror.vm.provider :virtualbox do |vb|
@@ -452,9 +534,19 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end
 
       # Libvirt
-      rbd_mirror.vm.provider :libvirt do |lv|
+      rbd_mirror.vm.provider :libvirt do |lv,override|
         lv.memory = MEMORY
         lv.random_hostname = true
+       if IPV6 then
+         override.vm.network :private_network,
+         :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+         :libvirt__ipv6_prefix => "64",
+         :libvirt__dhcp_enabled => false,
+         :libvirt__forward_mode => "veryisolated",
+         :libvirt__network_name => "ipv6-public-network",
+         :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+         :netmask => "64"
+       end
       end
       # Parallels
       rbd_mirror.vm.provider "parallels" do |prl|
@@ -471,9 +563,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   (0..NISCSI_GWS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}iscsi-gw#{i}" do |iscsi_gw|
       iscsi_gw.vm.hostname = "#{LABEL_PREFIX}iscsi-gw#{i}"
-      if ASSIGN_STATIC_IP
-        iscsi_gw.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+      if ASSIGN_STATIC_IP && !IPV6
+         iscsi_gw.vm.network :private_network,
+          :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
       end
       # Virtualbox
       iscsi_gw.vm.provider :virtualbox do |vb|
@@ -486,9 +578,19 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end
 
       # Libvirt
-      iscsi_gw.vm.provider :libvirt do |lv|
+      iscsi_gw.vm.provider :libvirt do |lv,override|
         lv.memory = MEMORY
         lv.random_hostname = true
+       if IPV6 then
+          override.vm.network :private_network,
+         :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+         :libvirt__ipv6_prefix => "64",
+         :libvirt__dhcp_enabled => false,
+         :libvirt__forward_mode => "veryisolated",
+         :libvirt__network_name => "ipv6-public-network",
+         :ip => "#{PUBLIC_SUBNET}#{$last_ip_pub_digit+=1}",
+         :netmask => "64"
+       end
       end
       # Parallels
       iscsi_gw.vm.provider "parallels" do |prl|
@@ -505,11 +607,11 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   (0..NOSDS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
       osd.vm.hostname = "#{LABEL_PREFIX}osd#{i}"
-      if ASSIGN_STATIC_IP
-        osd.vm.network :private_network,
-          ip: "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
-        osd.vm.network :private_network,
-          ip: "#{CLUSTER_SUBNET}.#{$last_ip_cluster_digit+=1}"
+      if ASSIGN_STATIC_IP && !IPV6
+         osd.vm.network :private_network,
+          :ip => "#{PUBLIC_SUBNET}.#{$last_ip_pub_digit+=1}"
+         osd.vm.network :private_network,
+          :ip => "#{CLUSTER_SUBNET}.#{$last_ip_cluster_digit+=1}"
       end
       # Virtualbox
       osd.vm.provider :virtualbox do |vb|
@@ -550,7 +652,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
 
       # Libvirt
       driverletters = ('a'..'z').to_a
-      osd.vm.provider :libvirt do |lv|
+      osd.vm.provider :libvirt do |lv,override|
         # always make /dev/sd{a/b/c} so that CI can ensure that
         # virtualbox and libvirt will have the same devices to use for OSDs
         (0..2).each do |d|
@@ -558,6 +660,22 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
         end
         lv.memory = MEMORY
         lv.random_hostname = true
+       if IPV6 then
+         override.vm.network :private_network,
+           :libvirt__ipv6_address => "#{PUBLIC_SUBNET}",
+           :libvirt__ipv6_prefix => "64",
+           :libvirt__dhcp_enabled => false,
+           :libvirt__forward_mode => "veryisolated",
+           :libvirt__network_name => "ipv6-public-network",
+           :netmask => "64"
+         override.vm.network :private_network,
+           :libvirt__ipv6_address => "#{CLUSTER_SUBNET}",
+           :libvirt__ipv6_prefix => "64",
+           :libvirt__dhcp_enabled => false,
+           :libvirt__forward_mode => "veryisolated",
+           :libvirt__network_name => "ipv6-cluster-network",
+           :netmask => "64"
+       end
       end
 
       # Parallels
diff --git a/tests/functional/all_daemons_ipv6/Vagrantfile b/tests/functional/all_daemons_ipv6/Vagrantfile
new file mode 120000 (symlink)
index 0000000..706a5bb
--- /dev/null
@@ -0,0 +1 @@
+../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/all_daemons_ipv6/ceph-override.json b/tests/functional/all_daemons_ipv6/ceph-override.json
new file mode 100644 (file)
index 0000000..0a87e1a
--- /dev/null
@@ -0,0 +1,40 @@
+{
+  "ceph_conf_overrides": {
+    "global": {
+      "auth_allow_insecure_global_id_reclaim": false,
+      "osd_pool_default_pg_num": 12,
+      "osd_pool_default_size": 1,
+      "mon_allow_pool_size_one": true,
+      "mon_warn_on_pool_no_redundancy": false,
+      "mon_max_pg_per_osd": 300
+    }
+  },
+  "cephfs_pools": [
+    {
+      "name": "cephfs_data",
+      "pg_num": 8,
+      "pgp_num": 8,
+      "rule_name": "replicated_rule",
+      "type": 1,
+      "erasure_profile": "",
+      "expected_num_objects": "",
+      "application": "cephfs",
+      "size": 2,
+      "min_size": 0
+    },
+    {
+      "name": "cephfs_metadata",
+      "pg_num": 8,
+      "pgp_num": 8,
+      "rule_name": "replicated_rule",
+      "type": 1,
+      "erasure_profile": "",
+      "expected_num_objects": "",
+      "application": "cephfs",
+      "size": 2,
+      "min_size": 0
+    }
+  ],
+  "ceph_mon_docker_memory_limit": "2g",
+  "radosgw_num_instances": 2
+}
diff --git a/tests/functional/all_daemons_ipv6/container/Vagrantfile b/tests/functional/all_daemons_ipv6/container/Vagrantfile
new file mode 120000 (symlink)
index 0000000..16076e4
--- /dev/null
@@ -0,0 +1 @@
+../../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/all_daemons_ipv6/container/ceph-override.json b/tests/functional/all_daemons_ipv6/container/ceph-override.json
new file mode 120000 (symlink)
index 0000000..772bdc5
--- /dev/null
@@ -0,0 +1 @@
+../ceph-override.json
\ No newline at end of file
diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/all b/tests/functional/all_daemons_ipv6/container/group_vars/all
new file mode 100644 (file)
index 0000000..d0c9ee6
--- /dev/null
@@ -0,0 +1,46 @@
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_mon_docker_subnet: "{{ public_network }}"
+ip_version: ipv6
+public_network: "fdec:f1fb:29cd:6940::/64"
+cluster_network: "fdec:f1fb:29cd:7120::/64"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+  global:
+    auth_allow_insecure_global_id_reclaim: false
+    mon_allow_pool_size_one: true
+    mon_warn_on_pool_no_redundancy: false
+    osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  size: 1
+  target_size_ratio: 0.2
+openstack_cinder_pool:
+  name: "volumes"
+  rule_name: "HDD"
+  size: 1
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
+docker_pull_timeout: 600s
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+mds_max_mds: 2
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.io
+ceph_docker_image: ceph/daemon-base
+ceph_docker_image_tag: latest-main
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/clients b/tests/functional/all_daemons_ipv6/container/group_vars/clients
new file mode 100644 (file)
index 0000000..ec0bb3e
--- /dev/null
@@ -0,0 +1,13 @@
+---
+user_config: True
+copy_admin_key: True
+test:
+  name: "test"
+  rule_name: "HDD"
+  size: 1
+test2:
+  name: "test2"
+  size: 1
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"
diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/iscsigws b/tests/functional/all_daemons_ipv6/container/group_vars/iscsigws
new file mode 100644 (file)
index 0000000..8d0932a
--- /dev/null
@@ -0,0 +1,2 @@
+---
+generate_crt: True
diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/mons b/tests/functional/all_daemons_ipv6/container/group_vars/mons
new file mode 100644 (file)
index 0000000..7b31aa9
--- /dev/null
@@ -0,0 +1,11 @@
+---
+create_crush_tree: True
+crush_rule_config: True
+crush_rule_hdd:
+  name: HDD
+  root: default
+  type: host
+  class: hdd
+  default: true
+crush_rules:
+  - "{{ crush_rule_hdd }}"
diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/osds b/tests/functional/all_daemons_ipv6/container/group_vars/osds
new file mode 100644 (file)
index 0000000..ad25b58
--- /dev/null
@@ -0,0 +1,8 @@
+---
+lvm_volumes:
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
\ No newline at end of file
diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/rgws b/tests/functional/all_daemons_ipv6/container/group_vars/rgws
new file mode 100644 (file)
index 0000000..639ade9
--- /dev/null
@@ -0,0 +1,8 @@
+---
+copy_admin_key: True
+rgw_create_pools:
+  foo:
+    pg_num: 16
+    type: replicated
+  bar:
+    pg_num: 16
diff --git a/tests/functional/all_daemons_ipv6/container/hosts b/tests/functional/all_daemons_ipv6/container/hosts
new file mode 100644 (file)
index 0000000..572b62e
--- /dev/null
@@ -0,0 +1,36 @@
+[mons]
+mon0 monitor_address="fdec:f1fb:29cd:6940::10"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address="fdec:f1fb:29cd:6940::12"
+
+[mgrs]
+mgr0
+
+[osds]
+osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
+osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
+osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"
+
+[mdss]
+mds0
+mds1
+mds2
+
+[rgws]
+rgw0
+
+#[nfss]
+#nfs0
+
+[clients]
+client0
+client1
+
+[rbdmirrors]
+rbd-mirror0
+
+[iscsigws]
+iscsi-gw0
+
+[monitoring]
+mon0
diff --git a/tests/functional/all_daemons_ipv6/container/vagrant_variables.yml b/tests/functional/all_daemons_ipv6/container/vagrant_variables.yml
new file mode 100644 (file)
index 0000000..46045ea
--- /dev/null
@@ -0,0 +1,61 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 3
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 1
+client_vms: 2
+iscsi_gw_vms: 1
+mgr_vms: 1
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: "fdec:f1fb:29cd:6940::"
+cluster_subnet: "fdec:f1fb:29cd:7120::"
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+#client_vagrant_box: centos/stream8
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location.  vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/tests/functional/all_daemons_ipv6/group_vars/all b/tests/functional/all_daemons_ipv6/group_vars/all
new file mode 100644 (file)
index 0000000..204219f
--- /dev/null
@@ -0,0 +1,39 @@
+---
+ceph_origin: repository
+ceph_repository: dev
+ip_version: ipv6
+public_network: "fdec:f1fb:29cd:6940::/64"
+cluster_network: "fdec:f1fb:29cd:7120::/64"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+ceph_conf_overrides:
+  global:
+    auth_allow_insecure_global_id_reclaim: false
+    mon_allow_pool_size_one: true
+    mon_warn_on_pool_no_redundancy: false
+    osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  size: 1
+  application: rbd
+  target_size_ratio: 0.2
+openstack_cinder_pool:
+  name: "volumes"
+  rule_name: "HDD"
+  size: 1
+  application: rbd
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
+handler_health_mon_check_delay: 10
+handler_health_osd_check_delay: 10
+mds_max_mds: 2
+dashboard_admin_password: $sX!cD$rYU6qR^B!
+grafana_admin_password: +xFRe+RES@7vg24n
+ceph_docker_registry: quay.io
+node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
+prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
+alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
+grafana_server_group_name: ceph_monitoring
diff --git a/tests/functional/all_daemons_ipv6/group_vars/clients b/tests/functional/all_daemons_ipv6/group_vars/clients
new file mode 100644 (file)
index 0000000..4c37898
--- /dev/null
@@ -0,0 +1,13 @@
+---
+copy_admin_key: True
+user_config: True
+test:
+  name: "test"
+  rule_name: "HDD"
+  size: 1
+test2:
+  name: "test2"
+  size: 1
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"
diff --git a/tests/functional/all_daemons_ipv6/group_vars/iscsigws b/tests/functional/all_daemons_ipv6/group_vars/iscsigws
new file mode 100644 (file)
index 0000000..8d0932a
--- /dev/null
@@ -0,0 +1,2 @@
+---
+generate_crt: True
diff --git a/tests/functional/all_daemons_ipv6/group_vars/mons b/tests/functional/all_daemons_ipv6/group_vars/mons
new file mode 100644 (file)
index 0000000..f6ab9a5
--- /dev/null
@@ -0,0 +1,11 @@
+---
+create_crush_tree: True
+crush_rule_config: True
+crush_rule_hdd:
+  name: HDD
+  root: default
+  type: host
+  class: hdd
+  default: true
+crush_rules:
+  - "{{ crush_rule_hdd }}"
\ No newline at end of file
diff --git a/tests/functional/all_daemons_ipv6/group_vars/nfss b/tests/functional/all_daemons_ipv6/group_vars/nfss
new file mode 100644 (file)
index 0000000..fc280e2
--- /dev/null
@@ -0,0 +1,10 @@
+copy_admin_key: true
+nfs_file_gw: false
+nfs_obj_gw: true
+ganesha_conf_overrides: |
+    CACHEINODE {
+            Entries_HWMark = 100000;
+    }
+nfs_ganesha_stable: true
+nfs_ganesha_dev: false
+nfs_ganesha_flavor: "ceph_main"
diff --git a/tests/functional/all_daemons_ipv6/group_vars/osds b/tests/functional/all_daemons_ipv6/group_vars/osds
new file mode 100644 (file)
index 0000000..99c065e
--- /dev/null
@@ -0,0 +1,10 @@
+---
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
+lvm_volumes:
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
\ No newline at end of file
diff --git a/tests/functional/all_daemons_ipv6/group_vars/rgws b/tests/functional/all_daemons_ipv6/group_vars/rgws
new file mode 100644 (file)
index 0000000..d9c09f8
--- /dev/null
@@ -0,0 +1,9 @@
+copy_admin_key: true
+rgw_create_pools:
+  foo:
+    pg_num: 16
+    type: replicated
+  bar:
+    pg_num: 16
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
diff --git a/tests/functional/all_daemons_ipv6/hosts b/tests/functional/all_daemons_ipv6/hosts
new file mode 100644 (file)
index 0000000..e625b8b
--- /dev/null
@@ -0,0 +1,36 @@
+[mons]
+mon0 monitor_address="fdec:f1fb:29cd:6940::10"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+mon2 monitor_address="fdec:f1fb:29cd:6940::12"
+
+[mgrs]
+mgr0
+
+[osds]
+osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
+osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
+osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }" devices="['/dev/sda', '/dev/sdb']" dedicated_devices="['/dev/sdc']" lvm_volumes="[]"
+
+[mdss]
+mds0
+mds1
+mds2
+
+[rgws]
+rgw0
+
+[clients]
+client0
+client1
+
+#[nfss]
+#nfs0
+
+[rbdmirrors]
+rbd-mirror0
+
+[iscsigws]
+iscsi-gw0
+
+[ceph_monitoring]
+mon0
diff --git a/tests/functional/all_daemons_ipv6/vagrant_variables.yml b/tests/functional/all_daemons_ipv6/vagrant_variables.yml
new file mode 100644 (file)
index 0000000..a8e0085
--- /dev/null
@@ -0,0 +1,74 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 3
+mds_vms: 3
+rgw_vms: 1
+nfs_vms: 0
+grafana_server_vms: 0
+rbd_mirror_vms: 1
+client_vms: 2
+iscsi_gw_vms: 1
+mgr_vms: 1
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: "fdec:f1fb:29cd:6940::"
+cluster_subnet: "fdec:f1fb:29cd:7120::"
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu Precise, enp0s8 for CentOS and Ubuntu Xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+#   - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+#   - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/stream8
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for CentOS 7, /home/{ user }/vagrant for OpenStack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location.  vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+  - { name: fs.file-max, value: 26234859 }
+
+# VM prefix name, need to match the hostname
+# label_prefix: ceph
index 5394e33e1623e0813060c449a33c937f00859e2c..54ae0dbee143d34d9763e03ce9e5f443899d571b 100644 (file)
@@ -1,5 +1,5 @@
 # These are Python requirements needed to run the functional tests
-testinfra
+pytest-testinfra
 pytest-xdist
 pytest
 ansible>=2.10,<2.11,!=2.9.10
diff --git a/tox.ini b/tox.ini
index bb0e902625ddfd75139711a6c1f84d90753eb41a..1c1c71daf2d1c7f9b05ec9852dd3ee517e8b2bbc 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = centos-{container,non_container}-{all_daemons,collocation,lvm_osds,shrink_mon,shrink_mgr,shrink_mds,shrink_rbdmirror,shrink_rgw,lvm_batch,add_mons,add_mgrs,add_mdss,add_rbdmirrors,add_rgws,rgw_multisite,purge,storage_inventory,lvm_auto_discovery,all_in_one,cephadm_adopt,purge_dashboard}
+envlist = centos-{container,non_container}-{all_daemons,all_daemons_ipv6,collocation,lvm_osds,shrink_mon,shrink_mgr,shrink_mds,shrink_rbdmirror,shrink_rgw,lvm_batch,add_mons,add_mgrs,add_mdss,add_rbdmirrors,add_rgws,rgw_multisite,purge,storage_inventory,lvm_auto_discovery,all_in_one,cephadm_adopt,purge_dashboard}
   centos-non_container-{switch_to_containers}
   infra_lv_create
   migrate_ceph_disk_to_ceph_volume
@@ -298,6 +298,7 @@ setenv=
 deps= -r{toxinidir}/tests/requirements.txt
 changedir=
   all_daemons: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
+  all_daemons_ipv6: {toxinidir}/tests/functional/all_daemons_ipv6{env:CONTAINER_DIR:}
   cluster: {toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:}
   shrink_mon: {toxinidir}/tests/functional/shrink_mon{env:CONTAINER_DIR:}
   shrink_mgn: {toxinidir}/tests/functional/shrink_mon{env:CONTAINER_DIR:}
@@ -350,13 +351,13 @@ commands=
   py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
   # reboot all vms
-  all_daemons,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
+  all_daemons,all_daemons_ipv6,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/reboot.yml
 
   # retest to ensure cluster came back up correctly after rebooting
-  all_daemons,collocation: py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
+  all_daemons,all_daemons_ipv6,collocation: py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests
 
   # handlers/idempotency test
-  all_daemons,all_in_one,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-pacific}" --extra-vars @ceph-override.json
+  all_daemons,all_daemons_ipv6,all_in_one,collocation: ansible-playbook -vv -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-pacific}" --extra-vars @ceph-override.json
 
   purge: {[purge]commands}
   purge_dashboard: {[purge-dashboard]commands}