From 5ca43241ceb2caea5a4432e4990ab881aea98567 Mon Sep 17 00:00:00 2001 From: Georgios Kyratsas Date: Mon, 3 Feb 2020 13:03:14 +0100 Subject: [PATCH] qa: Ceph-salt task + suite Adding Ceph-salt task and a basic suite under qa/suites/ceph_salt. Also changed suse suite to point to ceph-salt instead of deepsea. Signed-off-by: Georgios Kyratsas --- qa/ceph_salt/.qa | 1 + qa/ceph_salt/boilerplate/+ | 0 qa/ceph_salt/boilerplate/ceph_cm.yaml | 1 + qa/ceph_salt/ceph_salt.yaml | 9 + qa/ceph_salt/disks/0disks.yaml | 4 + qa/ceph_salt/disks/1disk.yaml | 4 + qa/ceph_salt/disks/2disks.yaml | 4 + qa/ceph_salt/disks/3disks.yaml | 4 + qa/ceph_salt/disks/4disks.yaml | 4 + qa/ceph_salt/disks/5disks.yaml | 4 + qa/ceph_salt/distros/.qa | 1 + qa/ceph_salt/distros/opensuse_15.2.yaml | 1 + qa/ceph_salt/distros/sle_15.2.yaml | 1 + qa/ceph_salt/nodes/1node.yaml | 2 + qa/ceph_salt/nodes/20nodes.yaml | 21 + qa/ceph_salt/nodes/2nodes.yaml | 3 + qa/ceph_salt/nodes/3nodes.yaml | 4 + qa/ceph_salt/nodes/4nodes.yaml | 5 + qa/ceph_salt/nodes/5nodes.yaml | 6 + qa/ceph_salt/salt.yaml | 6 + .../storage-profiles/bs_dedicated_db.yaml | 15 + .../bs_dedicated_db_crypt.yaml | 18 + .../bs_dedicated_db_sizes.yaml | 18 + .../bs_dedicated_db_sizes_crypt.yaml | 21 + .../bs_dedicated_db_sizes_mixed.yaml | 17 + .../bs_dedicated_db_sizes_mixed_crypt.yaml | 20 + .../storage-profiles/bs_dedicated_wal.yaml | 15 + .../bs_dedicated_wal_crypt.yaml | 18 + .../storage-profiles/bs_dedicated_wal_db.yaml | 14 + .../bs_dedicated_wal_db_crypt.yaml | 16 + .../bs_dedicated_wal_db_sizes_all.yaml | 18 + .../bs_dedicated_wal_db_sizes_all_crypt.yaml | 20 + .../bs_dedicated_wal_db_sizes_mixed.yaml | 16 + ...bs_dedicated_wal_db_sizes_mixed_crypt.yaml | 18 + .../bs_dedicated_wal_sizes.yaml | 18 + .../bs_dedicated_wal_sizes_crypt.yaml | 21 + .../bs_dedicated_wal_sizes_mixed.yaml | 17 + .../bs_dedicated_wal_sizes_mixed_crypt.yaml | 20 + .../fs_dedicated_journal.yaml | 15 + .../fs_dedicated_journal_crypt.yaml | 18 + qa/suites/ceph_salt/.qa 
| 1 + qa/suites/ceph_salt/tier0/.qa | 1 + qa/suites/ceph_salt/tier0/salt/% | 0 qa/suites/ceph_salt/tier0/salt/.qa | 1 + qa/suites/ceph_salt/tier0/salt/0-salt.yaml | 1 + qa/suites/ceph_salt/tier0/salt/boilerplate | 1 + qa/suites/ceph_salt/tier0/salt/cluster/+ | 0 qa/suites/ceph_salt/tier0/salt/cluster/.qa | 1 + .../ceph_salt/tier0/salt/cluster/1disk.yaml | 1 + .../ceph_salt/tier0/salt/cluster/1node.yaml | 1 + qa/suites/ceph_salt/tier0/salt/distros | 1 + qa/suites/ceph_salt/tier1/.qa | 1 + qa/suites/ceph_salt/tier1/health-ok/% | 0 qa/suites/ceph_salt/tier1/health-ok/.qa | 1 + .../ceph_salt/tier1/health-ok/0-salt.yaml | 1 + .../tier1/health-ok/1-deploy-phase.yaml | 1 + .../tier1/health-ok/2-test-phase.yaml | 4 + .../ceph_salt/tier1/health-ok/boilerplate | 1 + qa/suites/ceph_salt/tier1/health-ok/cluster/+ | 0 .../ceph_salt/tier1/health-ok/cluster/.qa | 1 + .../tier1/health-ok/cluster/4disks.yaml | 1 + .../tier1/health-ok/cluster/roles.yaml | 2 + qa/suites/ceph_salt/tier1/health-ok/distros | 1 + qa/suites/suse/tier0 | 2 +- qa/suites/suse/tier1 | 2 +- qa/tasks/ceph_salt.py | 368 ++++++++++++++++++ qa/tasks/salt_manager.py | 10 + qa/tasks/scripts/drive_groups.sh | 15 + qa/tasks/scripts/install_ceph_salt.sh | 16 + 69 files changed, 872 insertions(+), 2 deletions(-) create mode 120000 qa/ceph_salt/.qa create mode 100644 qa/ceph_salt/boilerplate/+ create mode 100644 qa/ceph_salt/boilerplate/ceph_cm.yaml create mode 100644 qa/ceph_salt/ceph_salt.yaml create mode 100644 qa/ceph_salt/disks/0disks.yaml create mode 100644 qa/ceph_salt/disks/1disk.yaml create mode 100644 qa/ceph_salt/disks/2disks.yaml create mode 100644 qa/ceph_salt/disks/3disks.yaml create mode 100644 qa/ceph_salt/disks/4disks.yaml create mode 100644 qa/ceph_salt/disks/5disks.yaml create mode 120000 qa/ceph_salt/distros/.qa create mode 120000 qa/ceph_salt/distros/opensuse_15.2.yaml create mode 120000 qa/ceph_salt/distros/sle_15.2.yaml create mode 100644 qa/ceph_salt/nodes/1node.yaml create mode 100644 
qa/ceph_salt/nodes/20nodes.yaml create mode 100644 qa/ceph_salt/nodes/2nodes.yaml create mode 100644 qa/ceph_salt/nodes/3nodes.yaml create mode 100644 qa/ceph_salt/nodes/4nodes.yaml create mode 100644 qa/ceph_salt/nodes/5nodes.yaml create mode 100644 qa/ceph_salt/salt.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_db.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_db_crypt.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_crypt.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_mixed.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_mixed_crypt.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_crypt.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_db.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_crypt.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_all.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_all_crypt.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_mixed.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_mixed_crypt.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_crypt.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_mixed.yaml create mode 100644 qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_mixed_crypt.yaml create mode 100644 qa/ceph_salt/storage-profiles/fs_dedicated_journal.yaml create mode 100644 qa/ceph_salt/storage-profiles/fs_dedicated_journal_crypt.yaml create mode 120000 qa/suites/ceph_salt/.qa create mode 120000 qa/suites/ceph_salt/tier0/.qa create mode 
100644 qa/suites/ceph_salt/tier0/salt/% create mode 120000 qa/suites/ceph_salt/tier0/salt/.qa create mode 120000 qa/suites/ceph_salt/tier0/salt/0-salt.yaml create mode 120000 qa/suites/ceph_salt/tier0/salt/boilerplate create mode 100644 qa/suites/ceph_salt/tier0/salt/cluster/+ create mode 120000 qa/suites/ceph_salt/tier0/salt/cluster/.qa create mode 120000 qa/suites/ceph_salt/tier0/salt/cluster/1disk.yaml create mode 120000 qa/suites/ceph_salt/tier0/salt/cluster/1node.yaml create mode 120000 qa/suites/ceph_salt/tier0/salt/distros create mode 120000 qa/suites/ceph_salt/tier1/.qa create mode 100644 qa/suites/ceph_salt/tier1/health-ok/% create mode 120000 qa/suites/ceph_salt/tier1/health-ok/.qa create mode 120000 qa/suites/ceph_salt/tier1/health-ok/0-salt.yaml create mode 120000 qa/suites/ceph_salt/tier1/health-ok/1-deploy-phase.yaml create mode 100644 qa/suites/ceph_salt/tier1/health-ok/2-test-phase.yaml create mode 120000 qa/suites/ceph_salt/tier1/health-ok/boilerplate create mode 100644 qa/suites/ceph_salt/tier1/health-ok/cluster/+ create mode 120000 qa/suites/ceph_salt/tier1/health-ok/cluster/.qa create mode 120000 qa/suites/ceph_salt/tier1/health-ok/cluster/4disks.yaml create mode 100644 qa/suites/ceph_salt/tier1/health-ok/cluster/roles.yaml create mode 120000 qa/suites/ceph_salt/tier1/health-ok/distros create mode 100644 qa/tasks/ceph_salt.py create mode 100644 qa/tasks/scripts/drive_groups.sh create mode 100644 qa/tasks/scripts/install_ceph_salt.sh diff --git a/qa/ceph_salt/.qa b/qa/ceph_salt/.qa new file mode 120000 index 0000000000000..a96aa0ea9d8c4 --- /dev/null +++ b/qa/ceph_salt/.qa @@ -0,0 +1 @@ +.. 
\ No newline at end of file diff --git a/qa/ceph_salt/boilerplate/+ b/qa/ceph_salt/boilerplate/+ new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/qa/ceph_salt/boilerplate/ceph_cm.yaml b/qa/ceph_salt/boilerplate/ceph_cm.yaml new file mode 100644 index 0000000000000..e74c1884fe5a3 --- /dev/null +++ b/qa/ceph_salt/boilerplate/ceph_cm.yaml @@ -0,0 +1 @@ +ceph_cm_ansible: false diff --git a/qa/ceph_salt/ceph_salt.yaml b/qa/ceph_salt/ceph_salt.yaml new file mode 100644 index 0000000000000..b40d2ac6fa0c4 --- /dev/null +++ b/qa/ceph_salt/ceph_salt.yaml @@ -0,0 +1,9 @@ +tasks: + - ceph_salt: + - cephadm: + conf: + mgr: + debug ms: 1 + debug mgr: 20 + + diff --git a/qa/ceph_salt/disks/0disks.yaml b/qa/ceph_salt/disks/0disks.yaml new file mode 100644 index 0000000000000..dc8605a64d6fd --- /dev/null +++ b/qa/ceph_salt/disks/0disks.yaml @@ -0,0 +1,4 @@ +openstack: +- volumes: # attached to each instance + count: 0 + size: 10 # GB diff --git a/qa/ceph_salt/disks/1disk.yaml b/qa/ceph_salt/disks/1disk.yaml new file mode 100644 index 0000000000000..1654bdaf20b2e --- /dev/null +++ b/qa/ceph_salt/disks/1disk.yaml @@ -0,0 +1,4 @@ +openstack: +- volumes: # attached to each instance + count: 1 + size: 10 # GB diff --git a/qa/ceph_salt/disks/2disks.yaml b/qa/ceph_salt/disks/2disks.yaml new file mode 100644 index 0000000000000..f794a6f90ecfa --- /dev/null +++ b/qa/ceph_salt/disks/2disks.yaml @@ -0,0 +1,4 @@ +openstack: +- volumes: # attached to each instance + count: 2 + size: 10 # GB diff --git a/qa/ceph_salt/disks/3disks.yaml b/qa/ceph_salt/disks/3disks.yaml new file mode 100644 index 0000000000000..8da92ca9628df --- /dev/null +++ b/qa/ceph_salt/disks/3disks.yaml @@ -0,0 +1,4 @@ +openstack: +- volumes: # attached to each instance + count: 3 + size: 10 # GB diff --git a/qa/ceph_salt/disks/4disks.yaml b/qa/ceph_salt/disks/4disks.yaml new file mode 100644 index 0000000000000..2054da95e8b08 --- /dev/null +++ b/qa/ceph_salt/disks/4disks.yaml @@ -0,0 +1,4 @@ +openstack: +- 
volumes: # attached to each instance + count: 4 + size: 10 # GB diff --git a/qa/ceph_salt/disks/5disks.yaml b/qa/ceph_salt/disks/5disks.yaml new file mode 100644 index 0000000000000..a5bf871fb394c --- /dev/null +++ b/qa/ceph_salt/disks/5disks.yaml @@ -0,0 +1,4 @@ +openstack: +- volumes: # attached to each instance + count: 5 + size: 10 # GB diff --git a/qa/ceph_salt/distros/.qa b/qa/ceph_salt/distros/.qa new file mode 120000 index 0000000000000..fea2489fdf6d9 --- /dev/null +++ b/qa/ceph_salt/distros/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/ceph_salt/distros/opensuse_15.2.yaml b/qa/ceph_salt/distros/opensuse_15.2.yaml new file mode 120000 index 0000000000000..4f6ad5b503a06 --- /dev/null +++ b/qa/ceph_salt/distros/opensuse_15.2.yaml @@ -0,0 +1 @@ +.qa/distros/all/opensuse_15.2.yaml \ No newline at end of file diff --git a/qa/ceph_salt/distros/sle_15.2.yaml b/qa/ceph_salt/distros/sle_15.2.yaml new file mode 120000 index 0000000000000..afb5ae4ae9720 --- /dev/null +++ b/qa/ceph_salt/distros/sle_15.2.yaml @@ -0,0 +1 @@ +.qa/distros/all/sle_15.2.yaml \ No newline at end of file diff --git a/qa/ceph_salt/nodes/1node.yaml b/qa/ceph_salt/nodes/1node.yaml new file mode 100644 index 0000000000000..aaaf43d31d45c --- /dev/null +++ b/qa/ceph_salt/nodes/1node.yaml @@ -0,0 +1,2 @@ +roles: +- [client.salt_master] diff --git a/qa/ceph_salt/nodes/20nodes.yaml b/qa/ceph_salt/nodes/20nodes.yaml new file mode 100644 index 0000000000000..f587a8d0d8d2b --- /dev/null +++ b/qa/ceph_salt/nodes/20nodes.yaml @@ -0,0 +1,21 @@ +roles: +- [salt.master, node.0] +- [node.1] +- [node.2] +- [node.3] +- [node.4] +- [node.5] +- [node.6] +- [node.7] +- [node.8] +- [node.9] +- [node.10] +- [node.11] +- [node.12] +- [node.13] +- [node.14] +- [node.15] +- [node.16] +- [node.17] +- [node.18] +- [node.19] diff --git a/qa/ceph_salt/nodes/2nodes.yaml b/qa/ceph_salt/nodes/2nodes.yaml new file mode 100644 index 0000000000000..4a520a8ab5f6a --- /dev/null +++ 
b/qa/ceph_salt/nodes/2nodes.yaml @@ -0,0 +1,3 @@ +roles: +- [salt.master] +- [node.1] diff --git a/qa/ceph_salt/nodes/3nodes.yaml b/qa/ceph_salt/nodes/3nodes.yaml new file mode 100644 index 0000000000000..84e1eef828c49 --- /dev/null +++ b/qa/ceph_salt/nodes/3nodes.yaml @@ -0,0 +1,4 @@ +roles: +- [salt.master] +- [node.1] +- [node.2] diff --git a/qa/ceph_salt/nodes/4nodes.yaml b/qa/ceph_salt/nodes/4nodes.yaml new file mode 100644 index 0000000000000..73caaecddebef --- /dev/null +++ b/qa/ceph_salt/nodes/4nodes.yaml @@ -0,0 +1,5 @@ +roles: +- [salt.master] +- [node.1] +- [node.2] +- [node.3] diff --git a/qa/ceph_salt/nodes/5nodes.yaml b/qa/ceph_salt/nodes/5nodes.yaml new file mode 100644 index 0000000000000..35028c0edf102 --- /dev/null +++ b/qa/ceph_salt/nodes/5nodes.yaml @@ -0,0 +1,6 @@ +roles: +- [salt.master, node.0] +- [node.1] +- [node.2] +- [node.3] +- [node.4] diff --git a/qa/ceph_salt/salt.yaml b/qa/ceph_salt/salt.yaml new file mode 100644 index 0000000000000..c91159c3e5961 --- /dev/null +++ b/qa/ceph_salt/salt.yaml @@ -0,0 +1,6 @@ +tasks: +- clock: +- install: + install_ceph_packages: false + extra_system_packages: [salt, salt-master, salt-minion, salt-api, lsof] +- salt: diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_db.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_db.yaml new file mode 100644 index 0000000000000..78114bf003ddb --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_db.yaml @@ -0,0 +1,15 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + db: /dev/vde + /dev/vdc: + format: bluestore + db: /dev/vde + /dev/vdd: + format: bluestore + db: /dev/vde diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_db_crypt.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_db_crypt.yaml new file mode 100644 index 0000000000000..a9ea7b7a6fd35 --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_db_crypt.yaml @@ -0,0 +1,18 @@ +overrides: + deepsea: + storage_profile: + ceph: + 
storage: + osds: + /dev/vdb: + format: bluestore + encryption: dmcrypt + db: /dev/vde + /dev/vdc: + format: bluestore + encryption: dmcrypt + db: /dev/vde + /dev/vdd: + format: bluestore + encryption: dmcrypt + db: /dev/vde diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes.yaml new file mode 100644 index 0000000000000..34a8d5f926ef2 --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes.yaml @@ -0,0 +1,18 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + db: /dev/vde + db_size: 1G + /dev/vdc: + format: bluestore + db: /dev/vde + db_size: 2G + /dev/vdd: + format: bluestore + db: /dev/vde + db_size: 3G diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_crypt.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_crypt.yaml new file mode 100644 index 0000000000000..3e08f56189ec2 --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_crypt.yaml @@ -0,0 +1,21 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + encryption: dmcrypt + db: /dev/vde + db_size: 1G + /dev/vdc: + format: bluestore + encryption: dmcrypt + db: /dev/vde + db_size: 2G + /dev/vdd: + format: bluestore + encryption: dmcrypt + db: /dev/vde + db_size: 3G diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_mixed.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_mixed.yaml new file mode 100644 index 0000000000000..4f838bbff0add --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_mixed.yaml @@ -0,0 +1,17 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + db: /dev/vde + db_size: 1G + /dev/vdc: + format: bluestore + db: /dev/vde + db_size: 2G + /dev/vdd: + format: bluestore + db: /dev/vde diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_mixed_crypt.yaml 
b/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_mixed_crypt.yaml new file mode 100644 index 0000000000000..4f2f60e36c819 --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_db_sizes_mixed_crypt.yaml @@ -0,0 +1,20 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + encryption: dmcrypt + db: /dev/vde + db_size: 1G + /dev/vdc: + format: bluestore + encryption: dmcrypt + db: /dev/vde + db_size: 2G + /dev/vdd: + format: bluestore + encryption: dmcrypt + db: /dev/vde diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal.yaml new file mode 100644 index 0000000000000..7f6093df7a1ed --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal.yaml @@ -0,0 +1,15 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + wal: /dev/vde + /dev/vdc: + format: bluestore + wal: /dev/vde + /dev/vdd: + format: bluestore + wal: /dev/vde diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_crypt.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_crypt.yaml new file mode 100644 index 0000000000000..df5e5ebee189f --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_crypt.yaml @@ -0,0 +1,18 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + encryption: dmcrypt + wal: /dev/vde + /dev/vdc: + format: bluestore + encryption: dmcrypt + wal: /dev/vde + /dev/vdd: + format: bluestore + encryption: dmcrypt + wal: /dev/vde diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db.yaml new file mode 100644 index 0000000000000..61daf1a1c0315 --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db.yaml @@ -0,0 +1,14 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + wal: /dev/vde + db: /dev/vdd + /dev/vdc: + 
format: bluestore + wal: /dev/vde + db: /dev/vdd diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_crypt.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_crypt.yaml new file mode 100644 index 0000000000000..07ea6bfc6332a --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_crypt.yaml @@ -0,0 +1,16 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + encryption: dmcrypt + wal: /dev/vde + db: /dev/vdd + /dev/vdc: + format: bluestore + encryption: dmcrypt + wal: /dev/vde + db: /dev/vdd diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_all.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_all.yaml new file mode 100644 index 0000000000000..8693a351d13e4 --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_all.yaml @@ -0,0 +1,18 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + wal_size: 1G + wal: /dev/vde + db: /dev/vdd + db_size: 2G + /dev/vdc: + format: bluestore + wal: /dev/vde + db: /dev/vdd + wal_size: 3G + db_size: 4G diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_all_crypt.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_all_crypt.yaml new file mode 100644 index 0000000000000..a9c4aecb165ab --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_all_crypt.yaml @@ -0,0 +1,20 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + encryption: dmcrypt + wal_size: 1G + wal: /dev/vde + db: /dev/vdd + db_size: 2G + /dev/vdc: + format: bluestore + encryption: dmcrypt + wal: /dev/vde + db: /dev/vdd + wal_size: 3G + db_size: 4G diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_mixed.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_mixed.yaml new file mode 100644 index 0000000000000..c4f2e147e2530 --- /dev/null +++ 
b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_mixed.yaml @@ -0,0 +1,16 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + wal: /dev/vde + db: /dev/vdd + /dev/vdc: + format: bluestore + wal: /dev/vde + db: /dev/vdd + wal_size: 3G + db_size: 4G diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_mixed_crypt.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_mixed_crypt.yaml new file mode 100644 index 0000000000000..9a1f408fb939c --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_db_sizes_mixed_crypt.yaml @@ -0,0 +1,18 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + encryption: dmcrypt + wal: /dev/vde + db: /dev/vdd + /dev/vdc: + format: bluestore + encryption: dmcrypt + wal: /dev/vde + db: /dev/vdd + wal_size: 3G + db_size: 4G diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes.yaml new file mode 100644 index 0000000000000..b22f89616e4d8 --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes.yaml @@ -0,0 +1,18 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + wal_size: 1G + wal: /dev/vde + /dev/vdc: + format: bluestore + wal: /dev/vde + wal_size: 2G + /dev/vdd: + format: bluestore + wal: /dev/vde + wal_size: 3G diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_crypt.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_crypt.yaml new file mode 100644 index 0000000000000..b5c02df46ee62 --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_crypt.yaml @@ -0,0 +1,21 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + encryption: dmcrypt + wal_size: 1G + wal: /dev/vde + /dev/vdc: + format: bluestore + encryption: dmcrypt + wal: /dev/vde + wal_size: 2G + /dev/vdd: + 
format: bluestore + encryption: dmcrypt + wal: /dev/vde + wal_size: 3G diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_mixed.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_mixed.yaml new file mode 100644 index 0000000000000..0897b6e01a315 --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_mixed.yaml @@ -0,0 +1,17 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + wal_size: 1G + wal: /dev/vde + /dev/vdc: + format: bluestore + wal: /dev/vde + wal_size: 2G + /dev/vdd: + format: bluestore + wal: /dev/vde diff --git a/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_mixed_crypt.yaml b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_mixed_crypt.yaml new file mode 100644 index 0000000000000..f4c803382a082 --- /dev/null +++ b/qa/ceph_salt/storage-profiles/bs_dedicated_wal_sizes_mixed_crypt.yaml @@ -0,0 +1,20 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: bluestore + encryption: dmcrypt + wal_size: 1G + wal: /dev/vde + /dev/vdc: + format: bluestore + encryption: dmcrypt + wal: /dev/vde + wal_size: 2G + /dev/vdd: + format: bluestore + encryption: dmcrypt + wal: /dev/vde diff --git a/qa/ceph_salt/storage-profiles/fs_dedicated_journal.yaml b/qa/ceph_salt/storage-profiles/fs_dedicated_journal.yaml new file mode 100644 index 0000000000000..0b5b2513e119c --- /dev/null +++ b/qa/ceph_salt/storage-profiles/fs_dedicated_journal.yaml @@ -0,0 +1,15 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: filestore + journal: /dev/vde + /dev/vdc: + format: filestore + journal: /dev/vde + /dev/vdd: + format: filestore + journal: /dev/vde diff --git a/qa/ceph_salt/storage-profiles/fs_dedicated_journal_crypt.yaml b/qa/ceph_salt/storage-profiles/fs_dedicated_journal_crypt.yaml new file mode 100644 index 0000000000000..6c7d500a0f30a --- /dev/null +++ 
b/qa/ceph_salt/storage-profiles/fs_dedicated_journal_crypt.yaml @@ -0,0 +1,18 @@ +overrides: + deepsea: + storage_profile: + ceph: + storage: + osds: + /dev/vdb: + format: filestore + encryption: dmcrypt + journal: /dev/vde + /dev/vdc: + format: filestore + encryption: dmcrypt + journal: /dev/vde + /dev/vdd: + format: filestore + encryption: dmcrypt + journal: /dev/vde diff --git a/qa/suites/ceph_salt/.qa b/qa/suites/ceph_salt/.qa new file mode 120000 index 0000000000000..fea2489fdf6d9 --- /dev/null +++ b/qa/suites/ceph_salt/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier0/.qa b/qa/suites/ceph_salt/tier0/.qa new file mode 120000 index 0000000000000..fea2489fdf6d9 --- /dev/null +++ b/qa/suites/ceph_salt/tier0/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier0/salt/% b/qa/suites/ceph_salt/tier0/salt/% new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/qa/suites/ceph_salt/tier0/salt/.qa b/qa/suites/ceph_salt/tier0/salt/.qa new file mode 120000 index 0000000000000..fea2489fdf6d9 --- /dev/null +++ b/qa/suites/ceph_salt/tier0/salt/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier0/salt/0-salt.yaml b/qa/suites/ceph_salt/tier0/salt/0-salt.yaml new file mode 120000 index 0000000000000..bd005a0c7f17a --- /dev/null +++ b/qa/suites/ceph_salt/tier0/salt/0-salt.yaml @@ -0,0 +1 @@ +.qa/ceph_salt/salt.yaml \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier0/salt/boilerplate b/qa/suites/ceph_salt/tier0/salt/boilerplate new file mode 120000 index 0000000000000..e27b9bd07b59e --- /dev/null +++ b/qa/suites/ceph_salt/tier0/salt/boilerplate @@ -0,0 +1 @@ +.qa/ceph_salt/boilerplate/ \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier0/salt/cluster/+ b/qa/suites/ceph_salt/tier0/salt/cluster/+ new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/qa/suites/ceph_salt/tier0/salt/cluster/.qa 
b/qa/suites/ceph_salt/tier0/salt/cluster/.qa new file mode 120000 index 0000000000000..fea2489fdf6d9 --- /dev/null +++ b/qa/suites/ceph_salt/tier0/salt/cluster/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier0/salt/cluster/1disk.yaml b/qa/suites/ceph_salt/tier0/salt/cluster/1disk.yaml new file mode 120000 index 0000000000000..2cd0d749b692b --- /dev/null +++ b/qa/suites/ceph_salt/tier0/salt/cluster/1disk.yaml @@ -0,0 +1 @@ +.qa/ceph_salt/disks/1disk.yaml \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier0/salt/cluster/1node.yaml b/qa/suites/ceph_salt/tier0/salt/cluster/1node.yaml new file mode 120000 index 0000000000000..c4bd4d4b46f8b --- /dev/null +++ b/qa/suites/ceph_salt/tier0/salt/cluster/1node.yaml @@ -0,0 +1 @@ +.qa/ceph_salt/nodes/1node.yaml \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier0/salt/distros b/qa/suites/ceph_salt/tier0/salt/distros new file mode 120000 index 0000000000000..1afc3b4e827a2 --- /dev/null +++ b/qa/suites/ceph_salt/tier0/salt/distros @@ -0,0 +1 @@ +.qa/ceph_salt/distros/ \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier1/.qa b/qa/suites/ceph_salt/tier1/.qa new file mode 120000 index 0000000000000..fea2489fdf6d9 --- /dev/null +++ b/qa/suites/ceph_salt/tier1/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier1/health-ok/% b/qa/suites/ceph_salt/tier1/health-ok/% new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/qa/suites/ceph_salt/tier1/health-ok/.qa b/qa/suites/ceph_salt/tier1/health-ok/.qa new file mode 120000 index 0000000000000..fea2489fdf6d9 --- /dev/null +++ b/qa/suites/ceph_salt/tier1/health-ok/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier1/health-ok/0-salt.yaml b/qa/suites/ceph_salt/tier1/health-ok/0-salt.yaml new file mode 120000 index 0000000000000..bd005a0c7f17a --- /dev/null +++ b/qa/suites/ceph_salt/tier1/health-ok/0-salt.yaml @@ -0,0 
+1 @@ +.qa/ceph_salt/salt.yaml \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier1/health-ok/1-deploy-phase.yaml b/qa/suites/ceph_salt/tier1/health-ok/1-deploy-phase.yaml new file mode 120000 index 0000000000000..6a8e46ad6ad3f --- /dev/null +++ b/qa/suites/ceph_salt/tier1/health-ok/1-deploy-phase.yaml @@ -0,0 +1 @@ +.qa/ceph_salt/ceph_salt.yaml \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier1/health-ok/2-test-phase.yaml b/qa/suites/ceph_salt/tier1/health-ok/2-test-phase.yaml new file mode 100644 index 0000000000000..37774d1907a61 --- /dev/null +++ b/qa/suites/ceph_salt/tier1/health-ok/2-test-phase.yaml @@ -0,0 +1,4 @@ +tasks: + - exec: + mon.a: + - 'ceph -s' diff --git a/qa/suites/ceph_salt/tier1/health-ok/boilerplate b/qa/suites/ceph_salt/tier1/health-ok/boilerplate new file mode 120000 index 0000000000000..e27b9bd07b59e --- /dev/null +++ b/qa/suites/ceph_salt/tier1/health-ok/boilerplate @@ -0,0 +1 @@ +.qa/ceph_salt/boilerplate/ \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier1/health-ok/cluster/+ b/qa/suites/ceph_salt/tier1/health-ok/cluster/+ new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/qa/suites/ceph_salt/tier1/health-ok/cluster/.qa b/qa/suites/ceph_salt/tier1/health-ok/cluster/.qa new file mode 120000 index 0000000000000..fea2489fdf6d9 --- /dev/null +++ b/qa/suites/ceph_salt/tier1/health-ok/cluster/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier1/health-ok/cluster/4disks.yaml b/qa/suites/ceph_salt/tier1/health-ok/cluster/4disks.yaml new file mode 120000 index 0000000000000..27637a24b59ff --- /dev/null +++ b/qa/suites/ceph_salt/tier1/health-ok/cluster/4disks.yaml @@ -0,0 +1 @@ +.qa/ceph_salt/disks/4disks.yaml \ No newline at end of file diff --git a/qa/suites/ceph_salt/tier1/health-ok/cluster/roles.yaml b/qa/suites/ceph_salt/tier1/health-ok/cluster/roles.yaml new file mode 100644 index 0000000000000..87868730834a1 --- /dev/null +++ 
b/qa/suites/ceph_salt/tier1/health-ok/cluster/roles.yaml @@ -0,0 +1,2 @@ +roles: + - [client.salt_master, osd.0, osd.1, osd.2, osd.3, mon.a, mgr.x] diff --git a/qa/suites/ceph_salt/tier1/health-ok/distros b/qa/suites/ceph_salt/tier1/health-ok/distros new file mode 120000 index 0000000000000..d61a491ca4f6c --- /dev/null +++ b/qa/suites/ceph_salt/tier1/health-ok/distros @@ -0,0 +1 @@ +.qa/ceph_salt/distros \ No newline at end of file diff --git a/qa/suites/suse/tier0 b/qa/suites/suse/tier0 index 06233b177ec30..e3092f6dd4dc0 120000 --- a/qa/suites/suse/tier0 +++ b/qa/suites/suse/tier0 @@ -1 +1 @@ -../deepsea/tier0 \ No newline at end of file +../ceph_salt/tier0 \ No newline at end of file diff --git a/qa/suites/suse/tier1 b/qa/suites/suse/tier1 index 5be3d9a32a6c1..3419a5b5af400 120000 --- a/qa/suites/suse/tier1 +++ b/qa/suites/suse/tier1 @@ -1 +1 @@ -../deepsea/tier1 \ No newline at end of file +../ceph_salt/tier1 \ No newline at end of file diff --git a/qa/tasks/ceph_salt.py b/qa/tasks/ceph_salt.py new file mode 100644 index 0000000000000..9f5ac85a69fec --- /dev/null +++ b/qa/tasks/ceph_salt.py @@ -0,0 +1,368 @@ +''' +Task that deploys a Ceph cluster on all the nodes +using Ceph-salt +Linter: + flake8 --max-line-length=100 +''' +import logging +import argparse +import uuid + +from salt_manager import SaltManager +from scripts import Scripts +from teuthology import misc +from util import ( + introspect_roles, + remote_exec, + ) + +from teuthology.exceptions import ConfigError +from teuthology.task import Task +from teuthology.orchestra.daemon import DaemonGroup +from tasks.ceph import get_mons + +log = logging.getLogger(__name__) +ceph_salt_ctx = {} +reboot_tries = 50 + + +def anchored(log_message): + global ceph_salt_ctx + assert 'log_anchor' in ceph_salt_ctx, "ceph_salt_ctx not populated" + return "{}{}".format(ceph_salt_ctx['log_anchor'], log_message) + + +class CephSalt(Task): + """ + Deploy a Ceph cluster on all remotes using + Ceph-salt 
(https://github.com/SUSE/ceph-salt) + + Assumes a Salt cluster is already running (use the Salt task to achieve + this). + + This task understands the following config keys which apply to + this task and all its subtasks: + + log_anchor a string (default: "WWWW: ") which will precede + log messages emitted at key points during the + deployment + quiet_salt: + true suppress stderr on salt commands (the default) + false let salt commands spam the log + allow_reboots: + true Allow cluster nodes to be rebooted if needed (default) + false + deploy: + true Enable role deployment on ceph-salt (default) + false + repo: Ceph-salt repo for building it from source. If no repo + is provided Ceph-salt will be installed from packages + using zypper. + branch: Ceph-salt branch in case repo is provided. If no branch + is provided master is used by default. + + """ + err_prefix = "(ceph_salt task) " + + log_anchor_str = "WWWW: " + + def __init__(self, ctx, config): + super(CephSalt, self).__init__(ctx, config) + log.debug("beginning of constructor method") + if not ceph_salt_ctx: + self._populate_ceph_salt_context() + self.log_anchor = ceph_salt_ctx['log_anchor'] + introspect_roles(self.ctx, self.log, quiet=False) + self.ctx['roles'] = self.ctx.config['roles'] + self.log = log + self.reboots_explicitly_forbidden = not self.config.get( + "allow_reboots", True) + self.master_remote = ceph_salt_ctx['master_remote'] + self.quiet_salt = ceph_salt_ctx['quiet_salt'] + self.nodes = self.ctx['nodes'] + self.nodes_storage = self.ctx['nodes_storage'] + self.nodes_storage_only = self.ctx['nodes_storage_only'] + self.remotes = self.ctx['remotes'] + self.roles = self.ctx['roles'] + self.sm = ceph_salt_ctx['salt_manager_instance'] + self.role_types = self.ctx['role_types'] + self.remote_lookup_table = self.ctx['remote_lookup_table'] + self.ceph_salt_deploy = ceph_salt_ctx['deploy'] + self.cluster = self.config.get('cluster', 'ceph') + self.testdir = misc.get_testdir(self.ctx) + 
self.config['cephadm_mode'] = 'cephadm-package' + self.ctx.cephadm = 'cephadm' + self.ctx.daemons = DaemonGroup(use_cephadm=self.ctx.cephadm) + if not hasattr(self.ctx, 'ceph'): + self.ctx.ceph = {} + self.ctx.managers = {} + self.ctx.ceph[self.cluster] = argparse.Namespace() + self.ctx.ceph[self.cluster].thrashers = [] + self.scripts = Scripts(self.ctx, self.log) + self.bootstrap_remote = None + + def _install_ceph_salt(self): + ''' + Installs ceph-salt on master either from source if repo and/or + branch are provided in the suite yaml or from rpm if not + ''' + global ceph_salt_ctx + if ceph_salt_ctx['repo']: + if not ceph_salt_ctx['branch']: + self.scripts.run( + self.master_remote, + 'install_ceph_salt.sh', + args=ceph_salt_ctx['repo'] + ) + else: + self.scripts.run( + self.master_remote, + 'install_ceph_salt.sh', + args=[ceph_salt_ctx['repo'], ceph_salt_ctx['branch']] + ) + else: + self.scripts.run( + self.master_remote, + 'install_ceph_salt.sh' + ) + self.ctx.cluster.run(args='sudo systemctl restart salt-minion') + self.master_remote.sh("sudo systemctl restart salt-master") + + def _populate_ceph_salt_context(self): + global ceph_salt_ctx + ceph_salt_ctx['log_anchor'] = self.config.get( + 'log_anchor', self.log_anchor_str) + if not isinstance(ceph_salt_ctx['log_anchor'], str): + self.log.warning( + "log_anchor was set to non-string value ->{}<-, " + "changing to empty string" + .format(ceph_salt_ctx['log_anchor']) + ) + ceph_salt_ctx['log_anchor'] = '' + ceph_salt_ctx['deploy'] = self.config.get('deploy', True) + ceph_salt_ctx['quiet_salt'] = self.config.get('quiet_salt', True) + ceph_salt_ctx['salt_manager_instance'] = SaltManager(self.ctx) + ceph_salt_ctx['master_remote'] = ( + ceph_salt_ctx['salt_manager_instance'].master_remote + ) + ceph_salt_ctx['repo'] = self.config.get('repo', None) + ceph_salt_ctx['branch'] = self.config.get('branch', None) + + def _get_bootstrap_remote(self): + ''' + Get the bootstrap node that's one with 'mon' and 'mgr' roles + and 
will be used by ceph-salt for bootstraping the cluster and then + by cephadm to deploy the rest of the nodes + ''' + for host, roles in self.remote_lookup_table.items(): + # possibly use teuthology.is_type() here + if ("mon" in [r.split('.')[0] for r in roles] and + "mgr" in [r.split('.')[0] for r in roles]): + self.bootstrap_remote = self.remotes[host] + break + if not self.bootstrap_remote: + raise ConfigError("No possible bootstrap minion found." + " Please check the provided roles") + self.log.info("Bootstrap minion is: {}" + .format(self.bootstrap_remote.hostname)) + cluster_name = self.cluster + fsid = str(uuid.uuid1()) + self.log.info('Cluster fsid is %s' % fsid) + self.ctx.ceph[cluster_name].fsid = fsid + fsid = self.ctx.ceph[cluster_name].fsid + self.ctx.ceph[cluster_name].bootstrap_remote = self.bootstrap_remote + for roles in self.remote_lookup_table[self.bootstrap_remote.hostname]: + _, role, role_id = misc.split_role(roles) + if role == 'mon': + self.ctx.ceph[cluster_name].first_mon = role_id + break + for roles in self.remote_lookup_table[self.bootstrap_remote.hostname]: + _, role, role_id = misc.split_role(roles) + if role == 'mgr': + self.ctx.ceph[cluster_name].first_mgr = role_id + break + self.log.info('First mon is mon.%s on %s' % ( + self.ctx.ceph[cluster_name].first_mon, + self.bootstrap_remote.shortname)) + self.log.info('First mgr is mgr.%s on %s' % ( + self.ctx.ceph[cluster_name].first_mgr, + self.bootstrap_remote.shortname)) + + remotes_and_roles = self.ctx.cluster.remotes.items() + roles = [role_list for (remote, role_list) in remotes_and_roles] + ips = [host for (host, port) in + (remote.ssh.get_transport().getpeername() + for (remote, role_list) in remotes_and_roles)] + self.ctx.ceph[cluster_name].mons = get_mons( + roles, ips, self.cluster, + mon_bind_msgr2=self.config.get('mon_bind_msgr2', True), + mon_bind_addrvec=self.config.get('mon_bind_addrvec', True), + ) + log.info('Monitor IPs: %s' % self.ctx.ceph[cluster_name].mons) + + def 
_ceph_salt_bootstrap(self): + ''' + This function populates ceph-salt config according to the + configuration on the yaml files on the suite regarding node roles, + chrony server, dashboard credentials etc and then runs the cluster + deployment + ''' + registry_cache = "192.168.0.43:5000" + container_image = ("registry.suse.de/devel/storage/7.0/" + "containers/ses/7/ceph/ceph") + fsid = self.ctx.ceph[self.cluster].fsid + first_mon = self.ctx.ceph[self.cluster].first_mon + first_mgr = self.ctx.ceph[self.cluster].first_mgr + for host, _ in self.remote_lookup_table.items(): + self.master_remote.sh("sudo ceph-salt config /ceph_cluster/minions" + " add {}".format(host)) + self.master_remote.sh("sudo ceph-salt config " + "/ceph_cluster/roles/cephadm " + "add {}".format(host)) + self.master_remote.sh("sudo ceph-salt config " + "/ceph_cluster/roles/admin " + "add {}".format(host)) + if len(self.remote_lookup_table.keys()) <= 3: + self.master_remote.sh("sudo ceph-salt config " + "/cephadm_bootstrap/ceph_conf add global") + self.master_remote.sh("sudo ceph-salt config " + "/cephadm_bootstrap/ceph_conf/global set" + " \"osd crush chooseleaf type\" 0") + self.master_remote.sh("sudo ceph-salt config " + "/ceph_cluster/roles/bootstrap " + "set {}".format(self.bootstrap_remote.hostname)) + self.master_remote.sh("sudo ceph-salt config " + "/system_update/packages disable") + self.master_remote.sh("sudo ceph-salt config " + "/system_update/reboot disable") + self.master_remote.sh("sudo ceph-salt config /ssh/ generate") + self.master_remote.sh("sudo ceph-salt config /containers/registries " + "add prefix=registry.suse.de" + " location={} insecure=true" + .format(registry_cache)) + self.master_remote.sh("sudo ceph-salt config /containers/images/ceph " + "set {}".format(container_image)) + self.ctx.ceph[self.cluster].image = container_image + self.master_remote.sh("sudo ceph-salt " + "config /time_server/server_hostname set {}" + .format(self.master_remote.hostname)) + 
self.master_remote.sh("sudo ceph-salt config " + "/time_server/external_servers add" + " 0.pt.pool.ntp.org") + self.master_remote.sh("sudo ceph-salt config " + "/cephadm_bootstrap/advanced " + "set mon-id {}".format(first_mon)) + self.master_remote.sh("sudo ceph-salt config " + "/cephadm_bootstrap/advanced set " + "mgr-id {}".format(first_mgr)) + self.master_remote.sh("sudo ceph-salt config " + "/cephadm_bootstrap/advanced set " + "fsid {}".format(fsid)) + self.master_remote.sh("sudo ceph-salt config " + "/cephadm_bootstrap/dashboard/username " + "set admin") + self.master_remote.sh("sudo ceph-salt config " + "/cephadm_bootstrap/dashboard/password " + "set admin") + self.master_remote.sh("sudo ceph-salt config ls") + if self.ceph_salt_deploy: + self.master_remote.sh("sudo stdbuf -o0 ceph-salt -ldebug apply" + " --non-interactive") + self.ctx.ceph[self.cluster].bootstrapped = True + # register initial daemons + self.ctx.daemons.register_daemon( + self.bootstrap_remote, 'mon', first_mon, + cluster=self.cluster, + fsid=fsid, + logger=log.getChild('mon.' + first_mon), + wait=False, + started=True, + ) + self.ctx.daemons.register_daemon( + self.bootstrap_remote, 'mgr', first_mgr, + cluster=self.cluster, + fsid=fsid, + logger=log.getChild('mgr.' 
+ first_mgr), + wait=False, + started=True, + ) + # fetch keys and configs + log.info('Fetching config...') + self.ctx.ceph[self.cluster].config_file = misc.get_file( + remote=self.bootstrap_remote, + path='/etc/ceph/{}.conf'.format(self.cluster)) + log.info('Fetching client.admin keyring...') + self.ctx.ceph[self.cluster].admin_keyring = misc.get_file( + remote=self.bootstrap_remote, + path='/etc/ceph/{}.client.admin.keyring'.format(self.cluster), + sudo=True) + log.info('Fetching mon keyring...') + self.ctx.ceph[self.cluster].mon_keyring = misc.get_file( + remote=self.bootstrap_remote, + path='/var/lib/ceph/%s/mon.%s/keyring' % (fsid, first_mon), + sudo=True) + + def __zypper_ps_with_possible_reboot(self): + if self.sm.all_minions_zypper_ps_requires_reboot(): + log_spec = "Detected updates requiring reboot" + self.log.warning(anchored(log_spec)) + if self.reboots_explicitly_forbidden: + self.log.info("Reboots explicitly forbidden in test " + "configuration: not rebooting") + self.log.warning("Processes using deleted files may " + "cause instability") + else: + self.log.warning(anchored("Rebooting the whole cluster now!")) + self.reboot_the_cluster_now(log_spec=log_spec) + assert not self.sm.all_minions_zypper_ps_requires_reboot(), \ + "No more updates requiring reboot anywhere "\ + "in the whole cluster" + + def reboot_the_cluster_now(self, log_spec=None): + global reboot_tries + if not log_spec: + log_spec = "all nodes reboot now" + cmd_str = "salt \\* cmd.run reboot" + if self.quiet_salt: + cmd_str += " 2> /dev/null" + remote_exec( + self.master_remote, + cmd_str, + self.log, + log_spec, + rerun=False, + quiet=True, + tries=reboot_tries, + ) + self.sm.ping_minions() + + def begin(self): + global ceph_salt_ctx + super(CephSalt, self).begin() + self._get_bootstrap_remote() + self._install_ceph_salt() + self.sm.ping_minions() + self.sm.all_minions_zypper_ref() + self.sm.all_minions_zypper_up_if_needed() + self.__zypper_ps_with_possible_reboot() + 
# Append a drive-group spec for host $1 with an OSD limit of $2 to dg.yaml.
# Both arguments are mandatory.
#
# BUG FIX: '[ -n $1 ]' with an unset/empty $1 collapses to '[ -n ]', a
# one-argument test that is ALWAYS true, so the missing-argument guard
# never fired.  The operands must be quoted.
if [ -n "$1" ] && [ -n "$2" ]
then
# NOTE(review): the wildcard belongs INSIDE the quotes — '"$1"*' emitted
# e.g. host_pattern: "node1"*, which is not a valid YAML scalar.
# NOTE(review): service_type nested under placement looks inverted w.r.t.
# the cephadm OSD service spec (service_type is normally top-level) —
# confirm against the consumer of dg.yaml before restructuring.
cat >> /home/ubuntu/dg.yaml << EOF
  placement:
  service_type: osd
  host_pattern: "$1*"
  data_devices:
    all: 'true'
  limit: "$2"
EOF
else
  echo "Either hostname or OSD count is missing"
  # 'return' is only valid in a sourced script/function; when executed
  # normally it is an error.  'exit 1' reports failure in both cases
  # that matter here (the script is run, not sourced, by the task).
  exit 1
fi
# Install ceph-salt on the Salt master.
#   $1 (optional): git repo URL — build and install from source
#   $2 (optional): branch to check out (default: master)
# Without arguments, install the ceph-salt RPM via zypper.
REPO=$1
BRANCH=${2:-"master"}
if [ -n "$REPO" ]
then
    # Quote expansions so URLs/branch names with special characters are
    # not word-split or glob-expanded.
    git clone "$REPO"
    cd ceph-salt
    zypper -n install autoconf gcc python3-devel python3-pip python3-curses
    git checkout "$BRANCH"
    pip install .
    # Make the ceph-salt formula available to the Salt master.
    cp -r ceph-salt-formula/salt/* /srv/salt/
    chown -R salt:salt /srv
else
    zypper -n install ceph-salt
fi