From daf405ea5ba98a41e46638e1d7d9e1492c453349 Mon Sep 17 00:00:00 2001
From: John Mulligan
Date: Sat, 10 Aug 2024 14:42:16 -0400
Subject: [PATCH] qa/suites/orch: add a pair of teuthology tests for ctdb smb
 clusters

Signed-off-by: John Mulligan
---
 .../tasks/deploy_smb_mgr_ctdb_res_basic.yaml | 135 +++++++++++++++++
 .../tasks/deploy_smb_mgr_ctdb_res_dom.yaml   | 138 ++++++++++++++++++
 2 files changed, 273 insertions(+)
 create mode 100644 qa/suites/orch/cephadm/smb/tasks/deploy_smb_mgr_ctdb_res_basic.yaml
 create mode 100644 qa/suites/orch/cephadm/smb/tasks/deploy_smb_mgr_ctdb_res_dom.yaml

diff --git a/qa/suites/orch/cephadm/smb/tasks/deploy_smb_mgr_ctdb_res_basic.yaml b/qa/suites/orch/cephadm/smb/tasks/deploy_smb_mgr_ctdb_res_basic.yaml
new file mode 100644
index 00000000000..b9b0ec0d6f1
--- /dev/null
+++ b/qa/suites/orch/cephadm/smb/tasks/deploy_smb_mgr_ctdb_res_basic.yaml
@@ -0,0 +1,135 @@
+roles:
+# Test is for clustered smb deployment & functionality; needs a three node cluster
+- - host.a
+  - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - client.0
+- - host.b
+  - mon.b
+  - osd.2
+  - osd.3
+- - host.c
+  - mon.c
+  - osd.4
+  - osd.5
+# Reserve a host for acting as an smb client
+- - host.d
+  - cephadm.exclude
+overrides:
+  ceph:
+    log-only-match:
+      - CEPHADM_
+tasks:
+- cephadm.configure_samba_client_container:
+    role: host.d
+- cephadm:
+
+- cephadm.shell:
+    host.a:
+      - ceph fs volume create cephfs
+- cephadm.wait_for_service:
+    service: mds.cephfs
+
+- cephadm.shell:
+    host.a:
+      # add subvolgroup & subvolumes for test
+      - cmd: ceph fs subvolumegroup create cephfs smb
+      - cmd: ceph fs subvolume create cephfs sv1 --group-name=smb --mode=0777
+      - cmd: ceph fs subvolume create cephfs sv2 --group-name=smb --mode=0777
+      # set up smb cluster and shares
+      - cmd: ceph mgr module enable smb
+      # TODO: replace sleep with poll of mgr state?
+      - cmd: sleep 30
+      - cmd: ceph smb apply -i -
+        stdin: |
+          # --- Begin Embedded YAML
+          - resource_type: ceph.smb.cluster
+            cluster_id: uctdb1
+            auth_mode: user
+            user_group_settings:
+              - {source_type: resource, ref: ug1}
+            placement:
+              count: 3
+          - resource_type: ceph.smb.usersgroups
+            users_groups_id: ug1
+            values:
+              users:
+                - {name: user1, password: t3stP4ss1}
+                - {name: user2, password: t3stP4ss2}
+              groups: []
+          - resource_type: ceph.smb.share
+            cluster_id: uctdb1
+            share_id: share1
+            cephfs:
+              volume: cephfs
+              subvolumegroup: smb
+              subvolume: sv1
+              path: /
+          - resource_type: ceph.smb.share
+            cluster_id: uctdb1
+            share_id: share2
+            cephfs:
+              volume: cephfs
+              subvolumegroup: smb
+              subvolume: sv2
+              path: /
+          # --- End Embedded YAML
+# Wait for the smb service to start
+- cephadm.wait_for_service:
+    service: smb.uctdb1
+# Since this is a true cluster, there should be cluster metadata stored in rados
+- cephadm.shell:
+    host.a:
+      - cmd: rados --pool=.smb -N uctdb1 get cluster.meta.json /dev/stdout
+
+# Check that the shares exist
+- cephadm.exec:
+    host.d:
+      - sleep 30
+      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user1%t3stP4ss1 //{{'host.a'|role_to_remote|attr('ip_address')}}/share1 -c ls"
+      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user2%t3stP4ss2 //{{'host.a'|role_to_remote|attr('ip_address')}}/share2 -c ls"
+
+# verify CTDB is healthy and the cluster is well formed
+- cephadm.exec:
+    host.a:
+      - "{{ctx.cephadm}} ls --no-detail | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith(\"smb.uctdb1\")))[-1].name' > /tmp/svcname"
+      - "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status"
+      - cat /tmp/ctdb_status
+      - grep 'pnn:0 .*OK' /tmp/ctdb_status
+      - grep 'pnn:1 .*OK' /tmp/ctdb_status
+      - grep 'pnn:2 .*OK' /tmp/ctdb_status
+      - grep 'Number of nodes:3' /tmp/ctdb_status
+      - rm -rf /tmp/svcname /tmp/ctdb_status
+
+# Test a different host in the cluster
+- cephadm.exec:
+    host.d:
+      - sleep 30
+      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user1%t3stP4ss1 //{{'host.c'|role_to_remote|attr('ip_address')}}/share1 -c ls"
+      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U user2%t3stP4ss2 //{{'host.c'|role_to_remote|attr('ip_address')}}/share2 -c ls"
+
+- cephadm.shell:
+    host.a:
+      - cmd: ceph smb apply -i -
+        stdin: |
+          # --- Begin Embedded YAML
+          - resource_type: ceph.smb.cluster
+            cluster_id: uctdb1
+            intent: removed
+          - resource_type: ceph.smb.usersgroups
+            users_groups_id: ug1
+            intent: removed
+          - resource_type: ceph.smb.share
+            cluster_id: uctdb1
+            share_id: share1
+            intent: removed
+          - resource_type: ceph.smb.share
+            cluster_id: uctdb1
+            share_id: share2
+            intent: removed
+          # --- End Embedded YAML
+# Wait for the smb service to be removed
+- cephadm.wait_for_service_not_present:
+    service: smb.uctdb1
diff --git a/qa/suites/orch/cephadm/smb/tasks/deploy_smb_mgr_ctdb_res_dom.yaml b/qa/suites/orch/cephadm/smb/tasks/deploy_smb_mgr_ctdb_res_dom.yaml
new file mode 100644
index 00000000000..b74593058e2
--- /dev/null
+++ b/qa/suites/orch/cephadm/smb/tasks/deploy_smb_mgr_ctdb_res_dom.yaml
@@ -0,0 +1,138 @@
+roles:
+# Test is for clustered smb deployment & functionality; needs a three node cluster
+- - host.a
+  - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - client.0
+- - host.b
+  - mon.b
+  - osd.2
+  - osd.3
+- - host.c
+  - mon.c
+  - osd.4
+  - osd.5
+# Reserve a host for acting as a domain controller and smb client
+- - host.d
+  - cephadm.exclude
+overrides:
+  ceph:
+    log-only-match:
+      - CEPHADM_
+tasks:
+- cephadm.deploy_samba_ad_dc:
+    role: host.d
+- cephadm:
+
+- cephadm.shell:
+    host.a:
+      - ceph fs volume create cephfs
+- cephadm.wait_for_service:
+    service: mds.cephfs
+
+- cephadm.shell:
+    host.a:
+      # add subvolgroup & subvolumes for test
+      - cmd: ceph fs subvolumegroup create cephfs smb
+      - cmd: ceph fs subvolume create cephfs sv1 --group-name=smb --mode=0777
+      - cmd: ceph fs subvolume create cephfs sv2 --group-name=smb --mode=0777
+      # set up smb cluster and shares
+      - cmd: ceph mgr module enable smb
+      # TODO: replace sleep with poll of mgr state?
+      - cmd: sleep 30
+      - cmd: ceph smb apply -i -
+        stdin: |
+          # --- Begin Embedded YAML
+          - resource_type: ceph.smb.cluster
+            cluster_id: adctdb1
+            auth_mode: active-directory
+            domain_settings:
+              realm: DOMAIN1.SINK.TEST
+              join_sources:
+                - source_type: resource
+                  ref: join1-admin
+            custom_dns:
+              - "{{ctx.samba_ad_dc_ip}}"
+            placement:
+              count: 3
+          - resource_type: ceph.smb.join.auth
+            auth_id: join1-admin
+            auth:
+              username: Administrator
+              password: Passw0rd
+          - resource_type: ceph.smb.share
+            cluster_id: adctdb1
+            share_id: share1
+            cephfs:
+              volume: cephfs
+              subvolumegroup: smb
+              subvolume: sv1
+              path: /
+          - resource_type: ceph.smb.share
+            cluster_id: adctdb1
+            share_id: share2
+            cephfs:
+              volume: cephfs
+              subvolumegroup: smb
+              subvolume: sv2
+              path: /
+          # --- End Embedded YAML
+# Wait for the smb service to start
+- cephadm.wait_for_service:
+    service: smb.adctdb1
+# Since this is a true cluster, there should be cluster metadata stored in rados
+- cephadm.shell:
+    host.a:
+      - cmd: rados --pool=.smb -N adctdb1 get cluster.meta.json /dev/stdout
+
+# Check that the shares exist
+- cephadm.exec:
+    host.d:
+      - sleep 30
+      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share1 -c ls"
+      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{'host.a'|role_to_remote|attr('ip_address')}}/share2 -c ls"
+
+# verify CTDB is healthy and the cluster is well formed
+- cephadm.exec:
+    host.a:
+      - "{{ctx.cephadm}} ls --no-detail | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith(\"smb.adctdb1\")))[-1].name' > /tmp/svcname"
+      - "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status"
+      - cat /tmp/ctdb_status
+      - grep 'pnn:0 .*OK' /tmp/ctdb_status
+      - grep 'pnn:1 .*OK' /tmp/ctdb_status
+      - grep 'pnn:2 .*OK' /tmp/ctdb_status
+      - grep 'Number of nodes:3' /tmp/ctdb_status
+      - rm -rf /tmp/svcname /tmp/ctdb_status
+
+# Test a different host in the cluster
+- cephadm.exec:
+    host.d:
+      - sleep 30
+      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{'host.c'|role_to_remote|attr('ip_address')}}/share1 -c ls"
+      - "{{ctx.samba_client_container_cmd|join(' ')}} smbclient -U DOMAIN1\\\\ckent%1115Rose. //{{'host.c'|role_to_remote|attr('ip_address')}}/share2 -c ls"
+
+- cephadm.shell:
+    host.a:
+      - cmd: ceph smb apply -i -
+        stdin: |
+          # --- Begin Embedded YAML
+          - resource_type: ceph.smb.cluster
+            cluster_id: adctdb1
+            intent: removed
+          - resource_type: ceph.smb.join.auth
+            auth_id: join1-admin
+            intent: removed
+          - resource_type: ceph.smb.share
+            cluster_id: adctdb1
+            share_id: share1
+            intent: removed
+          - resource_type: ceph.smb.share
+            cluster_id: adctdb1
+            share_id: share2
+            intent: removed
+          # --- End Embedded YAML
+# Wait for the smb service to be removed
+- cephadm.wait_for_service_not_present:
+    service: smb.adctdb1
-- 
2.39.5
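
Note: both files carry the comment "TODO: replace sleep with poll of mgr
state?". A minimal sketch of what such a poll could look like, assuming
only the stock "ceph mgr module ls" command plus jq; the retry bound, the
interval, and the JSON field name are assumptions to check against a live
cluster, not part of the patch:

    # Wait up to ~60s for the smb mgr module to be reported as enabled,
    # rather than sleeping a fixed 30s. enabled_modules is the list that
    # ceph mgr module ls prints in JSON mode; 30 tries x 2s is arbitrary.
    for _ in $(seq 1 30); do
        if ceph mgr module ls --format=json |
                jq -e '.enabled_modules | index("smb")' >/dev/null; then
            break
        fi
        sleep 2
    done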
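
For anyone unpicking the Jinja templating in the cephadm.exec steps:
teuthology substitutes the client container command and the target host's
IP address at run time, so each check reduces to an ordinary smbclient
call. A hypothetical rendered form of one check (the container invocation
and the address are illustrative placeholders, not values from the patch):

    # Hypothetical rendering; the image and the IP are placeholders only.
    podman run --rm quay.io/samba.org/samba-client:latest \
        smbclient -U 'DOMAIN1\ckent%1115Rose.' '//192.0.2.10/share1' -c ls

The four backslashes in the YAML collapse to the single one smbclient
needs: one escaping level is consumed by the YAML double-quoted string and
one by the shell.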
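
The grep assertions against /tmp/ctdb_status assume "ctdb status" output
of roughly this shape (addresses illustrative; only the per-node
"pnn:N ... OK" lines and the node count line are asserted):

    Number of nodes:3
    pnn:0 192.0.2.11       OK (THIS NODE)
    pnn:1 192.0.2.12       OK
    pnn:2 192.0.2.13       OK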