git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/suites/orch: add a test to verify ctdb_status doesn't include GONE node 61158/head
author Avan Thakkar <athakkar@redhat.com>
Thu, 9 Jan 2025 15:40:16 +0000 (21:10 +0530)
committer Avan Thakkar <athakkar@redhat.com>
Tue, 4 Mar 2025 12:56:51 +0000 (18:26 +0530)
Signed-off-by: Avan Thakkar <athakkar@redhat.com>
qa/suites/orch/cephadm/smb/tasks/deploy_smb_ctdb_node_gone_state.yaml [new file with mode: 0644]

diff --git a/qa/suites/orch/cephadm/smb/tasks/deploy_smb_ctdb_node_gone_state.yaml b/qa/suites/orch/cephadm/smb/tasks/deploy_smb_ctdb_node_gone_state.yaml
new file mode 100644 (file)
index 0000000..0d862b2
--- /dev/null
@@ -0,0 +1,100 @@
+roles:
+# Test verifies that ctdb status does not include a node removed from the placement; a multi-node cluster is required
+- - host.a
+  - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - client.0
+- - host.b
+  - mon.b
+  - osd.2
+  - osd.3
+- - host.c
+  - mon.c
+  - osd.4
+  - osd.5
+# Reserve a host for acting as a domain controller and smb client
+- - host.d
+  - cephadm.exclude
+overrides:
+  ceph:
+    log-only-match:
+      - CEPHADM_
+tasks:
+- cephadm.configure_samba_client_container:
+    role: host.d
+- cephadm:
+
+- cephadm.shell:
+    host.a:
+      - ceph fs volume create cephfs
+- cephadm.wait_for_service:
+    service: mds.cephfs
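+# (The smb shares below are backed by CephFS, hence the volume and MDS set up above.)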
+
+- cephadm.shell:
+    host.a:
+      # add subvolume group & subvolume for the test
+      - cmd: ceph fs subvolumegroup create cephfs smb
+      - cmd: ceph fs subvolume create cephfs sv1 --group-name=smb --mode=0777
+      # set up smb cluster and shares
+      - cmd: ceph mgr module enable smb
+      - cmd: sleep 30
+      - cmd: >
+          ceph smb cluster create modusr1 user
+          --define-user-pass=user1%t3stP4ss1
+          --placement=count:3
+          --clustering=default
+      - cmd: ceph smb share create modusr1 share1 cephfs / --subvolume=smb/sv1
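+# Note on the flags above: --clustering=default is understood to enable CTDB
+# clustering whenever the placement spans more than one daemon (here count:3);
+# that behaviour is assumed by the ctdb checks below.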
+# Wait for the smb service to start
+- cephadm.wait_for_service:
+    service: smb.modusr1
+
+# verify CTDB is healthy and the cluster is well formed
+- cephadm.exec:
+    host.a:
+      - sleep 30
+      - "{{ctx.cephadm}} ls --no-detail  | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith(\"smb.modusr1\")))[-1].name' > /tmp/svcname"
+      - "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb listnodes > /tmp/ctdb_listnodes"
+      - "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status"
+      - cat /tmp/ctdb_status
+      - cat /tmp/ctdb_listnodes
+      - grep 'pnn:0 .*OK' /tmp/ctdb_status
+      - grep 'pnn:1 .*OK' /tmp/ctdb_status
+      - grep 'pnn:2 .*OK' /tmp/ctdb_status
+      - grep 'Number of nodes:3' /tmp/ctdb_status
+      - rm -rf /tmp/svcname /tmp/ctdb_status
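+# For reference, the greps above assume `ctdb status` output roughly of this
+# shape (addresses below are illustrative placeholders, not values produced by
+# this test):
+#
+#   Number of nodes:3
+#   pnn:0 10.0.0.1     OK (THIS NODE)
+#   pnn:1 10.0.0.2     OK
+#   pnn:2 10.0.0.3     OK
+#   Generation:<id>
+#   Recovery mode:NORMAL (0)
+#
+# `ctdb listnodes` simply prints the node addresses, one per line.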
+
+# Modify the placement to remove a node
+- cephadm.shell:
+    host.a:
+      - |
+        ceph orch ls --service-name smb.modusr1 --export --format json > /tmp/smb_json
+        jq '.[0] | .placement.count = 2' /tmp/smb_json > /tmp/smb_json_new
+        ceph orch apply -i /tmp/smb_json_new
+        rm -f /tmp/smb_json /tmp/smb_json_new
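+# For reference, `ceph orch ls --export --format json` emits a JSON array of
+# service specs; the jq filter above picks the only element and lowers its
+# placement count, e.g. (fields abbreviated, values illustrative):
+#
+#   [{"service_type": "smb", "service_id": "modusr1",
+#     "placement": {"count": 3}, ...}]
+#
+# becomes a single spec accepted by `ceph orch apply -i`:
+#
+#   {"service_type": "smb", "service_id": "modusr1",
+#    "placement": {"count": 2}, ...}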
+
+# Wait for the smb service to restart
+- cephadm.wait_for_service:
+    service: smb.modusr1
+
+# verify CTDB status doesn't include the node that was removed
+- cephadm.exec:
+    host.a:
+      - "{{ctx.cephadm}} ls --no-detail  | {{ctx.cephadm}} shell jq -r 'map(select(.name | startswith(\"smb.modusr1\")))[-1].name' > /tmp/svcname"
+      - "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb listnodes > /tmp/ctdb_listnodes"
+      - "{{ctx.cephadm}} enter -n $(cat /tmp/svcname) ctdb status > /tmp/ctdb_status"
+      - cat /tmp/ctdb_status
+      - cat /tmp/ctdb_listnodes
+      - grep 'pnn:0 .*OK' /tmp/ctdb_status
+      - grep 'pnn:1 .*OK' /tmp/ctdb_status
+      - grep 'Number of nodes:3 (including 1 deleted nodes)' /tmp/ctdb_status
+      - rm -rf /tmp/svcname /tmp/ctdb_status
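+# For reference, after the shrink the removed node is expected to be tracked by
+# CTDB as deleted rather than listed as a live-but-unreachable (GONE) entry,
+# i.e. status output roughly like (addresses illustrative):
+#
+#   Number of nodes:3 (including 1 deleted nodes)
+#   pnn:0 10.0.0.1     OK (THIS NODE)
+#   pnn:1 10.0.0.2     OK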
+
+- cephadm.shell:
+    host.a:
+      - cmd: ceph smb share rm modusr1 share1
+      - cmd: ceph smb cluster rm modusr1
+# Wait for the smb service to be removed
+- cephadm.wait_for_service_not_present:
+    service: smb.modusr1