git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/dashboard: run cephadm-backend e2e tests with KCLI (41883/head)
author    Alfonso Martínez <almartin@redhat.com>
          Fri, 2 Jul 2021 08:08:49 +0000 (10:08 +0200)
committer Alfonso Martínez <almartin@redhat.com>
          Fri, 2 Jul 2021 08:08:49 +0000 (10:08 +0200)
Fixes: https://tracker.ceph.com/issues/51300
Signed-off-by: Alfonso Martínez <almartin@redhat.com>
doc/dev/developer_guide/dash-devel.rst
src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh [new file with mode: 0755]
src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml [new file with mode: 0755]
src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh [new file with mode: 0755]
src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts
src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-hosts.e2e-spec.ts [new file with mode: 0644]

diff --git a/doc/dev/developer_guide/dash-devel.rst b/doc/dev/developer_guide/dash-devel.rst
index a568d977629c0787ccc4e77b6a534a6d91528995..5c616ade37c9450a2feaee34c4a98992a363bd41 100644 (file)
@@ -423,6 +423,26 @@ Note:
   When using docker as your device, you might need to run the script with sudo
   permissions.
 
+run-cephadm-e2e-tests.sh
+.........................
+
+``run-cephadm-e2e-tests.sh`` runs a subset of the E2E tests to verify that the Dashboard and
+cephadm as the Orchestrator backend behave correctly.
+
+Prerequisites: `KCLI
+<https://kcli.readthedocs.io/en/latest/>`_ must be installed on your local machine.
+
+Note:
+  This script is intended to run as a Jenkins job, so the cleanup is triggered only in a Jenkins
+  environment. When running locally, shut down the cluster yourself when you are done (e.g. after
+  debugging).
+
+Start E2E tests by running::
+
+  $ cd <your/ceph/repo/dir>
+  $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/dist src/pybind/mgr/dashboard/frontend/node_modules
+  $ ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
+  $ kcli delete plan -y ceph  # After tests finish.
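+
+The script also reads a few optional environment variables: ``CEPH_DEV_FOLDER`` (the location of
+your Ceph repository, defaulting to the current directory) and the ``CYPRESS_*`` variables it
+defines (base URL, login user/password, extra Cypress arguments). A minimal sketch, assuming the
+repository lives outside the current directory (the path is only an example)::
+
+  $ export CEPH_DEV_FOLDER=/path/to/ceph
+  $ ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh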
+
 Other running options
 .....................
 
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
new file mode 100755 (executable)
index 0000000..f0de590
--- /dev/null
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
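+
+# Jinja2-templated bootstrap script: KCLI renders it and runs it on the first
+# node via cloud-init, installing cephadm and bootstrapping the Ceph cluster.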
+
+export PATH=/root/bin:$PATH
+mkdir /root/bin
+{% if ceph_dev_folder is defined %}
+  cp /mnt/{{ ceph_dev_folder }}/src/cephadm/cephadm /root/bin/cephadm
+{% else %}
+  cd /root/bin
+  curl --silent --remote-name --location https://raw.githubusercontent.com/ceph/ceph/master/src/cephadm/cephadm
+{% endif %}
+chmod +x /root/bin/cephadm
+mkdir -p /etc/ceph
+mon_ip=$(ifconfig eth0  | grep 'inet ' | awk '{ print $2}')
+{% if ceph_dev_folder is defined %}
+  cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/{{ ceph_dev_folder }}
+{% else %}
+  cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate
+{% endif %}
+fsid=$(cat /etc/ceph/ceph.conf | grep fsid | awk '{ print $3}')
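+# Copy the cluster's SSH public key to the remaining nodes so that they can
+# later be added as hosts by the orchestrator.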
+{% for number in range(1, nodes) %}
+  ssh-copy-id -f -i /etc/ceph/ceph.pub  -o StrictHostKeyChecking=no root@{{ prefix }}-node-0{{ number }}.{{ domain }}
+{% endfor %}
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
new file mode 100755 (executable)
index 0000000..80273bb
--- /dev/null
@@ -0,0 +1,40 @@
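+# KCLI plan file: the parameters below feed the Jinja2 loop that creates the
+# cluster VMs (by default ceph-node-00 .. ceph-node-02).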
+parameters:
+ nodes: 3
+ pool: default
+ network: default
+ domain: cephlab.com
+ prefix: ceph
+ numcpus: 1
+ memory: 2048
+ image: fedora34
+ notify: false
+ admin_password: password
+ disks:
+ - 15
+ - 5
+
+{% for number in range(0, nodes) %}
+{{ prefix }}-node-0{{ number }}:
+ image: {{ image }}
+ numcpus: {{ numcpus }}
+ memory: {{ memory }}
+ reserveip: true
+ reservedns: true
+ sharedkey: true
+ domain: {{ domain }}
+ nets:
+  - {{ network }}
+ disks: {{ disks }}
+ pool: {{ pool }}
+ {% if ceph_dev_folder is defined %}
+ sharedfolders: [{{ ceph_dev_folder }}]
+ {% endif %}
+ cmds:
+ - dnf -y install python3 chrony lvm2 podman
+ - sed -i "s/SELINUX=enforcing/SELINUX=permissive/" /etc/selinux/config
+ - setenforce 0
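+ # Only the first node runs the bootstrap script.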
+ {% if number == 0 %}
+ scripts:
+  - bootstrap-cluster.sh
+ {% endif %}
+{% endfor %}
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
new file mode 100755 (executable)
index 0000000..90bfa8d
--- /dev/null
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+
+set -ex
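+
+# Overall flow: build the Dashboard frontend, create the KCLI plan (the VMs
+# defined in ceph_cluster.yml), wait for cephadm bootstrap to bring up the
+# Dashboard on ceph-node-00, then run the Cypress E2E specs against it.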
+
+cleanup() {
+    if [[ -n "$JENKINS_HOME" ]]; then
+        printf "\n\nStarting cleanup...\n\n"
+        kcli delete plan -y ceph || true
+        sudo podman container prune -f
+        printf "\n\nCleanup completed.\n\n"
+    fi
+}
+
+on_error() {
+    if [ "$1" != "0" ]; then
+        printf "\n\nERROR $1 thrown on line $2\n\n"
+        printf "\n\nCollecting info...\n\n"
+        for vm_id in 0 1 2
+        do
+            local vm="ceph-node-0${vm_id}"
+            printf "\n\nDisplaying journalctl from VM ${vm}:\n\n"
+            kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true
+            printf "\n\nEnd of journalctl from VM ${vm}\n\n"
+            printf "\n\nDisplaying podman logs:\n\n"
+            kcli ssh -u root -- ${vm} 'podman logs --names --since 30s $(podman ps -aq)' || true
+        done
+        printf "\n\nTEST FAILED.\n\n"
+    fi
+}
+
+trap 'on_error $? $LINENO' ERR
+trap 'cleanup $? $LINENO' EXIT
+
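+# Drop stale SSH host keys for the test VMs left over from previous runs.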
+sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+
+# Required to start dashboard.
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+NG_CLI_ANALYTICS=false npm ci
+npm run build
+
+cd ${CEPH_DEV_FOLDER}
+kcli delete plan -y ceph || true
+kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml -P ceph_dev_folder=${CEPH_DEV_FOLDER} ceph
+
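+# Wait until cephadm bootstrap (running via cloud-init on ceph-node-00)
+# reports that the Dashboard is available.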
+while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "Dashboard is now available") ]]; do
+    sleep 30
+    kcli list vm
+    # Uncomment for debugging purposes.
+    #kcli ssh -u root -- ceph-node-00 'podman ps -a'
+    #kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s $(podman ps -aq)'
+    kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
+done
+
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+npx cypress info
+
+: ${CYPRESS_BASE_URL:=''}
+: ${CYPRESS_LOGIN_USER:='admin'}
+: ${CYPRESS_LOGIN_PWD:='password'}
+: ${CYPRESS_ARGS:=''}
+
+if [[ -z "${CYPRESS_BASE_URL}" ]]; then
+    CYPRESS_BASE_URL="https://$(kcli info vm ceph-node-00 -f ip -v | sed -e 's/[^0-9.]//'):8443"
+fi
+
+export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD
+
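+# Run the given spec files headlessly, excluding page-object (*.po.ts) files;
+# an optional second argument raises Cypress' default command timeout.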
+cypress_run () {
+    local specs="$1"
+    local timeout="$2"
+    local override_config="ignoreTestFiles=*.po.ts,retries=0,testFiles=${specs}"
+
+    if [[ -n "$timeout" ]]; then
+        override_config="${override_config},defaultCommandTimeout=${timeout}"
+    fi
+    npx cypress run ${CYPRESS_ARGS} --browser chrome --headless --config "$override_config"
+}
+
+cypress_run "orchestrator/workflow/*-spec.ts"
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts
index 160242d82b04ea6fd70fd90b5a82770ffa1daf74..6752fe9e7870cb511c98f8ff9ff073137ceeeb6e 100644 (file)
@@ -50,9 +50,12 @@ export class HostsPageHelper extends PageHelper {
   }
 
   @PageHelper.restrictTo(pages.create.url)
-  add(hostname: string, exist?: boolean) {
+  add(hostname: string, exist?: boolean, maintenance?: boolean) {
     cy.get(`${this.pages.create.id}`).within(() => {
       cy.get('#hostname').type(hostname);
+      if (maintenance) {
+        cy.get('label[for=maintenance]').click();
+      }
       cy.get('cd-submit-button').click();
     });
     if (exist) {
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-hosts.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-hosts.e2e-spec.ts
new file mode 100644 (file)
index 0000000..b1c8ad0
--- /dev/null
@@ -0,0 +1,57 @@
+import { HostsPageHelper } from 'cypress/integration/cluster/hosts.po';
+
+describe('Hosts page', () => {
+  const hosts = new HostsPageHelper();
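+  // These hostnames match the VMs created by ceph_cluster.yml (ceph-node-0N.cephlab.com).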
+  const hostnames = ['ceph-node-00.cephlab.com', 'ceph-node-01.cephlab.com'];
+  const addHost = (hostname: string, exist?: boolean, maintenance?: boolean) => {
+    hosts.navigateTo('create');
+    hosts.add(hostname, exist, maintenance);
+    hosts.checkExist(hostname, true);
+  };
+
+  beforeEach(() => {
+    cy.login();
+    Cypress.Cookies.preserveOnce('token');
+    hosts.navigateTo();
+  });
+
+  describe('when Orchestrator is available', () => {
+    it('should display inventory', function () {
+      hosts.clickHostTab(hostnames[0], 'Physical Disks');
+      cy.get('cd-host-details').within(() => {
+        hosts.getTableCount('total').should('be.gte', 0);
+      });
+    });
+
+    it('should display daemons', function () {
+      hosts.clickHostTab(hostnames[0], 'Daemons');
+      cy.get('cd-host-details').within(() => {
+        hosts.getTableCount('total').should('be.gte', 0);
+      });
+    });
+
+    it('should edit host labels', function () {
+      const labels = ['foo', 'bar'];
+      hosts.editLabels(hostnames[0], labels, true);
+      hosts.editLabels(hostnames[0], labels, false);
+    });
+
+    it('should not add an existing host', function () {
+      hosts.navigateTo('create');
+      hosts.add(hostnames[0], true);
+    });
+
+    it('should add a host in maintenance mode', function () {
+      addHost(hostnames[1], false, true);
+    });
+
+    it('should delete a host and add it back', function () {
+      hosts.delete(hostnames[1]);
+      addHost(hostnames[1], false, true);
+    });
+
+    it('should exit host from maintenance', function () {
+      hosts.maintenance(hostnames[1], true);
+    });
+  });
+});