From 5c03b49c4da55cf8d0c679ecb2c58182e4d3361a Mon Sep 17 00:00:00 2001
From: =?utf8?q?Alfonso=20Mart=C3=ADnez?= <almartin@redhat.com>
Date: Fri, 2 Jul 2021 10:08:49 +0200
Subject: [PATCH] mgr/dashboard: run cephadm-backend e2e tests with KCLI
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Fixes: https://tracker.ceph.com/issues/51300
Signed-off-by: Alfonso Martínez <almartin@redhat.com>
---
 doc/dev/developer_guide/dash-devel.rst        | 20 +++++
 .../dashboard/ci/cephadm/bootstrap-cluster.sh | 22 +++++
 .../mgr/dashboard/ci/cephadm/ceph_cluster.yml | 40 +++++++++
 .../ci/cephadm/run-cephadm-e2e-tests.sh       | 81 +++++++++++++++++++
 .../cypress/integration/cluster/hosts.po.ts   |  5 +-
 .../workflow/01-hosts.e2e-spec.ts             | 57 +++++++++++++
 6 files changed, 224 insertions(+), 1 deletion(-)
 create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
 create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
 create mode 100755 src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
 create mode 100644 src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-hosts.e2e-spec.ts

diff --git a/doc/dev/developer_guide/dash-devel.rst b/doc/dev/developer_guide/dash-devel.rst
index a568d977629..5c616ade37c 100644
--- a/doc/dev/developer_guide/dash-devel.rst
+++ b/doc/dev/developer_guide/dash-devel.rst
@@ -423,6 +423,26 @@ Note:
   When using docker, as your device, you might need to run the script with
   sudo permissions.
 
+run-cephadm-e2e-tests.sh
+.........................
+
+``run-cephadm-e2e-tests.sh`` runs a subset of E2E tests to verify that the Dashboard and cephadm
+as the Orchestrator backend behave correctly.
+
+Prerequisites: you need to install `KCLI
+<https://github.com/karmab/kcli>`_ on your local machine.
+
+Note:
+  This script is meant to be run as a Jenkins job, so cleanup is triggered only in a Jenkins
+  environment. Locally, shut down the cluster when you are done (e.g. after debugging).
+
+Start the E2E tests by running::
+
+  $ cd <your/ceph/repo/dir>
+  $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/dist src/pybind/mgr/dashboard/frontend/node_modules
+  $ ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
+  $ kcli delete plan -y ceph  # After the tests finish.
+
 Other running options
 .....................
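
A note on the KCLI prerequisite documented above: this patch does not install
KCLI itself. A minimal sketch of one way to satisfy the prerequisite, assuming
a Linux host with libvirt already configured and assuming the PyPI package is
named `kcli` (the KCLI documentation also covers container- and package-based
installs):

  $ pip3 install kcli   # assumed install path; see the KCLI docs for alternatives
  $ kcli list vm        # sanity check: KCLI can reach the local libvirt daemon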
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
new file mode 100755
index 00000000000..f0de590252a
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+export PATH=/root/bin:$PATH
+mkdir /root/bin
+{% if ceph_dev_folder is defined %}
+  cp /mnt/{{ ceph_dev_folder }}/src/cephadm/cephadm /root/bin/cephadm
+{% else %}
+  cd /root/bin
+  curl --silent --remote-name --location https://raw.githubusercontent.com/ceph/ceph/master/src/cephadm/cephadm
+{% endif %}
+chmod +x /root/bin/cephadm
+mkdir -p /etc/ceph
+mon_ip=$(ifconfig eth0 | grep 'inet ' | awk '{ print $2}')
+{% if ceph_dev_folder is defined %}
+  cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/{{ ceph_dev_folder }}
+{% else %}
+  cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate
+{% endif %}
+fsid=$(cat /etc/ceph/ceph.conf | grep fsid | awk '{ print $3}')
+{% for number in range(1, nodes) %}
+  ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@{{ prefix }}-node-0{{ number }}.{{ domain }}
+{% endfor %}
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
new file mode 100755
index 00000000000..80273bbfe5a
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
@@ -0,0 +1,40 @@
+parameters:
+  nodes: 3
+  pool: default
+  network: default
+  domain: cephlab.com
+  prefix: ceph
+  numcpus: 1
+  memory: 2048
+  image: fedora34
+  notify: false
+  admin_password: password
+  disks:
+  - 15
+  - 5
+
+{% for number in range(0, nodes) %}
+{{ prefix }}-node-0{{ number }}:
+  image: {{ image }}
+  numcpus: {{ numcpus }}
+  memory: {{ memory }}
+  reserveip: true
+  reservedns: true
+  sharedkey: true
+  domain: {{ domain }}
+  nets:
+  - {{ network }}
+  disks: {{ disks }}
+  pool: {{ pool }}
+  {% if ceph_dev_folder is defined %}
+  sharedfolders: [{{ ceph_dev_folder }}]
+  {% endif %}
+  cmds:
+  - dnf -y install python3 chrony lvm2 podman
+  - sed -i "s/SELINUX=enforcing/SELINUX=permissive/" /etc/selinux/config
+  - setenforce 0
+  {% if number == 0 %}
+  scripts:
+  - bootstrap-cluster.sh
+  {% endif %}
+{% endfor %}
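
ceph_cluster.yml above is a KCLI plan written as a Jinja template: KCLI expands
the `{% ... %}` blocks and `{{ ... }}` placeholders before creating the VMs, and
any key under `parameters` can be overridden on the command line with
`-P key=value`, exactly as the test script below does with `ceph_dev_folder`.
A sketch of overriding a default (and, assuming your KCLI version provides the
`render` subcommand, previewing the expanded plan without creating anything):

  $ kcli create plan -f src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml -P memory=4096 ceph
  $ kcli render -f src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml -P ceph_dev_folder=${PWD}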
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
new file mode 100755
index 00000000000..90bfa8d9ebb
--- /dev/null
+++ b/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
@@ -0,0 +1,81 @@
+#!/usr/bin/env bash
+
+set -ex
+
+cleanup() {
+    if [[ -n "$JENKINS_HOME" ]]; then
+        printf "\n\nStarting cleanup...\n\n"
+        kcli delete plan -y ceph || true
+        sudo podman container prune -f
+        printf "\n\nCleanup completed.\n\n"
+    fi
+}
+
+on_error() {
+    if [ "$1" != "0" ]; then
+        printf "\n\nERROR $1 thrown on line $2\n\n"
+        printf "\n\nCollecting info...\n\n"
+        for vm_id in 0 1 2
+        do
+            local vm="ceph-node-0${vm_id}"
+            printf "\n\nDisplaying journalctl from VM ${vm}:\n\n"
+            kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true
+            printf "\n\nEnd of journalctl from VM ${vm}\n\n"
+            printf "\n\nDisplaying podman logs:\n\n"
+            kcli ssh -u root -- ${vm} 'podman logs --names --since 30s $(podman ps -aq)' || true
+        done
+        printf "\n\nTEST FAILED.\n\n"
+    fi
+}
+
+trap 'on_error $? $LINENO' ERR
+trap 'cleanup $? $LINENO' EXIT
+
+sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+
+# Required to start dashboard.
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+NG_CLI_ANALYTICS=false npm ci
+npm run build
+
+cd ${CEPH_DEV_FOLDER}
+kcli delete plan -y ceph || true
+kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml -P ceph_dev_folder=${CEPH_DEV_FOLDER} ceph
+
+while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "Dashboard is now available") ]]; do
+    sleep 30
+    kcli list vm
+    # Uncomment for debugging purposes.
+    #kcli ssh -u root -- ceph-node-00 'podman ps -a'
+    #kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s $(podman ps -aq)'
+    kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
+done
+
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+npx cypress info
+
+: ${CYPRESS_BASE_URL:=''}
+: ${CYPRESS_LOGIN_USER:='admin'}
+: ${CYPRESS_LOGIN_PWD:='password'}
+: ${CYPRESS_ARGS:=''}
+
+if [[ -z "${CYPRESS_BASE_URL}" ]]; then
+    CYPRESS_BASE_URL="https://$(kcli info vm ceph-node-00 -f ip -v | sed -e 's/[^0-9.]//'):8443"
+fi
+
+export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD
+
+cypress_run () {
+    local specs="$1"
+    local timeout="$2"
+    local override_config="ignoreTestFiles=*.po.ts,retries=0,testFiles=${specs}"
+
+    if [[ -n "$timeout" ]]; then
+        override_config="${override_config},defaultCommandTimeout=${timeout}"
+    fi
+    npx cypress run ${CYPRESS_ARGS} --browser chrome --headless --config "$override_config"
+}
+
+cypress_run "orchestrator/workflow/*-spec.ts"
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts
index 160242d82b0..6752fe9e787 100644
--- a/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/cluster/hosts.po.ts
@@ -50,9 +50,12 @@ export class HostsPageHelper extends PageHelper {
   }
 
   @PageHelper.restrictTo(pages.create.url)
-  add(hostname: string, exist?: boolean) {
+  add(hostname: string, exist?: boolean, maintenance?: boolean) {
     cy.get(`${this.pages.create.id}`).within(() => {
       cy.get('#hostname').type(hostname);
+      if (maintenance) {
+        cy.get('label[for=maintenance]').click();
+      }
       cy.get('cd-submit-button').click();
     });
     if (exist) {
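
For interactive debugging against a cluster that is already bootstrapped, the
same variables the script exports can be set by hand; a sketch, assuming the
KCLI plan above is running and the default credentials were not changed:

  $ cd src/pybind/mgr/dashboard/frontend
  $ CYPRESS_BASE_URL="https://$(kcli info vm ceph-node-00 -f ip -v):8443" \
    CYPRESS_LOGIN_USER=admin CYPRESS_LOGIN_PWD=password \
    npx cypress open   # interactive runner; pick specs under orchestrator/workflow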
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-hosts.e2e-spec.ts b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-hosts.e2e-spec.ts
new file mode 100644
index 00000000000..b1c8ad0bbc0
--- /dev/null
+++ b/src/pybind/mgr/dashboard/frontend/cypress/integration/orchestrator/workflow/01-hosts.e2e-spec.ts
@@ -0,0 +1,57 @@
+import { HostsPageHelper } from 'cypress/integration/cluster/hosts.po';
+
+describe('Hosts page', () => {
+  const hosts = new HostsPageHelper();
+  const hostnames = ['ceph-node-00.cephlab.com', 'ceph-node-01.cephlab.com'];
+  const addHost = (hostname: string, exist?: boolean, maintenance?: boolean) => {
+    hosts.navigateTo('create');
+    hosts.add(hostname, exist, maintenance);
+    hosts.checkExist(hostname, true);
+  };
+
+  beforeEach(() => {
+    cy.login();
+    Cypress.Cookies.preserveOnce('token');
+    hosts.navigateTo();
+  });
+
+  describe('when Orchestrator is available', () => {
+    it('should display inventory', function () {
+      hosts.clickHostTab(hostnames[0], 'Physical Disks');
+      cy.get('cd-host-details').within(() => {
+        hosts.getTableCount('total').should('be.gte', 0);
+      });
+    });
+
+    it('should display daemons', function () {
+      hosts.clickHostTab(hostnames[0], 'Daemons');
+      cy.get('cd-host-details').within(() => {
+        hosts.getTableCount('total').should('be.gte', 0);
+      });
+    });
+
+    it('should edit host labels', function () {
+      const labels = ['foo', 'bar'];
+      hosts.editLabels(hostnames[0], labels, true);
+      hosts.editLabels(hostnames[0], labels, false);
+    });
+
+    it('should not add an existing host', function () {
+      hosts.navigateTo('create');
+      hosts.add(hostnames[0], true);
+    });
+
+    it('should add a host in maintenance mode', function () {
+      addHost(hostnames[1], false, true);
+    });
+
+    it('should delete a host and add it back', function () {
+      hosts.delete(hostnames[1]);
+      addHost(hostnames[1], false, true);
+    });
+
+    it('should exit host from maintenance', function () {
+      hosts.maintenance(hostnames[1], true);
+    });
+  });
+});
-- 
2.39.5
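
Since cleanup is triggered only when $JENKINS_HOME is set, a local run leaves
the cluster up for inspection. A sketch for poking at it and tearing it down
afterwards, assuming the default plan name `ceph`:

  $ kcli ssh -u root ceph-node-00    # log into the bootstrap node
  # cephadm shell -- ceph -s         # inside the VM: check cluster health
  $ kcli delete plan -y ceph         # delete all three VMs when finished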