When using docker as your device, you might need to run the script with sudo
permissions.
+run-cephadm-e2e-tests.sh
+.........................
+
+``run-cephadm-e2e-tests.sh`` runs a subset of the E2E tests to verify that the Dashboard
+and cephadm (as the Orchestrator backend) behave correctly.
+
+Prerequisites: you need to install `KCLI
+<https://kcli.readthedocs.io/en/latest/>`_ on your local machine.
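+
+For instance, KCLI can be installed from PyPI (a sketch; see the KCLI documentation for
+the full installation instructions, including the libvirt requirements)::
+
+ $ pip3 install kcli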
+
+Note:
+ This script is meant to be run as a Jenkins job, so the cleanup is triggered only in a
+ Jenkins environment. When running locally, shut down the cluster yourself when desired
+ (e.g. after debugging).
+
+Start the E2E tests by running::
+
+ $ cd <your/ceph/repo/dir>
+ $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/dist src/pybind/mgr/dashboard/frontend/node_modules
+ $ ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
+ $ kcli delete plan -y ceph # After tests finish.
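+
+While the cluster is running you can also access the Dashboard manually. Its URL is built
+the same way the script builds ``CYPRESS_BASE_URL`` (a sketch, assuming the default
+``admin``/``password`` credentials from the cluster plan)::
+
+ $ kcli info vm ceph-node-00 -f ip -v  # Dashboard listens on https://<this-ip>:8443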
+
Other running options
.....................
--- /dev/null
+#!/usr/bin/env bash
+
+export PATH=/root/bin:$PATH
+mkdir /root/bin
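+# Use the cephadm binary from the shared Ceph dev folder when one is mounted;
+# otherwise download the latest cephadm from the master branch.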
+{% if ceph_dev_folder is defined %}
+ cp /mnt/{{ ceph_dev_folder }}/src/cephadm/cephadm /root/bin/cephadm
+{% else %}
+ cd /root/bin
+ curl --silent --remote-name --location https://raw.githubusercontent.com/ceph/ceph/master/src/cephadm/cephadm
+{% endif %}
+chmod +x /root/bin/cephadm
+mkdir -p /etc/ceph
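+# Use this node's eth0 address as the monitor IP for bootstrap.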
+mon_ip=$(ifconfig eth0 | grep 'inet ' | awk '{ print $2}')
+{% if ceph_dev_folder is defined %}
+ cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/{{ ceph_dev_folder }}
+{% else %}
+ cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate
+{% endif %}
+fsid=$(cat /etc/ceph/ceph.conf | grep fsid | awk '{ print $3}')
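+# Copy the cluster SSH key to the remaining nodes so cephadm can manage them.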
+{% for number in range(1, nodes) %}
+ ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no root@{{ prefix }}-node-0{{ number }}.{{ domain }}
+{% endfor %}
--- /dev/null
+parameters:
+ nodes: 3
+ pool: default
+ network: default
+ domain: cephlab.com
+ prefix: ceph
+ numcpus: 1
+ memory: 2048
+ image: fedora34
+ notify: false
+ admin_password: password
+ disks:
+ - 15
+ - 5
+
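+# Create one VM per node; only node 00 runs the cluster bootstrap script.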
+{% for number in range(0, nodes) %}
+{{ prefix }}-node-0{{ number }}:
+ image: {{ image }}
+ numcpus: {{ numcpus }}
+ memory: {{ memory }}
+ reserveip: true
+ reservedns: true
+ sharedkey: true
+ domain: {{ domain }}
+ nets:
+ - {{ network }}
+ disks: {{ disks }}
+ pool: {{ pool }}
+ {% if ceph_dev_folder is defined %}
+ sharedfolders: [{{ ceph_dev_folder }}]
+ {% endif %}
+ cmds:
+ - dnf -y install python3 chrony lvm2 podman
+ - sed -i "s/SELINUX=enforcing/SELINUX=permissive/" /etc/selinux/config
+ - setenforce 0
+ {% if number == 0 %}
+ scripts:
+ - bootstrap-cluster.sh
+ {% endif %}
+{% endfor %}
--- /dev/null
+#!/usr/bin/env bash
+
+set -ex
+
+cleanup() {
+ if [[ -n "$JENKINS_HOME" ]]; then
+ printf "\n\nStarting cleanup...\n\n"
+ kcli delete plan -y ceph || true
+ sudo podman container prune -f
+ printf "\n\nCleanup completed.\n\n"
+ fi
+}
+
+on_error() {
+ if [ "$1" != "0" ]; then
+ printf "\n\nERROR $1 thrown on line $2\n\n"
+ printf "\n\nCollecting info...\n\n"
+ for vm_id in 0 1 2
+ do
+ local vm="ceph-node-0${vm_id}"
+ printf "\n\nDisplaying journalctl from VM ${vm}:\n\n"
+ kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true
+ printf "\n\nEnd of journalctl from VM ${vm}\n\n"
+ printf "\n\nDisplaying podman logs:\n\n"
+ kcli ssh -u root -- ${vm} 'podman logs --names --since 30s $(podman ps -aq)' || true
+ done
+ printf "\n\nTEST FAILED.\n\n"
+ fi
+}
+
+trap 'on_error $? $LINENO' ERR
+trap 'cleanup $? $LINENO' EXIT
+
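+# Remove SSH host keys left over from previous test clusters.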
+sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+
+# Required to start dashboard.
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+NG_CLI_ANALYTICS=false npm ci
+npm run build
+
+cd ${CEPH_DEV_FOLDER}
+kcli delete plan -y ceph || true
+kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml -P ceph_dev_folder=${CEPH_DEV_FOLDER} ceph
+
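+# Wait until the bootstrap script reports (via cloud-init) that the Dashboard is available.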
+while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "Dashboard is now available") ]]; do
+ sleep 30
+ kcli list vm
+ # Uncomment for debugging purposes.
+ #kcli ssh -u root -- ceph-node-00 'podman ps -a'
+ #kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s $(podman ps -aq)'
+ kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
+done
+
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+npx cypress info
+
+: ${CYPRESS_BASE_URL:=''}
+: ${CYPRESS_LOGIN_USER:='admin'}
+: ${CYPRESS_LOGIN_PWD:='password'}
+: ${CYPRESS_ARGS:=''}
+
+if [[ -z "${CYPRESS_BASE_URL}" ]]; then
+ CYPRESS_BASE_URL="https://$(kcli info vm ceph-node-00 -f ip -v | sed -e 's/[^0-9.]//'):8443"
+fi
+
+export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD
+
+cypress_run () {
+ local specs="$1"
+ local timeout="$2"
+ local override_config="ignoreTestFiles=*.po.ts,retries=0,testFiles=${specs}"
+
+ if [[ -n "$timeout" ]]; then
+ override_config="${override_config},defaultCommandTimeout=${timeout}"
+ fi
+ npx cypress run ${CYPRESS_ARGS} --browser chrome --headless --config "$override_config"
+}
+
+cypress_run "orchestrator/workflow/*-spec.ts"
}
@PageHelper.restrictTo(pages.create.url)
- add(hostname: string, exist?: boolean) {
+ add(hostname: string, exist?: boolean, maintenance?: boolean) {
cy.get(`${this.pages.create.id}`).within(() => {
cy.get('#hostname').type(hostname);
+ if (maintenance) {
+ cy.get('label[for=maintenance]').click();
+ }
cy.get('cd-submit-button').click();
});
if (exist) {
--- /dev/null
+import { HostsPageHelper } from 'cypress/integration/cluster/hosts.po';
+
+describe('Hosts page', () => {
+ const hosts = new HostsPageHelper();
+ const hostnames = ['ceph-node-00.cephlab.com', 'ceph-node-01.cephlab.com'];
+ const addHost = (hostname: string, exist?: boolean, maintenance?: boolean) => {
+ hosts.navigateTo('create');
+ hosts.add(hostname, exist, maintenance);
+ hosts.checkExist(hostname, true);
+ };
+
+ beforeEach(() => {
+ cy.login();
+ Cypress.Cookies.preserveOnce('token');
+ hosts.navigateTo();
+ });
+
+ describe('when Orchestrator is available', () => {
+ it('should display inventory', function () {
+ hosts.clickHostTab(hostnames[0], 'Physical Disks');
+ cy.get('cd-host-details').within(() => {
+ hosts.getTableCount('total').should('be.gte', 0);
+ });
+ });
+
+ it('should display daemons', function () {
+ hosts.clickHostTab(hostnames[0], 'Daemons');
+ cy.get('cd-host-details').within(() => {
+ hosts.getTableCount('total').should('be.gte', 0);
+ });
+ });
+
+ it('should edit host labels', function () {
+ const labels = ['foo', 'bar'];
+ hosts.editLabels(hostnames[0], labels, true);
+ hosts.editLabels(hostnames[0], labels, false);
+ });
+
+ it('should not add an existing host', function () {
+ hosts.navigateTo('create');
+ hosts.add(hostnames[0], true);
+ });
+
+ it('should add a host in maintenance mode', function () {
+ addHost(hostnames[1], false, true);
+ });
+
+ it('should delete a host and add it back', function () {
+ hosts.delete(hostnames[1]);
+ addHost(hostnames[1], false, true);
+ });
+
+ it('should exit host from maintenance', function () {
+ hosts.maintenance(hostnames[1], true);
+ });
+ });
+});