git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/dashboard: cephadm-e2e script: improvements 42586/head
author Alfonso Martínez <almartin@redhat.com>
Mon, 2 Aug 2021 12:34:20 +0000 (14:34 +0200)
committer Alfonso Martínez <almartin@redhat.com>
Mon, 2 Aug 2021 12:34:20 +0000 (14:34 +0200)
Improvements and some adaptations related to the Jenkins job.

Fixes: https://tracker.ceph.com/issues/51612
Signed-off-by: Alfonso Martínez <almartin@redhat.com>
(cherry picked from commit 65b75000b7694cb3cbe617bbec28c513a2522be8)

    Conflicts:
        doc/dev/developer_guide/dash-devel.rst
        - Put changes in HACKING.rst, as dash-devel.rst does not exist in the octopus branch.
        src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
        - Resolve conflict caused by code that is deleted anyway.

Signed-off-by: Alfonso Martínez <almartin@redhat.com>
src/pybind/mgr/dashboard/HACKING.rst
src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh
src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml
src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh [new file with mode: 0755]
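To inspect the upstream change this backport was cherry-picked from, the hash quoted above can be queried in any checkout that contains master history (plain git, shown for convenience):

  $ git show --stat 65b75000b7694cb3cbe617bbec28c513a2522be8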

index cd841d20797ef4537e237a77762b1ca578823428..b50182c85ef3ca2fc3512558ab086bb4a5b79a69 100644 (file)
@@ -190,7 +190,14 @@ run-cephadm-e2e-tests.sh
 Orchestrator backend behave correctly.
 
 Prerequisites: you need to install `KCLI
-<https://kcli.readthedocs.io/en/latest/>`_ in your local machine.
+<https://kcli.readthedocs.io/en/latest/>`_ and Node.js on your local machine.
+
+Configure KCLI plan requirements::
+
+  $ sudo chown -R $(id -un) /var/lib/libvirt/images
+  $ mkdir -p /var/lib/libvirt/images/ceph-dashboard
+  $ kcli create pool -p /var/lib/libvirt/images/ceph-dashboard ceph-dashboard
+  $ kcli create network -c 192.168.100.0/24 ceph-dashboard
 
 Note:
   This script is intended to be run as a Jenkins job, so the cleanup is triggered only in a Jenkins
@@ -199,10 +206,17 @@ Note:
 Start E2E tests by running::
 
   $ cd <your/ceph/repo/dir>
-  $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/dist src/pybind/mgr/dashboard/frontend/node_modules
+  $ sudo chown -R $(id -un) src/pybind/mgr/dashboard/frontend/{dist,node_modules,src/environments}
   $ ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
   $ kcli delete plan -y ceph  # After tests finish.
 
+You can also start a cluster in development mode and later run E2E tests by running::
+
+  $ ./src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh --dev-mode
+  $ # Work on your feature, bug fix, ...
+  $ ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
+  $ # Remember to kill the npm build watch process, e.g.: pkill -f "ng build"
+
 Other running options
 .....................
 
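For reference, the KCLI prerequisites above can be sanity-checked before creating a plan; a minimal sketch, assuming the stock kcli subcommands (`kcli list pool`, `kcli list network`) and a working Node.js install:

  $ kcli list pool      # the ceph-dashboard pool should appear
  $ kcli list network   # the ceph-dashboard network (192.168.100.0/24) should appear
  $ node --version      # Node.js is needed for the frontend build
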
index fd836f7378e92a429529f5bedaa41b00715c42d9..af7ef81f43f3f5d182288e57e5d7d194aa79bee3 100755 (executable)
@@ -2,21 +2,14 @@
 
 export PATH=/root/bin:$PATH
 mkdir /root/bin
-{% if ceph_dev_folder is defined %}
-  cp /mnt/{{ ceph_dev_folder }}/src/cephadm/cephadm /root/bin/cephadm
-{% else %}
-  cd /root/bin
-  curl --silent --remote-name --location https://raw.githubusercontent.com/ceph/ceph/octopus/src/cephadm/cephadm
-{% endif %}
+
+cp /mnt/{{ ceph_dev_folder }}/src/cephadm/cephadm /root/bin/cephadm
 chmod +x /root/bin/cephadm
 mkdir -p /etc/ceph
 mon_ip=$(ifconfig eth0  | grep 'inet ' | awk '{ print $2}')
-{% if ceph_dev_folder is defined %}
-  cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/{{ ceph_dev_folder }}
-{% else %}
-  cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate
-{% endif %}
-fsid=$(cat /etc/ceph/ceph.conf | grep fsid | awk '{ print $3}')
+
+cephadm bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --allow-fqdn-hostname --dashboard-password-noupdate --shared_ceph_folder /mnt/{{ ceph_dev_folder }}
+
 {% for number in range(1, nodes) %}
   ssh-copy-id -f -i /etc/ceph/ceph.pub  -o StrictHostKeyChecking=no root@{{ prefix }}-node-0{{ number }}.{{ domain }}
 {% endfor %}
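
Note: the `{{ ... }}` placeholders in bootstrap-cluster.sh are Jinja variables (`ceph_dev_folder`, `admin_password`, `nodes`, `prefix`, `domain`) that kcli substitutes when rendering the plan, so the script only ever runs fully templated. As an aside, the `ifconfig`-based mon_ip extraction could be written with iproute2 instead; a minimal sketch, not part of this commit, assuming iproute2 is present in the VM image:

  # Same extraction with iproute2 instead of net-tools:
  mon_ip=$(ip -4 -o addr show eth0 | awk '{ print $4 }' | cut -d/ -f1)
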
index 80273bbfe5ace5599fcdf54b541944b0e0faec01..60440972360ed5e74c5900ea146ab15e1e7976b9 100755 (executable)
@@ -1,7 +1,7 @@
 parameters:
  nodes: 3
- pool: default
- network: default
+ pool: ceph-dashboard
+ network: ceph-dashboard
  domain: cephlab.com
  prefix: ceph
  numcpus: 1
@@ -26,15 +26,14 @@ parameters:
   - {{ network }}
  disks: {{ disks }}
  pool: {{ pool }}
- {% if ceph_dev_folder is defined %}
  sharedfolders: [{{ ceph_dev_folder }}]
- {% endif %}
+ files:
+  - bootstrap-cluster.sh
  cmds:
  - dnf -y install python3 chrony lvm2 podman
  - sed -i "s/SELINUX=enforcing/SELINUX=permissive/" /etc/selinux/config
  - setenforce 0
  {% if number == 0 %}
- scripts:
-  - bootstrap-cluster.sh
+ - bash /root/bootstrap-cluster.sh
  {% endif %}
 {% endfor %}
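
Note: moving bootstrap-cluster.sh from `scripts:` to `files:` means kcli now copies it to /root on every node, while the `{% if number == 0 %}` guard runs it (via `bash /root/bootstrap-cluster.sh`) on the first node only. Any key declared under `parameters:` can also be overridden at plan-creation time via kcli's `-P key=value` mechanism, as the scripts below do with `ceph_dev_folder`; a hedged sketch with illustrative values:

  $ kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
        -P ceph_dev_folder=${PWD} -P pool=default -P network=default ceph
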
index 90bfa8d9ebb12cd2bb0447364b7c98fdf6931bb7..178c89f5ba60fa55a89441c287ff4195d8022188 100755 (executable)
@@ -2,67 +2,23 @@
 
 set -ex
 
-cleanup() {
-    if [[ -n "$JENKINS_HOME" ]]; then
-        printf "\n\nStarting cleanup...\n\n"
-        kcli delete plan -y ceph || true
-        sudo podman container prune -f
-        printf "\n\nCleanup completed.\n\n"
-    fi
-}
-
-on_error() {
-    if [ "$1" != "0" ]; then
-        printf "\n\nERROR $1 thrown on line $2\n\n"
-        printf "\n\nCollecting info...\n\n"
-        for vm_id in 0 1 2
-        do
-            local vm="ceph-node-0${vm_id}"
-            printf "\n\nDisplaying journalctl from VM ${vm}:\n\n"
-            kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true
-            printf "\n\nEnd of journalctl from VM ${vm}\n\n"
-            printf "\n\nDisplaying podman logs:\n\n"
-            kcli ssh -u root -- ${vm} 'podman logs --names --since 30s $(podman ps -aq)' || true
-        done
-        printf "\n\nTEST FAILED.\n\n"
-    fi
-}
-
-trap 'on_error $? $LINENO' ERR
-trap 'cleanup $? $LINENO' EXIT
-
-sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts
-
-: ${CEPH_DEV_FOLDER:=${PWD}}
-
-# Required to start dashboard.
-cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
-NG_CLI_ANALYTICS=false npm ci
-npm run build
-
-cd ${CEPH_DEV_FOLDER}
-kcli delete plan -y ceph || true
-kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml -P ceph_dev_folder=${CEPH_DEV_FOLDER} ceph
-
-while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "Dashboard is now available") ]]; do
-    sleep 30
-    kcli list vm
-    # Uncomment for debugging purposes.
-    #kcli ssh -u root -- ceph-node-00 'podman ps -a'
-    #kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s $(podman ps -aq)'
-    kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
-done
-
-cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
-npx cypress info
-
 : ${CYPRESS_BASE_URL:=''}
 : ${CYPRESS_LOGIN_USER:='admin'}
 : ${CYPRESS_LOGIN_PWD:='password'}
 : ${CYPRESS_ARGS:=''}
+: ${DASHBOARD_PORT:='8443'}
+
+get_vm_ip () {
+    local ip=$(kcli info vm "$1" -f ip -v | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
+    echo -n $ip
+}
 
 if [[ -z "${CYPRESS_BASE_URL}" ]]; then
-    CYPRESS_BASE_URL="https://$(kcli info vm ceph-node-00 -f ip -v | sed -e 's/[^0-9.]//'):8443"
+    CEPH_NODE_00_IP="$(get_vm_ip ceph-node-00)"
+    if [[ -z "${CEPH_NODE_00_IP}" ]]; then
+        . "$(dirname $0)"/start-cluster.sh
+    fi
+    CYPRESS_BASE_URL="https://$(get_vm_ip ceph-node-00):${DASHBOARD_PORT}"
 fi
 
 export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD
@@ -78,4 +34,8 @@ cypress_run () {
     npx cypress run ${CYPRESS_ARGS} --browser chrome --headless --config "$override_config"
 }
 
+: ${CEPH_DEV_FOLDER:=${PWD}}
+
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+
 cypress_run "orchestrator/workflow/*-spec.ts"
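
Note: every `: ${VAR:=default}` assignment in run-cephadm-e2e-tests.sh can be overridden from the environment; in particular, a non-empty CYPRESS_BASE_URL skips the VM IP lookup (and the implicit start-cluster.sh call), so the suite can target an already-running dashboard. A usage sketch with illustrative address values:

  $ CYPRESS_BASE_URL="https://192.168.100.100:8443" \
    CYPRESS_LOGIN_USER=admin CYPRESS_LOGIN_PWD=password \
    ./src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh
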
diff --git a/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh b/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh
new file mode 100755 (executable)
index 0000000..61775d0
--- /dev/null
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+
+set -ex
+
+cleanup() {
+    set +x
+    if [[ -n "$JENKINS_HOME" ]]; then
+        printf "\n\nStarting cleanup...\n\n"
+        kcli delete plan -y ceph || true
+        docker container prune -f
+        printf "\n\nCleanup completed.\n\n"
+    fi
+}
+
+on_error() {
+    set +x
+    if [ "$1" != "0" ]; then
+        printf "\n\nERROR $1 thrown on line $2\n\n"
+        printf "\n\nCollecting info...\n\n"
+        for vm_id in 0 1 2
+        do
+            local vm="ceph-node-0${vm_id}"
+            printf "\n\nDisplaying journalctl from VM ${vm}:\n\n"
+            kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' || true
+            printf "\n\nEnd of journalctl from VM ${vm}\n\n"
+            printf "\n\nDisplaying container logs:\n\n"
+            kcli ssh -u root -- ${vm} 'podman logs --names --since 30s $(podman ps -aq)' || true
+        done
+        printf "\n\nTEST FAILED.\n\n"
+    fi
+}
+
+trap 'on_error $? $LINENO' ERR
+trap 'cleanup $? $LINENO' EXIT
+
+sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts
+
+: ${CEPH_DEV_FOLDER:=${PWD}}
+EXTRA_PARAMS=''
+DEV_MODE=''
+# Check script args/options.
+for arg in "$@"; do
+  shift
+  case "$arg" in
+    "--dev-mode") DEV_MODE='true'; EXTRA_PARAMS="-P dev_mode=${DEV_MODE}" ;;
+  esac
+done
+
+kcli delete plan -y ceph || true
+
+# Build dashboard frontend (required to start the module).
+cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
+NG_CLI_ANALYTICS=false npm ci
+FRONTEND_BUILD_OPTS='-- --prod'
+if [[ -n "${DEV_MODE}" ]]; then
+    FRONTEND_BUILD_OPTS+=' --deleteOutputPath=false --watch'
+fi
+npm run build ${FRONTEND_BUILD_OPTS} &
+
+cd ${CEPH_DEV_FOLDER}
+: ${VM_IMAGE:='fedora34'}
+: ${VM_IMAGE_URL:='https://fedora.mirror.liteserver.nl/linux/releases/34/Cloud/x86_64/images/Fedora-Cloud-Base-34-1.2.x86_64.qcow2'}
+kcli download image -p ceph-dashboard -u ${VM_IMAGE_URL} ${VM_IMAGE}
+kcli delete plan -y ceph || true
+kcli create plan -f ./src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
+    -P ceph_dev_folder=${CEPH_DEV_FOLDER} \
+    ${EXTRA_PARAMS} ceph
+
+: ${CLUSTER_DEBUG:=0}
+: ${DASHBOARD_CHECK_INTERVAL:=10}
+while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "Dashboard is now available") ]]; do
+    sleep ${DASHBOARD_CHECK_INTERVAL}
+    kcli list vm
+    if [[ ${CLUSTER_DEBUG} != 0 ]]; then
+        kcli ssh -u root -- ceph-node-00 'podman ps -a'
+        kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s $(podman ps -aq)'
+    fi
+    kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
+done
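
Note: start-cluster.sh follows the same `: ${VAR:=default}` convention, so the VM image, the dashboard polling interval and the extra container logging can be tuned without editing the script; the values below are illustrative:

  $ CLUSTER_DEBUG=1 DASHBOARD_CHECK_INTERVAL=30 VM_IMAGE=fedora34 \
    ./src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh --dev-mode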