From c8515ff576be1db65bae72ec759fe59fdc086768 Mon Sep 17 00:00:00 2001
From: Alfredo Deza
Date: Fri, 28 Apr 2017 16:02:23 -0400
Subject: [PATCH] ceph-ansible-scenario: create an individual scenario job

Signed-off-by: Alfredo Deza
---
 ceph-ansible-scenario/build/build                      | 25 +++++++++
 ceph-ansible-scenario/build/teardown                   | 20 +++++++
 .../definitions/ceph-ansible-scenario.yml              | 52 +++++++++++++++++++
 3 files changed, 97 insertions(+)
 create mode 100644 ceph-ansible-scenario/build/build
 create mode 100644 ceph-ansible-scenario/build/teardown
 create mode 100644 ceph-ansible-scenario/config/definitions/ceph-ansible-scenario.yml

diff --git a/ceph-ansible-scenario/build/build b/ceph-ansible-scenario/build/build
new file mode 100644
index 00000000..9da5957b
--- /dev/null
+++ b/ceph-ansible-scenario/build/build
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# the following two methods exist in scripts/build_utils.sh
+pkgs=( "tox" )
+install_python_packages "pkgs[@]"
+
+# XXX this might not be needed
+source $VENV/activate
+
+WORKDIR=$(mktemp -td tox.XXXXXXXXXX)
+
+# Sometimes, networks may linger around, so we must ensure they are killed:
+networks=`sudo virsh net-list --all | grep active | egrep -v "(default|libvirt)" | cut -d ' ' -f 2`
+for network in $networks; do
+    sudo virsh net-destroy $network || true
+    sudo virsh net-undefine $network || true
+done
+
+# restart libvirt services
+sudo service libvirt-bin restart
+sudo service libvirt-guests restart
+
+# the $SCENARIO var is injected by the job configuration. It maps
+# to an actual, defined, tox environment
+$VENV/tox -rv -e=$SCENARIO --workdir=$WORKDIR -- --provider=libvirt
diff --git a/ceph-ansible-scenario/build/teardown b/ceph-ansible-scenario/build/teardown
new file mode 100644
index 00000000..27b9bfa6
--- /dev/null
+++ b/ceph-ansible-scenario/build/teardown
@@ -0,0 +1,20 @@
+#!/bin/bash
+# There has to be a better way to do this than this script which just looks
+# for every Vagrantfile in scenarios and then just destroys whatever is left.
+
+cd $WORKSPACE/tests
+
+scenarios=$(find . | grep Vagrantfile | xargs dirname)
+
+for scenario in $scenarios; do
+    cd $scenario
+    vagrant destroy -f
+    cd -
+done
+
+# Sometimes, networks may linger around, so we must ensure they are killed:
+networks=`sudo virsh net-list --all | grep active | egrep -v "(default|libvirt)" | cut -d ' ' -f 2`
+for network in $networks; do
+    sudo virsh net-destroy $network || true
+    sudo virsh net-undefine $network || true
+done
diff --git a/ceph-ansible-scenario/config/definitions/ceph-ansible-scenario.yml b/ceph-ansible-scenario/config/definitions/ceph-ansible-scenario.yml
new file mode 100644
index 00000000..dddd58f3
--- /dev/null
+++ b/ceph-ansible-scenario/config/definitions/ceph-ansible-scenario.yml
@@ -0,0 +1,52 @@
+
+- job:
+    name: 'ceph-ansible-scenario'
+    node: vagrant&&libvirt
+    concurrent: false
+    defaults: global
+    display-name: 'ceph-ansible: individual scenario testing'
+    quiet-period: 5
+    block-downstream: false
+    block-upstream: false
+    retry-count: 3
+    properties:
+      - github:
+          url: https://github.com/ceph/ceph-ansible
+    logrotate:
+      daysToKeep: 15
+      numToKeep: 30
+      artifactDaysToKeep: -1
+      artifactNumToKeep: -1
+
+    parameters:
+      - string:
+          name: SCENARIO
+          description: "A full scenario name for ceph-ansible testing, like jewel-ansible2.2-purge_cluster"
+      - string:
+          name: BRANCH
+          description: "The ceph-ansible branch to test against"
+          default: "master"
+
+    scm:
+      - git:
+          url: https://github.com/ceph/ceph-ansible.git
+          branches:
+            - $BRANCH
+          refspec: +refs/pull/*:refs/remotes/origin/pr/*
+          browser: auto
+          timeout: 20
+          skip-tag: true
+          wipe-workspace: false
+
+    builders:
+      - shell:
+          !include-raw-escape:
+            - ../../../scripts/build_utils.sh
+            - ../../build/build
+
+    publishers:
+      - postbuildscript:
+          script-only-if-succeeded: False
+          script-only-if-failed: True
+          builders:
+            - shell: !include-raw ../../build/teardown
-- 
2.39.5
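
Note: the SCENARIO parameter is expected to name a tox environment that already
exists in the ceph-ansible checkout, e.g. jewel-ansible2.2-purge_cluster from
the parameter description above. A rough local equivalent of what build/build
ends up running, assuming such an environment is defined in ceph-ansible's
tox.ini (the workdir path here is an arbitrary placeholder):

    # sketch only: the environment name is the example from the SCENARIO
    # parameter description; the workdir path is an arbitrary placeholder
    tox -rv -e jewel-ansible2.2-purge_cluster --workdir /tmp/tox.scratch -- --provider=libvirt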