--- /dev/null
+#!/bin/bash
+
+set -ex
+
+# Install the Docker engine; on Ubuntu the package is 'docker.io' (the plain 'docker'
+# package is an unrelated system-tray utility).
+sudo apt-get install -y --force-yes docker.io
+docker --version
+sudo apt-get install -y --force-yes xfsprogs
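+# Clean up any state left by a previous run, then build the container images.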
+sudo "$WORKSPACE"/travis-builds/purge_cluster.sh
+sudo "$WORKSPACE"/travis-builds/build_imgs.sh
+
+# NOTE(leseb): putting everything in a single 'script' task somehow does not work,
+# so we have to split it up this way.
+# 'docker run' appears to misbehave when it is not launched from an install step.
+#install:
+sudo "$WORKSPACE"/travis-builds/prepare_osd_fs.sh
+sudo docker run -d --name ceph-mon --net=host -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -e MON_IP=$(ip -4 -o a | awk '/eth|ens|eno|enp/ { sub ("/..", "", $4); print $4 }') -e CEPH_PUBLIC_NETWORK=$(grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/[0-9]\{1,2\}' /proc/net/fib_trie | grep -vE "^127|0$" | head -1) daemon mon
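+# Bootstrap the OSD, then start the OSD, MDS (CEPHFS_CREATE=1 also creates a
+# filesystem) and RGW daemons against the shared /etc/ceph and /var/lib/ceph volumes.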
+sudo "$WORKSPACE"/travis-builds/bootstrap_osd.sh
+sudo docker run -d --name ceph-osd --net=host -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph daemon osd_directory_single
+sudo docker run -d --name ceph-mds --net=host -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -e CEPHFS_CREATE=1 daemon mds
+sudo docker run -d --name ceph-rgw --net=host -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph daemon rgw
+
+#script:
+sudo "$WORKSPACE"/travis-builds/validate_cluster.sh
+sudo "$WORKSPACE"/travis-builds/purge_cluster.sh
+sudo "$WORKSPACE"/travis-builds/prepare_osd_fs.sh
+sudo docker run -d --name ceph-demo --net=host -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -e MON_IP=$(ip -4 -o a | awk '/eth|ens|eno|enp/ { sub ("/..", "", $4); print $4 }') -e CEPH_PUBLIC_NETWORK=$(grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/[0-9]\{1,2\}' /proc/net/fib_trie | grep -vE "^127|0$" | head -1) demo
+sudo "$WORKSPACE"/travis-builds/validate_demo_cluster.sh
+
+# on success: report the cluster status from both deployments
+sudo docker exec ceph-mon ceph -s
+sudo docker exec ceph-demo ceph -s
--- /dev/null
+#!/bin/bash
+
+# Do not exit on errors ('set -e'): depending on where the build failed, some of the
+# containers below may not exist, and we still want as much diagnostic output as possible.
+set -x
+
+# on failure:
+sudo docker images
+sudo docker ps
+sudo docker ps -a
+sudo docker logs ceph-mon
+sudo docker logs ceph-osd
+sudo docker logs ceph-mds
+sudo docker logs ceph-rgw
+sudo docker logs ceph-demo
+sudo docker exec ceph-mon ceph -s
+sudo docker exec ceph-demo ceph -s
+
+
--- /dev/null
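+# SCM definition: check out ceph-docker at the ${sha1} ref supplied by the pull
+# request trigger; the refspec maps GitHub pull request refs into origin/pr/*.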
+- scm:
+ name: ceph-docker
+ scm:
+ - git:
+ url: https://github.com/ceph/ceph-docker.git
+ branches:
+ - ${sha1}
+ refspec: +refs/pull/*:refs/remotes/origin/pr/*
+ browser: auto
+ timeout: 20
+ skip-tag: true
+ wipe-workspace: false
+ basedir: "ceph-docker"
+
+- job:
+ name: ceph-docker-pull-requests
+ node: huge
+ project-type: freestyle
+ defaults: global
+ display-name: 'ceph-docker: Pull Requests'
+ concurrent: true
+ quiet-period: 5
+ block-downstream: false
+ block-upstream: false
+ retry-count: 3
+ properties:
+ - github:
+ url: https://github.com/ceph/ceph-docker
+ logrotate:
+ daysToKeep: 15
+ numToKeep: 30
+ artifactDaysToKeep: -1
+ artifactNumToKeep: -1
+
+ parameters:
+ - string:
+ name: sha1
+ description: "A pull request ID, like 'origin/pr/72/head'"
+
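+    # The GitHub pull request trigger provides ${sha1} for the SCM above and reports
+    # the status strings below back to the pull request.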
+ triggers:
+ - github-pull-request:
+ allow-whitelist-orgs-as-admins: true
+ org-list:
+ - ceph
+ trigger-phrase: ''
+ only-trigger-phrase: false
+ github-hooks: true
+ permit-all: true
+ auto-close-on-fail: false
+ status-context: "docker image"
+ started-status: "creating docker image(s)"
+ success-status: "OK - image(s) created"
+ failure-status: "docker image failed with errors"
+
+ scm:
+ - ceph-docker
+
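+    # The shell builder concatenates build_utils.sh with the build and teardown
+    # scripts via !include-raw.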
+ builders:
+ - shell:
+ !include-raw:
+ - ../../../scripts/build_utils.sh
+ - ../../build/build
+ - ../../build/teardown
+
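+    # On failure only, run the failure script to dump image, container and log state.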
+ publishers:
+ - postbuildscript:
+ script-only-if-succeeded: False
+ script-only-if-failed: True
+ builders:
+ - shell: !include-raw ../../build/failure