--- /dev/null
+#!/bin/bash
+set -ex
+
+# create a release directory for ceph-build tools
+mkdir -p release
+cp -a dist release/${vers}
+echo $DIST > release/${vers}/debian_dists
+echo "${debian_version}" > release/${vers}/debian_version
+
+cd release/$vers
+
+
+# HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
+# FIXME: I don't think we need this 'hack' anymore
+# Dirty Hack:
+baddist=$(echo $DIST | grep -ic -e squeeze -e wheezy || true)
+if [ $baddist -eq 1 ]
+then
+ sed -i 's/ libbabeltrace-ctf-dev, libbabeltrace-dev,//g' ceph_${vers}-1.dsc || true
+ sed -i 's/ liblttng-ust-dev//g' ceph_${vers}-1.dsc || true
+
+fi
+# HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
+
+
+# unpack sources
+dpkg-source -x ceph_${vers}-1.dsc
+
+
+# HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
+if [ $baddist -eq 1 ]
+then
+ rm -vf *.orig.tar.gz || true
+ grep -v babeltrace ceph-${vers}/debian/control | grep -v liblttng > ceph-${vers}/debian/control.new
+ mv -v ceph-${vers}/debian/control.new ceph-${vers}/debian/control
+fi
+# HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
+
+
+( cd ceph-${vers}
+ DEB_VERSION=$(dpkg-parsechangelog | sed -rne 's,^Version: (.*),\1, p')
+ BP_VERSION=${DEB_VERSION}${BPTAG}
+ dch -D $DIST --force-distribution -b -v "$BP_VERSION" "$comment"
+)
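+# For illustration only (hypothetical version): if dpkg-parsechangelog reports
+# "Version: 10.2.3-1" and BPTAG is "~bpo80+1" (jessie, per get_bptag in setup_deb),
+# the new changelog entry carries BP_VERSION=10.2.3-1~bpo80+1 targeted at $DIST.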
+dpkg-source -b ceph-${vers}
+
+echo "Building Debian"
+cd "$WORKSPACE"
+# Previously, at this point, this script called the contents below, which
+# were part of /srv/ceph-build/build_debs.sh. Now everything is in here, in one
+# place, with no need to checkout/clone anything::
+#
+# sudo $bindir/build_debs.sh ./release /srv/debian-base $vers
+
+
+releasedir="./release"
+pbuilddir="/srv/debian-base"
+cephver=$vers
+
+echo version $cephver
+
+# This used to live in a *file* on /srv/ceph-build. Now it lives here because
+# it doesn't make sense to have a file in /srv/ that we then
+# concatenate to get its contents.
+
+
+# FIXME: this looks exactly like `setup_pbuilder`; we probably don't need this,
+# or it needs to be refactored.
+sudo pbuilder --clean
+
+echo deb vers $bpvers
+
+
+echo building debs for $DIST
+sudo pbuilder build \
+ --distribution $DIST \
+ --basetgz $pbuilddir/$DIST.tgz \
+ --buildresult $releasedir/$cephver \
+ --debbuildopts "-j`grep -c processor /proc/cpuinfo`" \
+ $releasedir/$cephver/ceph_$bpvers.dsc
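+# pbuilder copies the resulting binary packages (.deb) and .changes files into
+# the --buildresult directory above, i.e. $releasedir/$cephver, which is the
+# same tree the chacra upload below walks.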
+
+# do lintian checks
+echo lintian checks for $bpvers
+echo lintian --allow-root $releasedir/$cephver/*$bpvers*.deb
+
+[ "$FORCE" = true ] && chacra_flags="--force" || chacra_flags=""
+
+if [ "$THROWAWAY" = false ] ; then
+ # push binaries to chacra
+    find release/$vers/ | egrep "\.(changes|deb|dsc|gz)$" | egrep -v "(Packages|Sources|Contents)" | $VENV/chacractl binary ${chacra_flags} create ${chacra_endpoint}
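+    # For illustration (hypothetical filenames): the find/egrep pipeline above picks up
+    # files such as ceph_10.2.3-1xenial_amd64.changes, librados2_10.2.3-1xenial_amd64.deb,
+    # ceph_10.2.3-1xenial.dsc and ceph_10.2.3.orig.tar.gz, while skipping any
+    # Packages/Sources/Contents repository index files.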
+ # write json file with build info
+ cat > $WORKSPACE/repo-extra.json << EOF
+{
+ "version":"$vers",
+ "package_manager_version":"$bpvers",
+ "build_url":"$BUILD_URL",
+ "root_build_cause":"$ROOT_BUILD_CAUSE",
+ "node_name":"$NODE_NAME",
+ "job_name":"$JOB_NAME"
+}
+EOF
+    # post the repo-extra.json to chacra
+ curl -X POST -H "Content-Type:application/json" --data "@$WORKSPACE/repo-extra.json" -u $CHACRACTL_USER:$CHACRACTL_KEY ${chacra_url}repos/${chacra_repo_endpoint}/extra/
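+    # For illustration: with chacra_repo_endpoint set in setup_deb to
+    # ceph/${chacra_ref}/${SHA1}/${distro}/${DIST}/flavors/${FLAVOR}, the POST above
+    # goes to something like
+    # ${chacra_url}repos/ceph/master/<sha1>/ubuntu/xenial/flavors/default/extra/
+    # (branch, sha1 and flavor values here are hypothetical).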
+ # start repo creation
+ $VENV/chacractl repo update ${chacra_repo_endpoint}
+
+ echo Check the status of the repo at: https://shaman.ceph.com/api/repos/${chacra_repo_endpoint}/
+fi
+
+# update shaman with the completed build status
+update_build_status "completed" "ceph" $NORMAL_DISTRO $NORMAL_DISTRO_VERSION $NORMAL_ARCH
--- /dev/null
+#!/bin/bash
+set -ex
+
+
+# create a release directory for ceph-build tools
+mkdir -p release
+cp -a dist release/${vers}
+
+echo "Building RPMs"
+
+# The contents below were ported from /srv/ceph-build/build_rpms.sh ::
+# $bindir/build_rpms.sh ./release $vers
+#
+
+releasedir="./release"
+cephver=$vers
+raw_version=`echo $vers | cut -d '-' -f 1`
+
+cd $releasedir/$cephver || exit 1
+
+# modify the spec file so that %setup knows the sources unpack into a differently named directory
+sed -i "s/^%setup.*/%setup -q -n %{name}-$vers/" ceph.spec
+# it is entirely possible that `%setup` is not used at all and `%autosetup` is used instead
+sed -i "s/^%autosetup.*/%autosetup -p1 -n %{name}-$vers/" ceph.spec
+# This is a fallback for spec rules that may have altered sections which
+# force a non-sha1 naming. It is only needed when building development
+# binaries.
+sed -i "s/%{name}-%{version}/ceph-$vers/" ceph.spec
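+# For illustration (hypothetical version): with vers=10.2.3-45-gdeadbee the sed
+# calls above turn a line like "%setup -q" into
+# "%setup -q -n %{name}-10.2.3-45-gdeadbee", so rpmbuild unpacks into the
+# directory name actually used by the tarball.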
+
+# This is needed because the 'version' this job gets from upstream contains chars
+# that are not legal in an RPM filename. They are already converted in the spec file, which
+# is what is consumed to create the RPM binary. Parse those values from the spec so that they can
+# be reported as part of the build metadata.
+RPM_RELEASE=`grep Release ceph.spec | sed 's/Release:[ \t]*//g' | cut -d '%' -f 1`
+RPM_VERSION=`grep Version ceph.spec | sed 's/Version:[ \t]*//g'`
+PACKAGE_MANAGER_VERSION="$RPM_VERSION-$RPM_RELEASE"
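+# For illustration (hypothetical spec values): "Version: 10.2.3" and
+# "Release: 45.gdeadbee%{?dist}" would yield PACKAGE_MANAGER_VERSION=10.2.3-45.gdeadbee.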
+
+# Set up build area
+BUILDAREA=./rpm/$dist
+mkdir -p ${BUILDAREA}/{SOURCES,SRPMS,SPECS,RPMS,BUILD}
+cp -a ceph-*.tar.bz2 ${BUILDAREA}/SOURCES/.
+cp -a ceph.spec ${BUILDAREA}/SPECS/.
+cp -a rpm/*.patch ${BUILDAREA}/SOURCES/. || true
+
+# Build RPMs
+BUILDAREA=`readlink -fn ${BUILDAREA}` ### rpm wants absolute path
+cd ${BUILDAREA}/SPECS
+rpmbuild -ba --define "_topdir ${BUILDAREA}" ceph.spec
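+# With _topdir pointed at ${BUILDAREA}, rpmbuild writes the binary packages to
+# ${BUILDAREA}/RPMS/<arch>/ and the source RPM to ${BUILDAREA}/SRPMS/ (the
+# standard _topdir layout), which is where the chacra upload further down looks.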
+
+# The following was copied from autobuild-ceph/build-ceph-rpm.sh,
+# which creates the ceph-release rpm that provides the repository file for the repo
+# that will be built and served later.
+# Create and build an RPM for the repository
+
+cat <<EOF > ${BUILDAREA}/SPECS/ceph-release.spec
+Name: ceph-release
+Version: 1
+Release: 0%{?dist}
+Summary: Ceph Development repository configuration
+Group: System Environment/Base
+License: GPLv2
+URL: ${chacra_url}r/ceph/${chacra_ref}/${SHA1}/${DISTRO}/${RELEASE}/flavors/$FLAVOR/
+Source0: ceph.repo
+#Source0: RPM-GPG-KEY-CEPH
+#Source1: ceph.repo
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+BuildArch: noarch
+
+%description
+This package contains the Ceph repository GPG key as well as configuration
+for yum and up2date.
+
+%prep
+
+%setup -q -c -T
+install -pm 644 %{SOURCE0} .
+#install -pm 644 %{SOURCE1} .
+
+%build
+
+%install
+rm -rf %{buildroot}
+#install -Dpm 644 %{SOURCE0} \
+# %{buildroot}/%{_sysconfdir}/pki/rpm-gpg/RPM-GPG-KEY-CEPH
+%if 0%{defined suse_version}
+install -dm 755 %{buildroot}/%{_sysconfdir}/zypp
+install -dm 755 %{buildroot}/%{_sysconfdir}/zypp/repos.d
+install -pm 644 %{SOURCE0} \
+ %{buildroot}/%{_sysconfdir}/zypp/repos.d
+%else
+install -dm 755 %{buildroot}/%{_sysconfdir}/yum.repos.d
+install -pm 644 %{SOURCE0} \
+ %{buildroot}/%{_sysconfdir}/yum.repos.d
+%endif
+
+%clean
+#rm -rf %{buildroot}
+
+%post
+
+%postun
+
+%files
+%defattr(-,root,root,-)
+#%doc GPL
+%if 0%{defined suse_version}
+/etc/zypp/repos.d/*
+%else
+/etc/yum.repos.d/*
+%endif
+#/etc/pki/rpm-gpg/*
+
+%changelog
+* Fri Aug 12 2016 Alfredo Deza <adeza@redhat.com> 1-1
+- Initial Package
+EOF
+# End of ceph-release.spec file.
+
+# GPG Key
+#gpg --export --armor $keyid > ${BUILDAREA}/SOURCES/RPM-GPG-KEY-CEPH
+#chmod 644 ${BUILDAREA}/SOURCES/RPM-GPG-KEY-CEPH
+
+# Install ceph.repo file
+cat <<EOF > $BUILDAREA/SOURCES/ceph.repo
+[Ceph]
+name=Ceph packages for \$basearch
+baseurl=${chacra_url}/r/ceph/${chacra_ref}/${SHA1}/${DISTRO}/${RELEASE}/flavors/${FLAVOR}/\$basearch
+enabled=1
+gpgcheck=0
+type=rpm-md
+gpgkey=https://download.ceph.com/keys/autobuild.asc
+
+[Ceph-noarch]
+name=Ceph noarch packages
+baseurl=${chacra_url}/r/ceph/${chacra_ref}/${SHA1}/${DISTRO}/${RELEASE}/flavors/${FLAVOR}/noarch
+enabled=1
+gpgcheck=0
+type=rpm-md
+gpgkey=https://download.ceph.com/keys/autobuild.asc
+
+[ceph-source]
+name=Ceph source packages
+baseurl=${chacra_url}/r/ceph/${chacra_ref}/${SHA1}/${DISTRO}/${RELEASE}/flavors/${FLAVOR}/SRPMS
+enabled=1
+gpgcheck=0
+type=rpm-md
+gpgkey=https://download.ceph.com/keys/autobuild.asc
+EOF
+# End of ceph.repo file
+
+rpmbuild -bb --define "_topdir ${BUILDAREA}" ${BUILDAREA}/SPECS/ceph-release.spec
+
+
+# Make sure we execute at the top level directory
+cd "$WORKSPACE"
+
+[ "$FORCE" = true ] && chacra_flags="--force" || chacra_flags=""
+
+if [ "$THROWAWAY" = false ] ; then
+ # push binaries to chacra
+ find release/${vers}/rpm/*/SRPMS | grep rpm | $VENV/chacractl binary ${chacra_flags} create ${chacra_endpoint}/source/flavors/${FLAVOR}
+ find release/${vers}/rpm/*/RPMS/* | grep rpm | $VENV/chacractl binary ${chacra_flags} create ${chacra_endpoint}/${ARCH}/flavors/${FLAVOR}
+ # write json file with build info
+ cat > $WORKSPACE/repo-extra.json << EOF
+{
+ "version":"$vers",
+ "package_manager_version":"$PACKAGE_MANAGER_VERSION",
+ "build_url":"$BUILD_URL",
+ "root_build_cause":"$ROOT_BUILD_CAUSE",
+ "node_name":"$NODE_NAME",
+ "job_name":"$JOB_NAME"
+}
+EOF
+ chacra_repo_endpoint="${chacra_endpoint}/flavors/${FLAVOR}"
+    # post the repo-extra.json to chacra
+ curl -X POST -H "Content-Type:application/json" --data "@$WORKSPACE/repo-extra.json" -u $CHACRACTL_USER:$CHACRACTL_KEY ${chacra_url}repos/${chacra_repo_endpoint}/extra/
+ # start repo creation
+ $VENV/chacractl repo update ${chacra_repo_endpoint}
+
+ echo Check the status of the repo at: https://shaman.ceph.com/api/repos/${chacra_endpoint}/flavors/${FLAVOR}/
+fi
+
+# update shaman with the completed build status
+update_build_status "completed" "ceph" $NORMAL_DISTRO $NORMAL_DISTRO_VERSION $NORMAL_ARCH
--- /dev/null
+#!/bin/bash -ex
+
+# note: the failed_build_status call relies on normalized variable names that
+# are inferred by the builds themselves. If the build fails before these are
+# set, they will be posted with empty values.
+BRANCH=`branch_slash_filter $BRANCH`
+
+# update shaman with the failed build status
+failed_build_status "ceph" $NORMAL_DISTRO $NORMAL_DISTRO_VERSION $NORMAL_ARCH
--- /dev/null
+#!/bin/bash
+
+set -ex
+HOST=$(hostname --short)
+echo "Building on $(hostname)"
+echo " DIST=${DIST}"
+echo " BPTAG=${BPTAG}"
+echo " KEYID=${KEYID}"
+echo " WS=$WORKSPACE"
+echo " PWD=$(pwd)"
+echo " BUILD SOURCE=$COPYARTIFACT_BUILD_NUMBER_CEPH_SETUP"
+echo "*****"
+env
+echo "*****"
+
+DIR=/tmp/install-deps.$$
+trap "rm -fr $DIR" EXIT
+mkdir -p $DIR
+if test $(id -u) != 0 ; then
+ SUDO=sudo
+fi
+export LC_ALL=C # the following is vulnerable to i18n
+
+$SUDO apt-get install -y lsb-release
+
+# unpack the tar.gz that contains the debian dir
+cd dist
+tar xzf *.orig.tar.gz
+cd ceph-*
+pwd
+
+BRANCH=`branch_slash_filter $BRANCH`
+
+cd $WORKSPACE
+
+get_bptag() {
+ dist=$1
+
+ [ "$dist" = "sid" ] && dver=""
+ [ "$dist" = "jessie" ] && dver="~bpo80+1"
+ [ "$dist" = "wheezy" ] && dver="~bpo70+1"
+ [ "$dist" = "squeeze" ] && dver="~bpo60+1"
+ [ "$dist" = "lenny" ] && dver="~bpo50+1"
+ [ "$dist" = "xenial" ] && dver="$dist"
+ [ "$dist" = "trusty" ] && dver="$dist"
+ [ "$dist" = "saucy" ] && dver="$dist"
+ [ "$dist" = "precise" ] && dver="$dist"
+ [ "$dist" = "oneiric" ] && dver="$dist"
+ [ "$dist" = "natty" ] && dver="$dist"
+ [ "$dist" = "maverick" ] && dver="$dist"
+ [ "$dist" = "lucid" ] && dver="$dist"
+ [ "$dist" = "karmic" ] && dver="$dist"
+
+ echo $dver
+}
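+# For illustration: `get_bptag jessie` echoes "~bpo80+1" (a Debian backports
+# suffix), `get_bptag xenial` echoes "xenial", and `get_bptag sid` echoes an
+# empty string.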
+
+BPTAG=`get_bptag $DIST`
+
+chacra_ref="$BRANCH"
+vers=`cat ./dist/version`
+
+# We used to detect the $distro variable by inspecting the host, but this is
+# not accurate because we use pbuilder (and only Ubuntu hosts) to build
+# everything. Host detection would cause POSTing binaries to incorrect chacra
+# endpoints like project/ref/ubuntu/jessie/.
+distro=""
+case $DIST in
+ jessie|wheezy)
+ distro="debian"
+ ;;
+ *)
+ distro="ubuntu"
+ ;;
+esac
+
+debian_version=${vers}-1
+
+gen_debian_version() {
+ raw=$1
+ dist=$2
+
+ [ "$dist" = "sid" ] && dver="$raw"
+ [ "$dist" = "jessie" ] && dver="$raw~bpo80+1"
+ [ "$dist" = "wheezy" ] && dver="$raw~bpo70+1"
+ [ "$dist" = "squeeze" ] && dver="$raw~bpo60+1"
+ [ "$dist" = "lenny" ] && dver="$raw~bpo50+1"
+ [ "$dist" = "precise" ] && dver="$raw$dist"
+ [ "$dist" = "saucy" ] && dver="$raw$dist"
+ [ "$dist" = "trusty" ] && dver="$raw$dist"
+ [ "$dist" = "xenial" ] && dver="$raw$dist"
+
+ echo $dver
+}
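+# For illustration (hypothetical version): `gen_debian_version 10.2.3-1 xenial`
+# echoes "10.2.3-1xenial" and `gen_debian_version 10.2.3-1 jessie` echoes
+# "10.2.3-1~bpo80+1".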
+
+bpvers=`gen_debian_version $debian_version $DIST`
+
+# Normalize variables across rpm/deb builds
+NORMAL_DISTRO=$distro
+NORMAL_DISTRO_VERSION=$DIST
+NORMAL_ARCH=$ARCH
+
+# create build status in shaman
+create_build_status "started" "ceph" $NORMAL_DISTRO $NORMAL_DISTRO_VERSION $NORMAL_ARCH
+
+pkgs=( "chacractl>=0.0.4" )
+install_python_packages "pkgs[@]"
+
+# ask shaman which chacra instance to use
+chacra_url=`curl -u $SHAMAN_API_USER:$SHAMAN_API_KEY https://shaman.ceph.com/api/nodes/next/`
+# create the .chacractl config file using global variables
+make_chacractl_config $chacra_url
+
+# look for a specific package to tell if we can avoid the build
+chacra_endpoint="ceph/${chacra_ref}/${SHA1}/${distro}/${DIST}/${ARCH}/flavors/${FLAVOR}"
+chacra_repo_endpoint="ceph/${chacra_ref}/${SHA1}/${distro}/${DIST}/flavors/${FLAVOR}"
+DEB_ARCH=`dpkg-architecture | grep DEB_BUILD_ARCH\= | cut -d '=' -f 2`
+chacra_check_url="${chacra_endpoint}/librados2_${bpvers}_${DEB_ARCH}.deb"
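+# For illustration (hypothetical branch/sha1/version): on an amd64 xenial
+# builder with FLAVOR=default this expands to something like
+# ceph/master/<sha1>/ubuntu/xenial/x86_64/flavors/default/librados2_10.2.3-1xenial_amd64.deb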
+
+if [ "$THROWAWAY" = false ] ; then
+ # this exists in scripts/build_utils.sh
+ # TODO if this exits we need to post to shaman a success
+ check_binary_existence $chacra_check_url
+fi
--- /dev/null
+#!/bin/sh -x
+# This file sets up the base tgz images needed by pbuilder on a given host. It has
+# some hard-coded values like `/srv/debian-base` because the image gets rebuilt every
+# time this file is executed - it is completely ephemeral. Any Debian host that will
+# use pbuilder needs this. Since it is not idempotent it makes
+# everything a bit slower. ## FIXME ##
+
+set -e
+
+# Only run when we are a Debian or Debian-based distro
+if test -f /etc/redhat-release ; then
+ exit 0
+fi
+
+basedir="/srv/debian-base"
+
+# Ensure that the basedir directory exists
+sudo mkdir -p "$basedir"
+
+# This used to live in a *file* on /srv/ceph-build as
+# /srv/ceph-build/update_pbuilder.sh. Now it lives here because it doesn't make
+# sense to have a file in /srv/ that we then concatenate to get its
+# contents.
+# By using $DIST we narrow the update down to only the distro image we
+# need, unlike before, when we updated everything on every server on every
+# build.
+
+os="debian"
+[ "$DIST" = "precise" ] && os="ubuntu"
+[ "$DIST" = "saucy" ] && os="ubuntu"
+[ "$DIST" = "trusty" ] && os="ubuntu"
+[ "$DIST" = "xenial" ] && os="ubuntu"
+
+if [ $os = "debian" ]; then
+ mirror="http://www.gtlib.gatech.edu/pub/debian"
+    # This assumes that keys for newer Debian releases are being added to
+    # /etc/apt/trusted.gpg, which is also the default location for Ubuntu trusted
+    # keys. The slave should ensure that the needed keys are added
+    # to this location.
+ debootstrapopts='DEBOOTSTRAPOPTS=( "--keyring" "/etc/apt/trusted.gpg" )'
+ components='COMPONENTS="main contrib"'
+elif [ "$ARCH" = "arm64" ]; then
+ mirror="http://ports.ubuntu.com/ubuntu-ports"
+ debootstrapopts=""
+ components='COMPONENTS="main universe"'
+else
+ mirror="http://us.archive.ubuntu.com/ubuntu"
+ debootstrapopts=""
+ components='COMPONENTS="main universe"'
+fi
+
+# ensure that the tgz is valid; otherwise remove it so that it can be recreated
+pbuild_tar="$basedir/$DIST.tgz"
+is_not_tar=`python -c "exec 'try: import tarfile;print int(not int(tarfile.is_tarfile(\"$pbuild_tar\")))\nexcept IOError: print 1'"`
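+# The python one-liner above prints "1" when the tgz is missing or is not a
+# valid tarball, and "0" otherwise.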
+file_size_kb=`du -k "$pbuild_tar" 2>/dev/null | cut -f1`
+
+if [ "$is_not_tar" -eq 1 ]; then
+ sudo rm -f "$pbuild_tar"
+fi
+
+if [ "${file_size_kb:-0}" -lt 1 ]; then
+ sudo rm -f "$pbuild_tar"
+fi
+
+# Ordinarily pbuilder only pulls packages from "main". ceph depends on
+# packages like python-virtualenv which are in "universe". We have to configure
+# pbuilder to look in "universe". Otherwise the build would fail with a message similar
+# to:
+# The following packages have unmet dependencies:
+# pbuilder-satisfydepends-dummy : Depends: python-virtualenv which is a virtual package.
+# Depends: xmlstarlet which is a virtual package.
+# Unable to resolve dependencies! Giving up...
+echo "$components" > ~/.pbuilderrc
+echo "$debootstrapopts" >> ~/.pbuilderrc
+# Newer pbuilder versions set $HOME to /nonexistent, which breaks all kinds of
+# things that rely on a proper (writable) path. Setting this to the system user's $HOME is not enough
+# because pbuilder uses a chroot environment for builds; using a temporary directory here ensures
+# that writes will be successful.
+echo "BUILD_HOME=`mktemp -d`" >> ~/.pbuilderrc
+# Some Ceph components will want to use cached wheels that may contain older, buggy
+# versions, like /usr/share/python-wheels/pip-8.1.1-py2.py3-none-any.whl, which causes errors that are already fixed
+# in newer versions. This ticket covers the specific issue in 8.1.1 (which vendors urllib3):
+# https://github.com/shazow/urllib3/issues/567
+echo "USENETWORK=yes" >> ~/.pbuilderrc
+
+sudo pbuilder --clean
+
+if [ -e $basedir/$DIST.tgz ]; then
+ echo updating $DIST base.tgz
+ sudo pbuilder update \
+ --basetgz $basedir/$DIST.tgz \
+ --distribution $DIST \
+ --mirror "$mirror"
+else
+ echo building $DIST base.tgz
+ sudo pbuilder create \
+ --basetgz $basedir/$DIST.tgz \
+ --distribution $DIST \
+ --mirror "$mirror"
+fi
--- /dev/null
+#!/bin/bash
+
+set -ex
+HOST=$(hostname --short)
+echo "Building on $(hostname)"
+echo " DIST=${DIST}"
+echo " BPTAG=${BPTAG}"
+echo " KEYID=${KEYID}"
+echo " WS=$WORKSPACE"
+echo " PWD=$(pwd)"
+echo " BUILD SOURCE=$COPYARTIFACT_BUILD_NUMBER_CEPH_SETUP"
+echo "*****"
+env
+echo "*****"
+
+DIR=/tmp/install-deps.$$
+trap "rm -fr $DIR" EXIT
+mkdir -p $DIR
+if test $(id -u) != 0 ; then
+ SUDO=sudo
+fi
+export LC_ALL=C # the following is vulnerable to i18n
+
+$SUDO yum install -y redhat-lsb-core
+
+# unpack the tar.gz that contains the sources (ceph.spec.in lives there)
+cd dist
+tar xzf *.orig.tar.gz
+cd ceph-*
+pwd
+
+case $(lsb_release -si) in
+CentOS|Fedora|SUSE*|RedHatEnterpriseServer)
+ case $(lsb_release -si) in
+ SUSE*)
+        $SUDO zypper --non-interactive install yum-utils
+ ;;
+ *)
+ $SUDO yum install -y yum-utils
+ ;;
+ esac
+ sed -e 's/@//g' < ceph.spec.in > $DIR/ceph.spec
+ $SUDO yum-builddep -y $DIR/ceph.spec
+ ;;
+*)
+ echo "$(lsb_release -si) is unknown, dependencies will have to be installed manually."
+ ;;
+esac
+
+BRANCH=`branch_slash_filter $BRANCH`
+
+if [[ ! -f /etc/redhat-release && ! -f /usr/bin/zypper ]] ; then
+ exit 0
+fi
+
+cd $WORKSPACE
+
+get_rpm_dist() {
+ LSB_RELEASE=/usr/bin/lsb_release
+ [ ! -x $LSB_RELEASE ] && echo unknown && exit
+
+ ID=`$LSB_RELEASE --short --id`
+
+ case $ID in
+ RedHatEnterpriseServer)
+ RELEASE=`$LSB_RELEASE --short --release | cut -d. -f1`
+ DIST=rhel$RELEASE
+ DISTRO=rhel
+ ;;
+ CentOS)
+ RELEASE=`$LSB_RELEASE --short --release | cut -d. -f1`
+ DIST=el$RELEASE
+ DISTRO=centos
+ ;;
+ Fedora)
+ RELEASE=`$LSB_RELEASE --short --release`
+ DIST=fc$RELEASE
+ DISTRO=fedora
+ ;;
+ SUSE\ LINUX)
+ DESC=`$LSB_RELEASE --short --description`
+ RELEASE=`$LSB_RELEASE --short --release`
+ case $DESC in
+ *openSUSE*)
+ DIST=opensuse$RELEASE
+ DISTRO=opensuse
+ ;;
+ *Enterprise*)
+ DIST=sles$RELEASE
+ DISTRO=sles
+ ;;
+ esac
+ ;;
+ *)
+ DIST=unknown
+ DISTRO=unknown
+ ;;
+ esac
+
+ echo $DIST
+}
+
+get_rpm_dist
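+# For illustration: on a CentOS 7 builder the call above leaves DIST=el7,
+# DISTRO=centos and RELEASE=7 set for the rest of this script.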
+
+# Normalize variables across rpm/deb builds
+NORMAL_DISTRO=$DISTRO
+NORMAL_DISTRO_VERSION=$RELEASE
+NORMAL_ARCH=$ARCH
+
+# create build status in shaman
+create_build_status "started" "ceph" $NORMAL_DISTRO $NORMAL_DISTRO_VERSION $NORMAL_ARCH
+
+pkgs=( "chacractl>=0.0.4" )
+install_python_packages "pkgs[@]"
+
+# ask shaman which chacra instance to use
+chacra_url=`curl -u $SHAMAN_API_USER:$SHAMAN_API_KEY https://shaman.ceph.com/api/nodes/next/`
+# create the .chacractl config file using global variables
+make_chacractl_config $chacra_url
+
+dist=$DIST
+[ -z "$dist" ] && echo no dist && exit 1
+echo dist $dist
+
+vers=`cat ./dist/version`
+chacra_ref="$BRANCH"
+
+chacra_endpoint="ceph/${chacra_ref}/${SHA1}/${DISTRO}/${RELEASE}"
+chacra_check_url="${chacra_endpoint}/${ARCH}/flavors/${FLAVOR}/librados2-${vers}-0.${DIST}.${ARCH}.rpm"
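+# For illustration (hypothetical branch/sha1/version): on a CentOS 7 x86_64
+# builder with FLAVOR=default this expands to something like
+# ceph/master/<sha1>/centos/7/x86_64/flavors/default/librados2-10.2.3-0.el7.x86_64.rpm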
+
+
+if [ "$THROWAWAY" = false ] ; then
+ # this exists in scripts/build_utils.sh
+ # TODO if this exits we need to post to shaman a success
+ check_binary_existence $chacra_check_url
+fi
--- /dev/null
+#!/bin/bash
+set -ex
+
+# Only do actual work when we are a DEB distro
+if test -f /etc/redhat-release ; then
+ exit 0
+fi
--- /dev/null
+#!/bin/bash
+set -ex
+
+# only do work if we are a RPM distro
+if [[ ! -f /etc/redhat-release && ! -f /usr/bin/zypper ]] ; then
+ exit 0
+fi
--- /dev/null
+- job:
+ name: ceph-dev-new-build
+ node: master
+ project-type: matrix
+ defaults: global
+ display-name: 'ceph-dev-new-build'
+ block-downstream: false
+ block-upstream: false
+ concurrent: true
+ properties:
+ - github:
+ url: https://github.com/ceph/ceph-ci
+ execution-strategy:
+ combination-filter: DIST==AVAILABLE_DIST && ARCH==AVAILABLE_ARCH && (ARCH=="x86_64" || (ARCH == "arm64" && (DIST == "xenial" || DIST == "centos7")))
+ axes:
+ - axis:
+ type: label-expression
+ name: MACHINE_SIZE
+ values:
+ - huge
+ - axis:
+ type: label-expression
+ name: AVAILABLE_ARCH
+ values:
+ - x86_64
+ - arm64
+ - axis:
+ type: label-expression
+ name: AVAILABLE_DIST
+ values:
+ - trusty
+ - xenial
+ - centos7
+ - jessie
+ - precise
+ - centos6
+ - wheezy
+ - axis:
+ type: dynamic
+ name: DIST
+ values:
+ - DISTROS
+ - axis:
+ type: dynamic
+ name: ARCH
+ values:
+ - ARCHS
+
+ builders:
+ - shell: |
+ echo "Cleaning up top-level workarea (shared among workspaces)"
+ rm -rf dist
+ rm -rf venv
+ rm -rf release
+ - copyartifact:
+ project: ceph-dev-new-setup
+ filter: 'dist/**'
+ which-build: multijob-build
+ - inject:
+ properties-file: ${WORKSPACE}/dist/sha1
+ - inject:
+ properties-file: ${WORKSPACE}/dist/branch
+ - inject:
+ properties-file: ${WORKSPACE}/dist/other_envvars
+ # debian build scripts
+ - shell:
+ !include-raw:
+ - ../../build/validate_deb
+ - ../../../scripts/build_utils.sh
+ - ../../build/setup_deb
+ - ../../build/setup_pbuilder
+ - ../../build/build_deb
+ # rpm build scripts
+ - shell:
+ !include-raw:
+ - ../../build/validate_rpm
+ - ../../../scripts/build_utils.sh
+ - ../../build/setup_rpm
+ - ../../build/build_rpm
+
+ publishers:
+ - postbuildscript:
+ script-only-if-failed: True
+ script-only-if-succeeded: False
+ builders:
+ - inject:
+ properties-file: ${WORKSPACE}/build_info
+ - shell:
+ !include-raw:
+ - ../../../scripts/build_utils.sh
+ - ../../build/failure
+
+ wrappers:
+ - inject-passwords:
+ global: true
+ mask-password-params: true