--- /dev/null
+#!/bin/bash
+#
+# Copyright (C) 2014 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source test/docker-test-helper.sh
+
+# Bash associative-array literal passed as a string; presumably eval'ed
+# by the docker helper to map distro name -> image tag -- TODO confirm
+# against docker-test-helper.sh.
+supported='([ubuntu]="14.04" [centos]="centos7")'
+# First pass compiles the tree inside each container; second pass runs
+# the ceph-disk dev-activation test as root (losetup/mount need it).
+main_docker "$@" --all "$supported" --compile
+main_docker "$@" --all "$supported" --user root --dev test/ceph-disk.sh test_activate_dev
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/ceph-disk-root.sh"
+# End:
export PATH=:$PATH # make sure program from sources are prefered
DIR=test-ceph-disk
+# Parent directory for the osd data dirs the tests create.
+OSD_DATA=$DIR/osd
MON_ID=a
MONA=127.0.0.1:7451
TEST_POOL=rbd
CEPH_ARGS+=" --pid-file=$DIR/\$name.pidfile"
CEPH_ARGS+=" --osd-pool-default-erasure-code-directory=.libs"
CEPH_ARGS+=" --auth-supported=none"
+# Small journal so osds on loop devices / scratch dirs stay fast and small.
+CEPH_ARGS+=" --osd-journal-size=100"
CEPH_DISK_ARGS=
CEPH_DISK_ARGS+=" --statedir=$DIR"
CEPH_DISK_ARGS+=" --sysconfdir=$DIR"
cat=$(which cat)
timeout=$(which timeout)
diff=$(which diff)
+# NOTE(review): presumably cached as absolute paths so the PATH entry
+# pointing at the source tree cannot shadow the system tools -- confirm.
+mkdir=$(which mkdir)
+rm=$(which rm)
function setup() {
    teardown
    mkdir $DIR
-    touch $DIR/ceph.conf
+    mkdir $OSD_DATA
+#    mkdir $OSD_DATA/ceph-0
+    touch $DIR/ceph.conf # so ceph-disk thinks ceph is the cluster
}
function teardown() {
}
# ceph-disk prepare returns immediately on success if the magic file
-# exists on the --osd-data directory.
+# exists in the --osd-data directory.
function test_activate_dir_magic() {
local uuid=$(uuidgen)
local osd_data=$DIR/osd
grep --quiet $uuid $osd_data/ceph_fsid || return 1
}
-function test_activate_dir() {
-    run_mon
+# Shared body for the dir and dev activation tests: run ceph-disk
+# prepare on $1, activate $2, then verify the resulting osd is usable.
+function test_activate() {
+    local to_prepare=$1
+    local to_activate=$2
-    local osd_data=$DIR/osd
+    $mkdir -p $OSD_DATA
-    /bin/mkdir -p $osd_data
    ./ceph-disk $CEPH_DISK_ARGS \
-        prepare $osd_data || return 1
+        prepare $to_prepare || return 1
-    CEPH_ARGS="$CEPH_ARGS --osd-journal-size=100 --osd-data=$osd_data" \
-    $timeout $TIMEOUT ./ceph-disk $CEPH_DISK_ARGS \
-        activate \
-        --mark-init=none \
-        $osd_data || return 1
+    $timeout $TIMEOUT ./ceph-disk $CEPH_DISK_ARGS \
+        activate \
+        --mark-init=none \
+        $to_activate || return 1
    $timeout $TIMEOUT ./ceph osd pool set $TEST_POOL size 1 || return 1
-    local id=$($cat $osd_data/whoami)
+
+    # NOTE(review): for the device case the osd data dir is presumably
+    # mounted under $OSD_DATA/ceph-<id> (hence the glob); fall back to
+    # reading whoami straight from $to_activate for the dir case.
+    local id=$($cat $OSD_DATA/ceph-?/whoami || $cat $to_activate/whoami)
    local weight=1
    ./ceph osd crush add osd.$id $weight root=default host=localhost || return 1
    echo FOO > $DIR/BAR
    $diff $DIR/BAR $DIR/BAR.copy || return 1
}
+# Activate an osd whose data lives in a plain directory (no device),
+# delegating the prepare/activate/verify sequence to test_activate.
+function test_activate_dir() {
+    run_mon
+
+    local osd_data=$DIR/dir
+    $mkdir -p $osd_data
+    test_activate $osd_data $osd_data || return 1
+    $rm -fr $osd_data
+}
+
+# Activate an osd on a loop device backed by a sparse file.  Needs root
+# for losetup/mount and is skipped (success) otherwise.
+function test_activate_dev() {
+    run_mon
+
+    if test $(id -u) != 0 ; then
+        echo "SKIP because not root"
+        return 0
+    fi
+
+    dd if=/dev/zero of=vde.disk bs=1024k count=200
+    # losetup --find only attaches to a free device without printing it,
+    # so --associated is queried afterwards to learn which /dev/loopN
+    # was picked.  (NOTE(review): `losetup --find --show vde.disk` does
+    # both in one racefree step on util-linux >= 2.21.)
+    losetup --find vde.disk
+    local disk=$(losetup --associated vde.disk | cut -f1 -d:)
+    ./ceph-disk zap $disk
+    # NOTE(review): ${disk}p1 assumes the kernel exposes loop device
+    # partitions (loop.max_part / partprobe) -- confirm on test hosts.
+    # Also: dd/losetup/test_activate results are not checked with
+    # || return 1 here; only the final zap decides the test status.
+    test_activate ${disk} ${disk}p1
+    kill_daemons
+    umount ${disk}p1
+    # Presumably re-zapping checks zap works on a device that held a
+    # live osd; its status is the test result, so the cleanup below
+    # must not clobber $status.
+    ./ceph-disk zap $disk
+    status=$?
+    losetup --detach $disk
+    rm vde.disk
+    return $status
+}
+
function test_find_cluster_by_uuid() {
setup
test_activate_dir 2>&1 | tee $DIR/test_find