#
+# Print the directory that should hold admin sockets (.asok files).
+# Honors $CEPH_ASOK_DIR when set by the caller; otherwise falls back to a
+# per-process directory under $TMPDIR (or /tmp), keyed by this shell's PID
+# ($$) so concurrent test runs do not collide.
+function get_asok_dir() {
+ if [ -n "$CEPH_ASOK_DIR" ]; then
+ echo "$CEPH_ASOK_DIR"
+ else
+ echo ${TMPDIR:-/tmp}/ceph-asok.$$
+ fi
+}
+
+# Print the admin-socket path for a given daemon name.
+# $1 - daemon name such as "mon.a" or "osd.0"; yields <asok_dir>/ceph-<name>.asok.
+# With no argument, prints a template path containing literal $cluster-$name
+# metavariables (backslash-escaped so the daemon, not this shell, expands
+# them), suitable for use as an --admin-socket config value.
+function get_asok_path() {
+ local name=$1
+ if [ -n "$name" ]; then
+ echo $(get_asok_dir)/ceph-$name.asok
+ else
+ echo $(get_asok_dir)/\$cluster-\$name.asok
+ fi
+}
##
# Cleanup any leftovers found in **dir** via **teardown**
# and reset **dir** as an empty environment.
local dir=$1
teardown $dir || return 1
mkdir -p $dir
+ mkdir -p $(get_asok_dir)
}
function test_setup() {
__teardown_btrfs $dir
fi
rm -fr $dir
+ rm -rf $(get_asok_dir)
}
function __teardown_btrfs() {
--chdir= \
--mon-data=$data \
--log-file=$dir/\$name.log \
- --admin-socket=$dir/\$cluster-\$name.asok \
+ --admin-socket=$(get_asok_path) \
--mon-cluster-log-file=$dir/log \
--run-dir=$dir \
--pid-file=$dir/\$name.pid \
run_mon $dir a || return 1
# rbd has been deleted / created, hence it does not have pool id 0
! ceph osd dump | grep "pool 1 'rbd'" || return 1
- local size=$(CEPH_ARGS='' ceph --format=json daemon $dir/ceph-mon.a.asok \
+ local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
config get osd_pool_default_size)
test "$size" = '{"osd_pool_default_size":"3"}' || return 1
kill_daemons $dir || return 1
run_mon $dir a --osd_pool_default_size=1 || return 1
- local size=$(CEPH_ARGS='' ceph --format=json daemon $dir/ceph-mon.a.asok \
+ local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
config get osd_pool_default_size)
test "$size" = '{"osd_pool_default_size":"1"}' || return 1
kill_daemons $dir || return 1
CEPH_ARGS="$CEPH_ARGS --osd_pool_default_size=2" \
run_mon $dir a || return 1
- local size=$(CEPH_ARGS='' ceph --format=json daemon $dir/ceph-mon.a.asok \
+ local size=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path mon.a) \
config get osd_pool_default_size)
test "$size" = '{"osd_pool_default_size":"2"}' || return 1
kill_daemons $dir || return 1
--chdir= \
--mgr-data=$data \
--log-file=$dir/\$name.log \
- --admin-socket=$dir/\$cluster-\$name.asok \
+ --admin-socket=$(get_asok_path) \
--run-dir=$dir \
--pid-file=$dir/\$name.pid \
"$@" || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
- local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.0.asok \
+ local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills)
echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
run_osd $dir 1 --osd-max-backfills 20 || return 1
- local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.1.asok \
+ local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.1) \
config get osd_max_backfills)
test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
CEPH_ARGS="$CEPH_ARGS --osd-max-backfills 30" run_osd $dir 2 || return 1
- local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.2.asok \
+ local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.2) \
config get osd_max_backfills)
test "$backfills" = '{"osd_max_backfills":"30"}' || return 1
ceph_args+=" --plugin-dir=$CEPH_LIB"
ceph_args+=" --osd-class-dir=$CEPH_LIB"
ceph_args+=" --run-dir=$dir"
+ ceph_args+=" --admin-socket=$(get_asok_path)"
ceph_args+=" --debug-osd=20"
ceph_args+=" --log-file=$dir/\$name.log"
ceph_args+=" --pid-file=$dir/\$name.pid"
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
- local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.0.asok \
+ local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills)
echo "$backfills" | grep --quiet 'osd_max_backfills' || return 1
kill_daemons $dir TERM osd || return 1
activate_osd $dir 0 --osd-max-backfills 20 || return 1
- local backfills=$(CEPH_ARGS='' ceph --format=json daemon $dir//ceph-osd.0.asok \
+ local backfills=$(CEPH_ARGS='' ceph --format=json daemon $(get_asok_path osd.0) \
config get osd_max_backfills)
test "$backfills" = '{"osd_max_backfills":"20"}' || return 1
local config=$3
CEPH_ARGS='' \
- ceph --format json daemon $dir/ceph-$daemon.$id.asok \
+ ceph --format json daemon $(get_asok_path $daemon.$id) \
config get $config 2> /dev/null | \
jq -r ".$config"
}
local config=$3
local value=$4
- test $(env CEPH_ARGS='' ceph --format json daemon $dir/ceph-$daemon.$id.asok \
+ test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \
config set $config $value 2> /dev/null | \
jq 'has("success")') == true
}
{
local client=$1
- if test -n "$CEPH_OUT_DIR";
+ if test -n "$CEPH_ASOK_DIR";
then
- echo $CEPH_OUT_DIR/$client.asok
+ echo $(get_asok_dir)/$client.asok
else
local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/')
echo "/var/run/ceph/$cluster-$client.asok"
$DRY_RUN ./do_cmake.sh $@ || return 1
$DRY_RUN cd build
$DRY_RUN make $BUILD_MAKEOPTS tests || return 1
- $DRY_RUN ctest $CHECK_MAKEOPTS --output-on-failure || return 1
+ if ! $DRY_RUN ctest $CHECK_MAKEOPTS --output-on-failure; then
+ rm -f ${TMPDIR:-/tmp}/ceph-asok.*
+ return 1
+ fi
}
function main() {
i = sys.argv.index("injectargs")
sys.argv = sys.argv[:i] + ceph_args.split() + sys.argv[i:]
else:
- sys.argv.extend(ceph_args.split())
+ sys.argv.extend([arg for arg in ceph_args.split()
+ if '--admin-socket' not in arg])
parser, parsed_args, childargs = parse_cmdargs()
if parsed_args.version:
cd $script_root/../build
script_root=$PWD
fi
+ceph_bin=$script_root/bin
vstart_path=`dirname $0`
[ "$#" -lt 2 ] && echo "usage: $0 <name> <port> [params...]" && exit 1
run_root=$script_root/run/$name
pidfile=$run_root/out/radosgw.${port}.pid
-asokfile=$run_root/out/radosgw.${port}.asok
+asokfile=$($ceph_bin/ceph-conf --show-config-value admin_socket --name radosgw.${port})
logfile=$run_root/out/radosgw.${port}.log
$vstart_path/mstop.sh $name radosgw $port
test -d dev/osd0/. && test -e dev/sudo && SUDO="sudo"
if [ -e CMakeCache.txt ]; then
- [ -z "$CEPH_BIN" ] && CEPH_BIN=src
-else
[ -z "$CEPH_BIN" ] && CEPH_BIN=bin
fi
pkill -u $MYUID -f valgrind.bin.\*ceph-mon
$SUDO pkill -u $MYUID -f valgrind.bin.\*ceph-osd
pkill -u $MYUID -f valgrind.bin.\*ceph-mds
+ asok_dir=`dirname $("${CEPH_BIN}"/ceph-conf --show-config-value admin_socket)`
+ rm -rf "${asok_dir}"
else
[ $stop_mon -eq 1 ] && do_killall ceph-mon
[ $stop_mds -eq 1 ] && do_killall ceph-mds
setup $dir || return 1
run_mon $dir a --osd_erasure_code_plugins="${plugin}" || return 1
run_mgr $dir x || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
run_osd $dir 0 --osd_erasure_code_plugins="${plugin}" || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep "WARNING: osd_erasure_code_plugins contains plugin ${plugin}" $dir/mon.a.log || return 1
grep "WARNING: osd_erasure_code_plugins contains plugin ${plugin}" $dir/osd.0.log || return 1
teardown $dir || return 1
setup $dir || return 1
run_mon $dir a --osd_erasure_code_plugins="${plugin}" || return 1
run_mgr $dir x || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
run_osd $dir 0 --osd_erasure_code_plugins="${plugin}" || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
! grep "WARNING: osd_erasure_code_plugins contains plugin" $dir/mon.a.log || return 1
! grep "WARNING: osd_erasure_code_plugins contains plugin" $dir/osd.0.log || return 1
teardown $dir || return 1
setup $dir || return 1
run_mon $dir a || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
run_mgr $dir x || return 1
run_osd $dir 0 || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
! grep "WARNING: osd_erasure_code_plugins" $dir/mon.a.log || return 1
! grep "WARNING: osd_erasure_code_plugins" $dir/osd.0.log || return 1
teardown $dir || return 1
for plugin in ${legacy_jerasure_plugins[*]}; do
ceph osd erasure-code-profile set prof-${plugin} crush-failure-domain=osd technique=reed_sol_van plugin=${plugin} || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep "WARNING: erasure coding profile prof-${plugin} uses plugin ${plugin}" $dir/mon.a.log || return 1
done
for plugin in ${legacy_shec_plugins[*]}; do
ceph osd erasure-code-profile set prof-${plugin} crush-failure-domain=osd plugin=${plugin} || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep "WARNING: erasure coding profile prof-${plugin} uses plugin ${plugin}" $dir/mon.a.log || return 1
done
run_mon $dir a || return 1
run_mgr $dir x || return 1
# check that erasure code plugins are preloaded
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
for id in $(seq 0 10) ; do
run_osd $dir $id || return 1
done
wait_for_clean || return 1
# check that erasure code plugins are preloaded
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
create_erasure_coded_pool ecpool || return 1
run_mon $dir a || return 1
run_mgr $dir x || return 1
# check that erasure code plugins are preloaded
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1
$func $dir || return 1
teardown $dir || return 1
wait_for_clean || return 1
# check that erasure code plugins are preloaded
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1
}
local -a initial_osds=($(get_osds $poolname $objname))
local osd_id=${initial_osds[$shard_id]}
set_config osd $osd_id filestore_debug_inject_read_err true || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.$osd_id.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \
injectdataerr $poolname $objname $shard_id || return 1
}
timeout 360 ceph --mon-host $MONA mon stat || return 1
# check that MONB is indeed a peon
- ceph --admin-daemon $dir/ceph-mon.b.asok mon_status |
+ ceph --admin-daemon $(get_asok_path mon.b) mon_status |
grep '"peon"' || return 1
# when the leader ( MONA ) is used, there is no message forwarding
ceph --mon-host $MONA osd pool create POOL1 12
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep 'mon_command(.*"POOL1"' $dir/a/mon.a.log
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.b.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.b) log flush || return 1
grep 'mon_command(.*"POOL1"' $dir/mon.b.log && return 1
# when the peon ( MONB ) is used, the message is forwarded to the leader
ceph --mon-host $MONB osd pool create POOL2 12
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.b.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.b) log flush || return 1
grep 'forward_request.*mon_command(.*"POOL2"' $dir/mon.b.log
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep ' forward(mon_command(.*"POOL2"' $dir/mon.a.log
# forwarded messages must retain features from the original connection
features=$(sed -n -e 's|.*127.0.0.1:0.*accept features \([0-9][0-9]*\)|\1|p' < \
ceph osd erasure-code-profile rm default || return 1
! ceph osd erasure-code-profile ls | grep default || return 1
ceph osd crush rule create-erasure $ruleset || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-mon.a.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
grep 'profile set default' $dir/mon.a.log || return 1
ceph osd erasure-code-profile ls | grep default || return 1
ceph osd crush rule rm $ruleset || return 1
--osd-map-cache-size $cache \
--osd-pg-epoch-persisted-max-stale $stale \
|| return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
}
! grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
local cache=$(($osd_map_max_advance / 2))
ceph tell osd.0 injectargs "--osd-map-cache-size $cache" || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
rm $dir/osd.0.log
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log reopen || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log reopen || return 1
#
# reset cache_size to the default and assert that it does not trigger the warning
! grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
local cache=$osd_map_cache_size
ceph tell osd.0 injectargs "--osd-map-cache-size $cache" || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
! grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
rm $dir/osd.0.log
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log reopen || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log reopen || return 1
#
# increase the osd_map_max_advance above the default cache_size
! grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
local advance=$(($osd_map_cache_size * 2))
ceph tell osd.0 injectargs "--osd-map-max-advance $advance" || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep 'is not > osd_map_max_advance' $dir/osd.0.log || return 1
rm $dir/osd.0.log
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log reopen || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log reopen || return 1
#
# increase the osd_pg_epoch_persisted_max_stale above the default cache_size
! grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
local stale=$(($osd_map_cache_size * 2))
ceph tell osd.0 injectargs "--osd-pg-epoch-persisted-max-stale $stale" || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log flush || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
rm $dir/osd.0.log
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok log reopen || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log reopen || return 1
}
main osd-config "$@"
local pg=$(get_pg $poolname ROBJ0)
# Compute an old omap digest and save oi
- CEPH_ARGS='' ceph daemon $dir//ceph-osd.0.asok \
+ CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) \
config set osd_deep_scrub_update_digest_min_age 0
- CEPH_ARGS='' ceph daemon $dir//ceph-osd.1.asok \
+ CEPH_ARGS='' ceph daemon $(get_asok_path osd.1) \
config set osd_deep_scrub_update_digest_min_age 0
pg_deep_scrub $pg
set_config osd 0 filestore_debug_inject_read_err true || return 1
set_config osd 1 filestore_debug_inject_read_err true || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.1.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.1) \
injectdataerr $poolname ROBJ11 || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) \
injectmdataerr $poolname ROBJ12 || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) \
injectmdataerr $poolname ROBJ13 || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.1.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.1) \
injectdataerr $poolname ROBJ13 || return 1
pg_scrub $pg
set_config osd 0 filestore_debug_inject_read_err true || return 1
set_config osd 1 filestore_debug_inject_read_err true || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.1.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.1) \
injectdataerr $poolname ROBJ11 || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) \
injectmdataerr $poolname ROBJ12 || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.0.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) \
injectmdataerr $poolname ROBJ13 || return 1
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.1.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.1) \
injectdataerr $poolname ROBJ13 || return 1
pg_deep_scrub $pg
local last_scrub=$(get_last_scrub_stamp $pg)
# Fake a schedule scrub
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.${primary}.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) \
trigger_scrub $pg || return 1
# Wait for schedule regular scrub
wait_for_scrub $pg "$last_scrub"
# Fake a schedule scrub
local last_scrub=$(get_last_scrub_stamp $pg)
- CEPH_ARGS='' ceph --admin-daemon $dir/ceph-osd.${primary}.asok \
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) \
trigger_scrub $pg || return 1
# Wait for schedule regular scrub
# to notice scrub and skip it
export CEPH_DIR="${TMPDIR:-$PWD}/td/t-$CEPH_PORT"
export CEPH_DEV_DIR="$CEPH_DIR/dev"
export CEPH_OUT_DIR="$CEPH_DIR/out"
+export CEPH_ASOK_DIR="$CEPH_DIR/out"
export MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
fi
if [ "$overwrite_conf" -eq 0 ]; then
+ CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf --show-config-value admin_socket)`
+ mkdir -p $CEPH_ASOK_DIR
MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mon 2>/dev/null` && \
CEPH_NUM_MON="$MON"
OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_osd 2>/dev/null` && \
else
if [ "$new" -ne 0 ]; then
# only delete if -n
+ asok_dir=`dirname $($CEPH_BIN/ceph-conf --show-config-value admin_socket)`
+ [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
+ if [ -z "$CEPH_ASOK_DIR" ]; then
+ CEPH_ASOK_DIR=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
+ fi
[ -e "$conf_fn" ] && rm -- "$conf_fn"
else
+ CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf --show-config-value admin_socket)`
# -k is implied... (doesn't make sense otherwise)
overwrite_conf=0
fi
prepare_conf() {
local DAEMONOPTS="
log file = $CEPH_OUT_DIR/\$name.log
- admin socket = $CEPH_OUT_DIR/\$name.asok
+ admin socket = $CEPH_ASOK_DIR/\$name.asok
chdir = \"\"
pid file = $CEPH_OUT_DIR/\$name.pid
heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
[client]
keyring = $keyring_fn
log file = $CEPH_OUT_DIR/\$name.\$pid.log
- admin socket = $CEPH_OUT_DIR/\$name.\$pid.asok
+ admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok
[client.rgw]
prun $SUDO rm -f core*
+test -d $CEPH_ASOK_DIR || mkdir $CEPH_ASOK_DIR
test -d $CEPH_OUT_DIR || mkdir $CEPH_OUT_DIR
test -d $CEPH_DEV_DIR || mkdir $CEPH_DEV_DIR
$SUDO rm -rf $CEPH_OUT_DIR/*