2 # -*- mode:sh; tab-width:4; sh-basic-offset:4; indent-tabs-mode:nil -*-
3 # vim: softtabstop=4 shiftwidth=4 expandtab
10 if [[ "$s" =~ \ ]]; then
11 printf -- "'%s' " "$s"
24 debug quoted_print "$@" '&'
29 debug quoted_print "$@"
34 if [ -n "$VSTART_DEST" ]; then
36 SRC_PATH=`(cd $SRC_PATH; pwd)`
42 CEPH_CONF_PATH=$VSTART_DEST
43 CEPH_DEV_DIR=$VSTART_DEST/dev
44 CEPH_OUT_DIR=$VSTART_DEST/out
45 CEPH_ASOK_DIR=$VSTART_DEST/out
48 get_cmake_variable() {
50 grep "${variable}:" CMakeCache.txt | cut -d "=" -f 2
53 # for running out of the CMake build directory
54 if [ -e CMakeCache.txt ]; then
55 # Out of tree build, learn source location from CMakeCache.txt
56 CEPH_ROOT=$(get_cmake_variable ceph_SOURCE_DIR)
58 [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
61 # use CEPH_BUILD_ROOT to vstart from a 'make install'
62 if [ -n "$CEPH_BUILD_ROOT" ]; then
63 [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
64 [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
65 [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
66 [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
67 # make install should install python extensions into PYTHONPATH
68 elif [ -n "$CEPH_ROOT" ]; then
69 [ -z "$CEPHFS_SHELL" ] && CEPHFS_SHELL=$CEPH_ROOT/src/tools/cephfs/cephfs-shell
70 [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
71 [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
72 [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
73 [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
74 [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
75 [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
76 [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
77 [ -z "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON=$CEPH_ROOT/src/python-common
80 if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
84 [ -z "$PYBIND" ] && PYBIND=./pybind
86 [ -n "$CEPH_PYTHON_COMMON" ] && CEPH_PYTHON_COMMON="$CEPH_PYTHON_COMMON:"
87 CYTHON_PYTHONPATH="$CEPH_LIB/cython_modules/lib.3"
88 export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON$PYTHONPATH
90 export LD_LIBRARY_PATH=$CEPH_LIB:$LD_LIBRARY_PATH
91 export DYLD_LIBRARY_PATH=$CEPH_LIB:$DYLD_LIBRARY_PATH
92 # Suppress the logging, emitted during regular use, indicating that we are
93 # running a development version. vstart.sh is only used during testing and
97 [ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
98 [ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
99 [ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
100 [ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
101 [ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
102 [ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
103 [ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM="$NFS"
105 # if none of the CEPH_NUM_* numbers is specified, kill the existing
107 if [ -z "$CEPH_NUM_MON" -a \
108 -z "$CEPH_NUM_OSD" -a \
109 -z "$CEPH_NUM_MDS" -a \
110 -z "$CEPH_NUM_MGR" -a \
111 -z "$GANESHA_DAEMON_NUM" ]; then
117 [ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
118 [ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
119 [ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
120 [ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
121 [ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
122 [ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
123 [ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0
124 [ -z "$GANESHA_DAEMON_NUM" ] && GANESHA_DAEMON_NUM=0
126 [ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
127 [ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
128 [ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
129 [ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
130 [ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
132 if [ $CEPH_NUM_OSD -gt 3 ]; then
133 OSD_POOL_DEFAULT_SIZE=3
135 OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
152 cephx=1 #turn cephx on by default
155 if [ `uname` = FreeBSD ]; then
156 objectstore="filestore"
158 objectstore="bluestore"
163 lockdep=${LOCKDEP:-1}
164 spdk_enabled=0 #disable SPDK by default
167 with_mgr_dashboard=true
168 if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
169 [[ "$(get_cmake_variable WITH_RBD)" != "ON" ]]; then
170 debug echo "ceph-mgr dashboard not built - disabling."
171 with_mgr_dashboard=false
178 VSTART_SEC="client.vstart.sh"
184 conf_fn="$CEPH_CONF_PATH/ceph.conf"
185 keyring_fn="$CEPH_CONF_PATH/keyring"
186 osdmap_fn="/tmp/ceph_osdmap.$$"
187 monmap_fn="/tmp/ceph_monmap.$$"
# Build the multi-line help text shown by usage_exit. The \n / \t sequences
# are stored literally here and presumably expanded when printed (e.g. via
# 'echo -e' or printf) — TODO confirm against the usage_exit definition.
192 usage="usage: $0 [option]... \nex: MON=3 OSD=1 MDS=1 MGR=1 RGW=1 NFS=1 $0 -n -d\n"
193 usage=$usage"options:\n"
194 usage=$usage"\t-d, --debug\n"
195 usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
196 usage=$usage"\t-l, --localhost: use localhost instead of hostname\n"
197 usage=$usage"\t-i <ip>: bind to specific ip\n"
198 usage=$usage"\t-n, --new\n"
199 usage=$usage"\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
200 usage=$usage"\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
201 usage=$usage"\t--redirect-output: only useful with nodaemon, directs output to log file\n"
202 usage=$usage"\t--smallmds: limit mds cache memory limit\n"
203 usage=$usage"\t-m ip:port\t\tspecify monitor address\n"
204 usage=$usage"\t-k keep old configuration files (default)\n"
205 usage=$usage"\t-x enable cephx (on by default)\n"
206 usage=$usage"\t-X disable cephx\n"
207 usage=$usage"\t-g --gssapi enable Kerberos/GSSApi authentication\n"
208 usage=$usage"\t-G disable Kerberos/GSSApi authentication\n"
209 usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
210 usage=$usage"\t-e : create an erasure pool\n";
211 usage=$usage"\t-o config\t\t add extra config parameters to all sections\n"
212 usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
213 usage=$usage"\t--rgw_frontend specify the rgw frontend configuration\n"
214 usage=$usage"\t--rgw_compression specify the rgw compression plugin\n"
215 usage=$usage"\t-b, --bluestore use bluestore as the osd objectstore backend (default)\n"
216 usage=$usage"\t-f, --filestore use filestore as the osd objectstore backend\n"
217 usage=$usage"\t-K, --kstore use kstore as the osd objectstore backend\n"
218 usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
219 usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
220 usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
221 usage=$usage"\t--nolockdep disable lockdep\n"
222 usage=$usage"\t--multimds <count> allow multimds with maximum active count\n"
223 usage=$usage"\t--without-dashboard: do not run using mgr dashboard\n"
224 usage=$usage"\t--bluestore-spdk: enable SPDK and with a comma-delimited list of PCI-IDs of NVME device (e.g, 0000:81:00.0)\n"
225 usage=$usage"\t--msgr1: use msgr1 only\n"
226 usage=$usage"\t--msgr2: use msgr2 only\n"
227 usage=$usage"\t--msgr21: use msgr2 and msgr1\n"
228 usage=$usage"\t--crimson: use crimson-osd instead of ceph-osd\n"
229 usage=$usage"\t--osd-args: specify any extra osd specific options\n"
230 usage=$usage"\t--bluestore-devs: comma-separated list of blockdevs to use for bluestore\n"
231 usage=$usage"\t--bluestore-zoned: blockdevs listed by --bluestore-devs are zoned devices (HM-SMR HDD or ZNS SSD)\n"
232 usage=$usage"\t--inc-osd: append some more osds into existing vcluster\n"
233 usage=$usage"\t--cephadm: enable cephadm orchestrator with ~/.ssh/id_rsa[.pub]\n"
234 usage=$usage"\t--no-parallel: dont start all OSDs in parallel\n"
241 while [ $# -ge 1 ]; do
253 [ -z "$2" ] && usage_exit
267 if [ "$inc_osd_num" == "" ]; then
299 [ -z "$2" ] && usage_exit
308 [ -z "$2" ] && usage_exit
313 [ -z "$2" ] && usage_exit
318 [ -z "$2" ] && usage_exit
323 [ -z "$2" ] && usage_exit
328 [ -z "$2" ] && usage_exit
362 [ -z "$2" ] && usage_exit
367 cephx=1 # this is on be default, flag exists for historical consistency
381 if [ ! -r $conf_fn ]; then
382 echo "cannot use old configuration: $conf_fn not readable." >&2
388 objectstore="memstore"
391 objectstore="bluestore"
394 objectstore="filestore"
400 hitset="$hitset $2 $3"
405 extra_conf="$extra_conf $2
410 if [ -z "$cache" ]; then
425 with_mgr_dashboard=false
428 [ -z "$2" ] && usage_exit
429 IFS=',' read -r -a bluestore_spdk_dev <<< "$2"
434 IFS=',' read -r -a bluestore_dev <<< "$2"
435 for dev in "${bluestore_dev[@]}"; do
436 if [ ! -b $dev -o ! -w $dev ]; then
437 echo "All --bluestore-devs must refer to writable block devices"
452 if [ $kill_all -eq 1 ]; then
453 $SUDO $INIT_CEPH stop
456 if [ "$new" -eq 0 ]; then
457 if [ -z "$CEPH_ASOK_DIR" ]; then
458 CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
460 mkdir -p $CEPH_ASOK_DIR
461 MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mon 2>/dev/null` && \
463 OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_osd 2>/dev/null` && \
465 MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mds 2>/dev/null` && \
467 MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_mgr 2>/dev/null` && \
469 RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_rgw 2>/dev/null` && \
471 NFS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC --lookup num_ganesha 2>/dev/null` && \
472 GANESHA_DAEMON_NUM="$NFS"
475 if [ -e "$conf_fn" ]; then
476 asok_dir=`dirname $($CEPH_BIN/ceph-conf -c $conf_fn --show-config-value admin_socket)`
478 if [ $asok_dir != /var/run/ceph ]; then
479 [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
482 if [ -z "$CEPH_ASOK_DIR" ]; then
483 CEPH_ASOK_DIR=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
494 eval "valg=\$valgrind_$type"
495 [ -z "$valg" ] && valg="$valgrind"
497 if [ -n "$valg" ]; then
498 prunb valgrind --tool="$valg" $valgrind_args "$@" -f
501 if [ "$nodaemon" -eq 0 ]; then
503 elif [ "$redirect" -eq 0 ]; then
504 prunb ${CEPH_ROOT}/src/ceph-run "$@" -f
506 ( prunb ${CEPH_ROOT}/src/ceph-run "$@" -f ) >$CEPH_OUT_DIR/$type.$num.stdout 2>&1
512 if [ "$new" -eq 1 -o "$overwrite_conf" -eq 1 ]; then
520 if [ $CEPH_NUM_RGW -eq 0 ]; then
524 # set up each rgw on a sequential port, starting at $CEPH_RGW_PORT.
525 # each individual rgw's id will be its port number.
526 current_port=$CEPH_RGW_PORT
527 for n in $(seq 1 $CEPH_NUM_RGW); do
529 [client.rgw.${current_port}]
530 rgw frontends = $rgw_frontend port=${current_port}
531 admin socket = ${CEPH_OUT_DIR}/radosgw.${current_port}.asok
533 current_port=$((current_port + 1))
540 log file = $CEPH_OUT_DIR/\$name.log
541 admin socket = $CEPH_ASOK_DIR/\$name.asok
543 pid file = $CEPH_OUT_DIR/\$name.pid
544 heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
547 local mgr_modules="restful iostat"
548 if $with_mgr_dashboard; then
549 mgr_modules="dashboard $mgr_modules"
553 if [ $msgr -eq 21 ]; then
559 if [ $msgr -eq 2 ]; then
562 ms bind msgr1 = false
565 if [ $msgr -eq 1 ]; then
567 ms bind msgr2 = false
573 ; generated by vstart.sh on `date`
575 num mon = $CEPH_NUM_MON
576 num osd = $CEPH_NUM_OSD
577 num mds = $CEPH_NUM_MDS
578 num mgr = $CEPH_NUM_MGR
579 num rgw = $CEPH_NUM_RGW
580 num ganesha = $GANESHA_DAEMON_NUM
584 osd failsafe full ratio = .99
585 mon osd full ratio = .99
586 mon osd nearfull ratio = .99
587 mon osd backfillfull ratio = .99
588 mon_max_pg_per_osd = ${MON_MAX_PG_PER_OSD:-1000}
589 erasure code dir = $EC_PATH
590 plugin dir = $CEPH_LIB
591 filestore fd cache size = 32
592 run dir = $CEPH_OUT_DIR
593 crash dir = $CEPH_OUT_DIR
594 enable experimental unrecoverable data corrupting features = *
595 osd_crush_chooseleaf_type = 0
596 debug asok assert abort = true
600 if [ "$lockdep" -eq 1 ] ; then
605 if [ "$cephx" -eq 1 ] ; then
607 auth cluster required = cephx
608 auth service required = cephx
609 auth client required = cephx
611 elif [ "$gssapi_authx" -eq 1 ] ; then
613 auth cluster required = gss
614 auth service required = gss
615 auth client required = gss
616 gss ktab client file = $CEPH_DEV_DIR/gss_\$name.keytab
620 auth cluster required = none
621 auth service required = none
622 auth client required = none
625 if [ "$short" -eq 1 ]; then
626 COSDSHORT=" osd max object name len = 460
627 osd max object namespace len = 64"
629 if [ "$objectstore" == "bluestore" ]; then
630 if [ "$spdk_enabled" -eq 1 ]; then
631 BLUESTORE_OPTS=" bluestore_block_db_path = \"\"
632 bluestore_block_db_size = 0
633 bluestore_block_db_create = false
634 bluestore_block_wal_path = \"\"
635 bluestore_block_wal_size = 0
636 bluestore_block_wal_create = false
637 bluestore_spdk_mem = 2048"
639 BLUESTORE_OPTS=" bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
640 bluestore block db size = 1073741824
641 bluestore block db create = true
642 bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
643 bluestore block wal size = 1048576000
644 bluestore block wal create = true"
646 if [ "$zoned_enabled" -eq 1 ]; then
647 BLUESTORE_OPTS="${BLUESTORE_OPTS}
648 bluestore min alloc size = 65536
649 bluestore prefer deferred size = 0
650 bluestore prefer deferred size hdd = 0
651 bluestore prefer deferred size ssd = 0
652 bluestore allocator = zoned"
657 keyring = $keyring_fn
658 log file = $CEPH_OUT_DIR/\$name.\$pid.log
659 admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok
662 rgw crypt s3 kms backend = testing
663 rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
664 rgw crypt require ssl = false
665 ; uncomment the following to set LC days as the value in seconds;
666 ; needed for passing lc time based s3-tests (can be verbose)
667 ; rgw lc debug interval = 10
675 mds data = $CEPH_DEV_DIR/mds.\$id
676 mds root ino uid = `id -u`
677 mds root ino gid = `id -g`
680 mgr data = $CEPH_DEV_DIR/mgr.\$id
681 mgr module path = $MGR_PYTHON_PATH
682 cephadm path = $CEPH_ROOT/src/cephadm/cephadm
687 osd_check_max_object_name_len_on_startup = false
688 osd data = $CEPH_DEV_DIR/osd\$id
689 osd journal = $CEPH_DEV_DIR/osd\$id/journal
690 osd journal size = 100
692 osd class dir = $OBJCLASS_PATH
693 osd class load list = *
694 osd class default list = *
695 osd fast shutdown = false
697 filestore wbthrottle xfs ios start flusher = 10
698 filestore wbthrottle xfs ios hard limit = 20
699 filestore wbthrottle xfs inodes hard limit = 30
700 filestore wbthrottle btrfs ios start flusher = 10
701 filestore wbthrottle btrfs ios hard limit = 20
702 filestore wbthrottle btrfs inodes hard limit = 30
703 bluestore fsck on mount = true
704 bluestore block create = true
708 kstore fsck on mount = true
709 osd objectstore = $objectstore
713 mgr initial modules = $mgr_modules
717 mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
718 osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
722 write_logrotate_conf() {
723 out_dir=$(pwd)"/out/*.log"
736 # NOTE: ensuring that the absence of one of the following processes
737 # won't abort the logrotate command.
738 killall -u $USER -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror || echo ""
745 logrotate_conf_path=$(pwd)"/logrotate.conf"
746 logrotate_state_path=$(pwd)"/logrotate.state"
748 if ! test -a $logrotate_conf_path; then
749 if test -a $logrotate_state_path; then
750 rm -f $logrotate_state_path
752 write_logrotate_conf > $logrotate_conf_path
759 for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
761 [ $count -eq $CEPH_NUM_MON ] && break;
762 count=$(($count + 1))
763 if [ -z "$MONS" ]; then
770 if [ "$new" -eq 1 ]; then
771 if [ `echo $IP | grep '^127\\.'` ]; then
773 echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
774 echo " connect. either adjust /etc/hosts, or edit this script to use your"
775 echo " machine's real IP."
779 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
780 prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin \
781 --cap mon 'allow *' \
782 --cap osd 'allow *' \
783 --cap mds 'allow *' \
784 --cap mgr 'allow *' \
787 # build a fresh fs monmap, mon fs
793 if [ $msgr -eq 1 ]; then
794 A="v1:$IP:$(($CEPH_PORT+$count+1))"
796 if [ $msgr -eq 2 ]; then
797 A="v2:$IP:$(($CEPH_PORT+$count+1))"
799 if [ $msgr -eq 21 ]; then
800 A="[v2:$IP:$(($CEPH_PORT+$count)),v1:$IP:$(($CEPH_PORT+$count+1))]"
802 params+=("--addv" "$f" "$A")
803 mon_host="$mon_host $A"
807 mon data = $CEPH_DEV_DIR/mon.$f
809 count=$(($count + 2))
815 prun "$CEPH_BIN/monmaptool" --create --clobber "${params[@]}" --print "$monmap_fn"
819 prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
820 prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
821 prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
824 prun rm -- "$monmap_fn"
830 run 'mon' $f $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
835 if [ $inc_osd_num -gt 0 ]; then
836 old_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
838 end=$(($start-1+$inc_osd_num))
839 overwrite_conf=1 # fake wconf
842 end=$(($CEPH_NUM_OSD-1))
845 for osd in `seq $start $end`
847 local extra_seastar_args
848 if [ "$ceph_osd" == "crimson-osd" ]; then
849 # designate a single CPU node $osd for osd.$osd
850 extra_seastar_args="--smp 1 --cpuset $osd"
851 if [ "$debug" -ne 0 ]; then
852 extra_seastar_args+=" --debug"
855 if [ "$new" -eq 1 -o $inc_osd_num -gt 0 ]; then
860 if [ "$spdk_enabled" -eq 1 ]; then
862 bluestore_block_path = spdk:${bluestore_spdk_dev[$osd]}
866 rm -rf $CEPH_DEV_DIR/osd$osd || true
867 if command -v btrfs > /dev/null; then
868 for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
870 if [ -n "$filestore_path" ]; then
871 ln -s $filestore_path $CEPH_DEV_DIR/osd$osd
872 elif [ -n "$kstore_path" ]; then
873 ln -s $kstore_path $CEPH_DEV_DIR/osd$osd
875 mkdir -p $CEPH_DEV_DIR/osd$osd
876 if [ -n "${bluestore_dev[$osd]}" ]; then
877 dd if=/dev/zero of=${bluestore_dev[$osd]} bs=1M count=1
878 ln -s ${bluestore_dev[$osd]} $CEPH_DEV_DIR/osd$osd/block
880 bluestore fsck on mount = false
886 echo "add osd$osd $uuid"
887 OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
888 echo "{\"cephx_secret\": \"$OSD_SECRET\"}" > $CEPH_DEV_DIR/osd$osd/new.json
889 ceph_adm osd new $uuid -i $CEPH_DEV_DIR/osd$osd/new.json
890 rm $CEPH_DEV_DIR/osd$osd/new.json
891 $SUDO $CEPH_BIN/$ceph_osd $extra_osd_args -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid $extra_seastar_args
893 local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
901 run 'osd' $osd $SUDO $CEPH_BIN/$ceph_osd \
902 $extra_seastar_args $extra_osd_args \
903 -i $osd $ARGS $COSD_ARGS &
912 for p in $osds_wait; do
915 debug echo OSDs started
917 if [ $inc_osd_num -gt 0 ]; then
919 new_maxosd=$($CEPH_BIN/ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//')
920 sed -i "s/num osd = .*/num osd = $new_maxosd/g" $conf_fn
926 local ssl=${DASHBOARD_SSL:-1}
927 # avoid monitors on nearby ports (which test/*.sh use extensively)
928 MGR_PORT=$(($CEPH_PORT + 1000))
930 for name in x y z a b c d e f g h i j k l m n o p
932 [ $mgr -eq $CEPH_NUM_MGR ] && break
934 if [ "$new" -eq 1 ]; then
935 mkdir -p $CEPH_DEV_DIR/mgr.$name
936 key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
937 $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
938 ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'
945 if $with_mgr_dashboard ; then
946 local port_option="ssl_server_port"
947 local http_proto="https"
948 if [ "$ssl" == "0" ]; then
949 port_option="server_port"
951 ceph_adm config set mgr mgr/dashboard/ssl false --force
953 ceph_adm config set mgr mgr/dashboard/$name/$port_option $MGR_PORT --force
954 if [ $mgr -eq 1 ]; then
955 DASH_URLS="$http_proto://$IP:$MGR_PORT"
957 DASH_URLS+=", $http_proto://$IP:$MGR_PORT"
960 MGR_PORT=$(($MGR_PORT + 1000))
961 ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
962 PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))
964 ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
965 if [ $mgr -eq 1 ]; then
966 RESTFUL_URLS="https://$IP:$MGR_PORT"
968 RESTFUL_URLS+=", https://$IP:$MGR_PORT"
970 MGR_PORT=$(($MGR_PORT + 1000))
973 debug echo "Starting mgr.${name}"
974 run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS
977 if [ "$new" -eq 1 ]; then
978 # setting login credentials for dashboard
979 if $with_mgr_dashboard; then
980 while ! ceph_adm -h | grep -c -q ^dashboard ; do
981 debug echo 'waiting for mgr dashboard module to start'
984 ceph_adm dashboard ac-user-create --force-password admin admin administrator
985 if [ "$ssl" != "0" ]; then
986 if ! ceph_adm dashboard create-self-signed-cert; then
987 debug echo dashboard module not working correctly!
992 while ! ceph_adm -h | grep -c -q ^restful ; do
993 debug echo 'waiting for mgr restful module to start'
996 if ceph_adm restful create-self-signed-cert; then
998 ceph_adm restful create-key admin -o $SF
999 RESTFUL_SECRET=`cat $SF`
1002 debug echo MGR Restful is not working, perhaps the package is not installed?
1006 if [ "$cephadm" -eq 1 ]; then
1007 debug echo Enabling cephadm orchestrator
1008 if [ "$new" -eq 1 ]; then
1010 https://registry.hub.docker.com/v2/repositories/ceph/daemon-base/tags/latest-master-devel \
1011 | jq -r '.images[].digest')
1012 ceph_adm config set global container_image "docker.io/ceph/daemon-base@$digest"
1014 ceph_adm config-key set mgr/cephadm/ssh_identity_key -i ~/.ssh/id_rsa
1015 ceph_adm config-key set mgr/cephadm/ssh_identity_pub -i ~/.ssh/id_rsa.pub
1016 ceph_adm mgr module enable cephadm
1017 ceph_adm orch set backend cephadm
1018 ceph_adm orch host add "$(hostname)"
1019 ceph_adm orch apply crash '*'
1020 ceph_adm config set mgr mgr/cephadm/allow_ptrace true
1026 for name in a b c d e f g h i j k l m n o p
1028 [ $mds -eq $CEPH_NUM_MDS ] && break
1031 if [ "$new" -eq 1 ]; then
1032 prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
1033 key_fn=$CEPH_DEV_DIR/mds.$name/keyring
1038 if [ "$standby" -eq 1 ]; then
1039 mkdir -p $CEPH_DEV_DIR/mds.${name}s
1041 mds standby for rank = $mds
1043 mds standby replay = true
1044 mds standby for name = ${name}
1047 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
1048 ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow rw tag cephfs *=*' mds 'allow' mgr 'allow profile mds'
1049 if [ "$standby" -eq 1 ]; then
1050 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
1051 "$CEPH_DEV_DIR/mds.${name}s/keyring"
1052 ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
1053 mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
1057 run 'mds' $name $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
1058 if [ "$standby" -eq 1 ]; then
1059 run 'mds' $name $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
1062 #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
1063 #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
1064 #ceph_adm mds set max_mds 2
1067 if [ $new -eq 1 ]; then
1068 if [ "$CEPH_NUM_FS" -gt "0" ] ; then
1069 sleep 5 # time for MDS to come up as standby to avoid health warnings on fs creation
1070 if [ "$CEPH_NUM_FS" -gt "1" ] ; then
1071 ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
1074 # wait for volume module to load
1075 while ! ceph_adm fs volume ls ; do sleep 1 ; done
1077 for name in a b c d e f g h i j k l m n o p
1079 ceph_adm fs volume create ${name}
1080 ceph_adm fs authorize ${name} "client.fs_${name}" / rwp >> "$keyring_fn"
1082 [ $fs -eq $CEPH_NUM_FS ] && break
1089 # Ganesha Daemons requires nfs-ganesha nfs-ganesha-ceph nfs-ganesha-rados-grace
1090 # nfs-ganesha-rados-urls (version 3.3 and above) packages installed. On
1091 # Fedora>=31 these packages can be installed directly with 'dnf'. For CentOS>=8
1092 # the packages are available at
1093 # https://wiki.centos.org/SpecialInterestGroup/Storage
1094 # Similarly for Ubuntu>=16.04 follow the instructions on
1095 # https://launchpad.net/~nfs-ganesha
1099 GANESHA_PORT=$(($CEPH_PORT + 4000))
1101 test_user="ganesha-$cluster_id"
1102 pool_name="nfs-ganesha"
1103 namespace=$cluster_id
1104 url="rados://$pool_name/$namespace/conf-nfs.$test_user"
1106 prun ceph_adm auth get-or-create client.$test_user \
1108 osd "allow rw pool=$pool_name namespace=$namespace, allow rw tag cephfs data=a" \
1109 mds "allow rw path=/" \
1112 ceph_adm mgr module enable test_orchestrator
1113 ceph_adm orch set backend test_orchestrator
1114 ceph_adm test_orchestrator load_data -i $CEPH_ROOT/src/pybind/mgr/test_orchestrator/dummy_data.json
1115 prun ceph_adm nfs cluster create cephfs $cluster_id
1116 prun ceph_adm nfs export create cephfs "a" $cluster_id "/cephfs"
1118 for name in a b c d e f g h i j k l m n o p
1120 [ $ganesha -eq $GANESHA_DAEMON_NUM ] && break
1122 port=$(($GANESHA_PORT + ganesha))
1123 ganesha=$(($ganesha + 1))
1124 ganesha_dir="$CEPH_DEV_DIR/ganesha.$name"
1125 prun rm -rf $ganesha_dir
1126 prun mkdir -p $ganesha_dir
1128 echo "NFS_CORE_PARAM {
1130 Enable_RQUOTA = false;
1140 RecoveryBackend = rados_cluster;
1141 Minor_Versions = 1, 2;
1148 namespace = $namespace;
1149 UserId = $test_user;
1154 Userid = $test_user;
1155 watch_url = \"$url\";
1156 }" > "$ganesha_dir/ganesha-$name.conf"
1162 ganesha data = $ganesha_dir
1163 pid file = $ganesha_dir/ganesha-$name.pid
1166 prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace add $name
1167 prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace
1169 prun env CEPH_CONF="${conf_fn}" ganesha.nfsd -L "$CEPH_OUT_DIR/ganesha-$name.log" -f "$ganesha_dir/ganesha-$name.conf" -p "$CEPH_OUT_DIR/ganesha-$name.pid" -N NIV_DEBUG
1171 # Wait few seconds for grace period to be removed
1174 prun env CEPH_CONF="${conf_fn}" ganesha-rados-grace --userid $test_user -p $pool_name -n $namespace
1176 if $with_mgr_dashboard; then
1177 $CEPH_BIN/rados -p $pool_name put "conf-$name" "$ganesha_dir/ganesha-$name.conf"
1180 echo "$test_user ganesha daemon $name started on port: $port"
1183 if $with_mgr_dashboard; then
1184 ceph_adm dashboard set-ganesha-clusters-rados-pool-namespace $pool_name
1188 if [ "$debug" -eq 0 ]; then
1193 debug echo "** going verbose **"
1202 if [ -n "$MON_ADDR" ]; then
1203 CMON_ARGS=" -m "$MON_ADDR
1204 COSD_ARGS=" -m "$MON_ADDR
1205 CMDS_ARGS=" -m "$MON_ADDR
1208 if [ -z "$CEPH_PORT" ]; then
1211 CEPH_PORT="$(echo $(( RANDOM % 1000 + 40000 )))"
1212 ss -a -n | egrep "\<LISTEN\>.+:${CEPH_PORT}\s+" 1>/dev/null 2>&1 || break
1216 [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
1219 [ -d $CEPH_DEV_DIR/osd0/. ] && [ -e $CEPH_DEV_DIR/sudo ] && SUDO="sudo"
1221 if [ $inc_osd_num -eq 0 ]; then
1222 prun $SUDO rm -f core*
1225 [ -d $CEPH_ASOK_DIR ] || mkdir -p $CEPH_ASOK_DIR
1226 [ -d $CEPH_OUT_DIR ] || mkdir -p $CEPH_OUT_DIR
1227 [ -d $CEPH_DEV_DIR ] || mkdir -p $CEPH_DEV_DIR
1228 if [ $inc_osd_num -eq 0 ]; then
1229 $SUDO find "$CEPH_OUT_DIR" -type f -delete
1231 [ -d gmon ] && $SUDO rm -rf gmon/*
1233 [ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && [ -e $keyring_fn ] && rm $keyring_fn
1236 # figure machine's ip
1237 HOSTNAME=`hostname -s`
1238 if [ -n "$ip" ]; then
1241 echo hostname $HOSTNAME
1242 if [ -x "$(which ip 2>/dev/null)" ]; then
1247 # filter out IPv4 and localhost addresses
1248 IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
1249 # if nothing left, try using localhost address, it might work
1250 if [ -z "$IP" ]; then IP="127.0.0.1"; fi
1253 echo "port $CEPH_PORT"
1256 [ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph
1259 if [ "$cephx" -eq 1 ]; then
1260 prun $SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
1262 prun $SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
1266 if [ $inc_osd_num -gt 0 ]; then
1271 if [ "$new" -eq 1 ]; then
1275 if [ $CEPH_NUM_MON -gt 0 ]; then
1278 debug echo Populating config ...
1279 cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
1281 osd_pool_default_size = $OSD_POOL_DEFAULT_SIZE
1282 osd_pool_default_min_size = 1
1285 mon_osd_reporter_subtree_level = osd
1286 mon_data_avail_warn = 2
1287 mon_data_avail_crit = 1
1288 mon_allow_pool_delete = true
1289 mon_allow_pool_size_one = true
1292 osd_scrub_load_threshold = 2000
1293 osd_debug_op_order = true
1294 osd_debug_misdirected_ops = true
1295 osd_copyfrom_max_chunk = 524288
1298 mds_debug_frag = true
1299 mds_debug_auth_pins = true
1300 mds_debug_subtrees = true
1303 mgr/telemetry/nag = false
1304 mgr/telemetry/enable = false
1308 if [ "$debug" -ne 0 ]; then
1309 debug echo Setting debug configs ...
1310 cat <<EOF | $CEPH_BIN/ceph -c $conf_fn config assimilate-conf -i -
1324 debug_filestore = 20
1325 debug_bluestore = 20
1337 mds_debug_scatterstat = true
1338 mds_verify_scatter = true
1341 if [ "$cephadm" -gt 0 ]; then
1342 debug echo Setting mon public_network ...
1343 public_network=$(ip route list | grep -w "$IP" | awk '{print $1}')
1344 ceph_adm config set mon public_network $public_network
1348 if [ $CEPH_NUM_MGR -gt 0 ]; then
1353 if [ $CEPH_NUM_OSD -gt 0 ]; then
1358 if [ "$smallmds" -eq 1 ]; then
1361 mds log max segments = 2
1362 # Default 'mds cache memory limit' is 1GiB, and here we set it to 100MiB.
1363 mds cache memory limit = 100M
1367 if [ $CEPH_NUM_MDS -gt 0 ]; then
1369 # key with access to all FS
1370 ceph_adm fs authorize \* "client.fs" / rwp >> "$keyring_fn"
1373 # Don't set max_mds until all the daemons are started, otherwise
1374 # the intended standbys might end up in active roles.
1375 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
1376 sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
1379 for name in a b c d e f g h i j k l m n o p
1381 [ $fs -eq $CEPH_NUM_FS ] && break
1383 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
1384 ceph_adm fs set "${name}" max_mds "$CEPH_MAX_MDS"
1390 if [ "$ec" -eq 1 ]; then
1392 osd erasure-code-profile set ec-profile m=2 k=2
1393 osd pool create ec erasure ec-profile
1398 if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
1399 pseudo_path="/cephfs"
1400 if [ "$cephadm" -gt 0 ]; then
1402 prun ceph_adm nfs cluster create cephfs $cluster_id
1403 prun ceph_adm nfs export create cephfs "a" $cluster_id $pseudo_path
1407 port="<ganesha-port-num>"
1409 echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
1413 while [ -n "$*" ]; do
1416 debug echo "creating cache for pool $p ..."
1418 osd pool create ${p}-cache
1419 osd tier add $p ${p}-cache
1420 osd tier cache-mode ${p}-cache writeback
1421 osd tier set-overlay $p ${p}-cache
1428 while [ -n "$*" ]; do
1433 debug echo "setting hit_set on pool $pool type $type ..."
1435 osd pool set $pool hit_set_type $type
1436 osd pool set $pool hit_set_count 8
1437 osd pool set $pool hit_set_period 30
# Create the stock RGW test users: the default S3 "testid" user, the users
# expected by the s3-tests suite, and a Swift subuser "test:tester".
# NOTE(review): all credentials below are hard-coded, well-known test keys —
# this is intentional for throwaway dev clusters, never for production.
1443 do_rgw_create_users()
1446 local akey='0555b35654ad1656d804'
1447 local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
1448 debug echo "setting up user testid"
# Default S3 user; stdout suppressed, so a failure here is only visible
# via the command's stderr/exit status.
1449 $CEPH_BIN/radosgw-admin user create --uid testid --access-key $akey --secret $skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null
1451 # Create S3-test users
1452 # See: https://github.com/ceph/s3-tests
1453 debug echo "setting up s3-test users"
1454 $CEPH_BIN/radosgw-admin user create \
1455 --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
1456 --access-key ABCDEFGHIJKLMNOPQRST \
1457 --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
1458 --display-name youruseridhere \
1459 --email s3@example.com -c $conf_fn > /dev/null
1460 $CEPH_BIN/radosgw-admin user create \
1461 --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
1462 --access-key NOPQRSTUVWXYZABCDEFG \
1463 --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
1464 --display-name john.doe \
1465 --email john.doe@example.com -c $conf_fn > /dev/null
# Tenanted s3-tests user — presumably created under a --tenant argument on
# the elided continuation line; verify against the full script.
1466 $CEPH_BIN/radosgw-admin user create \
1468 --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
1469 --access-key HIJKLMNOPQRSTUVWXYZA \
1470 --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
1471 --display-name tenanteduser \
1472 --email tenanteduser@example.com -c $conf_fn > /dev/null
1475 debug echo "setting up user tester"
# Swift subuser with full access under the "test" account.
1476 $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null
# Echo the credentials so a developer can point s3cmd/swift clients at the
# freshly started gateway.
1479 echo "S3 User Info:"
1480 echo "  access key:  $akey"
1481 echo "  secret key:  $skey"
1483 echo "Swift User Info:"
1484 echo "  account   : test"
1485 echo "  user      : tester"
1486 echo "  password  : testing"
1492 if [ "$new" -eq 1 ]; then
1494 if [ -n "$rgw_compression" ]; then
1495 debug echo "setting compression type=$rgw_compression"
1496 $CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
1501 if [ "$debug" -ne 0 ]; then
1502 RGWDEBUG="--debug-rgw=20 --debug-ms=1"
1505 local CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT}"
1506 local CEPH_RGW_HTTPS="${CEPH_RGW_PORT: -1}"
1507 if [[ "${CEPH_RGW_HTTPS}" = "s" ]]; then
1508 CEPH_RGW_PORT_NUM="${CEPH_RGW_PORT::-1}"
1513 [ $CEPH_RGW_PORT_NUM -lt 1024 ] && RGWSUDO=sudo
1515 current_port=$CEPH_RGW_PORT
1516 for n in $(seq 1 $CEPH_NUM_RGW); do
1517 rgw_name="client.rgw.${current_port}"
1519 ceph_adm auth get-or-create $rgw_name \
1525 debug echo start rgw on http${CEPH_RGW_HTTPS}://localhost:${current_port}
1526 run 'rgw' $current_port $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn \
1527 --log-file=${CEPH_OUT_DIR}/radosgw.${current_port}.log \
1528 --admin-socket=${CEPH_OUT_DIR}/radosgw.${current_port}.asok \
1529 --pid-file=${CEPH_OUT_DIR}/radosgw.${current_port}.pid \
1532 "--rgw_frontends=${rgw_frontend} port=${current_port}${CEPH_RGW_HTTPS}"
1535 [ $i -eq $CEPH_NUM_RGW ] && break
1537 current_port=$((current_port+1))
1540 if [ "$CEPH_NUM_RGW" -gt 0 ]; then
1544 debug echo "vstart cluster complete. Use stop.sh to stop. See out/* (e.g. 'tail -f out/????') for debug output."
1547 if [ "$new" -eq 1 ]; then
1548 if $with_mgr_dashboard; then
1549 echo "dashboard urls: $DASH_URLS"
1550 echo " w/ user/pass: admin / admin"
1552 echo "restful urls: $RESTFUL_URLS"
1553 echo " w/ user/pass: admin / $RESTFUL_SECRET"
1557 # add header to the environment file
1560 echo "# source this file into your shell to set up the environment."
1561 echo "# For example:"
1562 echo "# $ . $CEPH_DIR/vstart_environment.sh"
1564 } > $CEPH_DIR/vstart_environment.sh
1566 echo "export PYTHONPATH=$PYBIND:$CYTHON_PYTHONPATH:$CEPH_PYTHON_COMMON\$PYTHONPATH"
1567 echo "export LD_LIBRARY_PATH=$CEPH_LIB:\$LD_LIBRARY_PATH"
1569 if [ "$CEPH_DIR" != "$PWD" ]; then
1570 echo "export CEPH_CONF=$conf_fn"
1571 echo "export CEPH_KEYRING=$keyring_fn"
1574 if [ -n "$CEPHFS_SHELL" ]; then
1575 echo "alias cephfs-shell=$CEPHFS_SHELL"
1577 } | tee -a $CEPH_DIR/vstart_environment.sh
1581 # always keep this section at the very bottom of this file
1582 STRAY_CONF_PATH="/etc/ceph/ceph.conf"
1583 if [ -f "$STRAY_CONF_PATH" -a -n "$conf_fn" -a ! "$conf_fn" -ef "$STRAY_CONF_PATH" ]; then
1587 echo " Please remove stray $STRAY_CONF_PATH if not needed."
1588 echo " Your conf files $conf_fn and $STRAY_CONF_PATH may not be in sync"
1589 echo " and may lead to undesired results."
1592 echo " Remember to restart cluster after removing $STRAY_CONF_PATH"