+++ /dev/null
-#!/bin/bash
-
-OSDS=1
-HOSTS=0
-SKIP_LOOP=0
-SKIP_BOOTSTRAP=0
-
-function print_usage() {
- echo "./bootstrap.sh [OPTIONS]"
- echo "options:"
- echo " --hosts n: number of hosts to add"
- echo " --osds n: number of osds to add"
- echo " --update-ceph-image: create/update ceph image"
- echo " --update-box-image: create/update cephadm box image"
- echo " --skip-create-loop: skip creating loopback device"
- echo " --skip-bootstrap: skip deploying the containers"
- echo " -l | --list-hosts: list available cephad-box hosts/seed"
- echo " -h | --help: this help :)"
-}
-
-function docker-ips() {
- docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}} %tab% {{.Name}} %tab% {{.Config.Hostname}}' $(docker ps -aq) | sed 's#%tab%#\t#g' | sed 's#/##g' | sort -t . -k 1,1n -k 2,2n -k 3,3n -k 4,4n
-}
-
-while [ $# -ge 1 ]; do
-case $1 in
- -h | --help)
- print_usage
- exit
- ;;
- -l | --list-hosts) # TODO remove when ceph-ci updated
- echo -e "IP\t\tName\t\t Hostname"
- docker-ips | grep box
- exit
- ;;
- --update-box-image)
- echo Updating box image
- docker build -t cephadm-box -f Dockerfile .
- ;;
- --update-ceph-image) # TODO remove when ceph-ci updated
- echo Updating ceph image
- source ./get_ceph_image.sh
- ;;
- --hosts)
- HOSTS="$2"
- echo "number of hosts: $HOSTS"
- shift
- ;;
- --osds)
- OSDS="$2"
- echo "number of osds: $OSDS"
- shift
- ;;
- --skip-create-loop)
- echo Skiping loop creation
- SKIP_LOOP=1
- ;;
- --skip-bootstrap)
- echo Skiping bootstrap of containers
- SKIP_BOOTSTRAP=1
- ;;
-esac
-shift
-done
-
-# TODO: remove when ceph-ci image has required deps
-if [[ ! -a docker/ceph/image/quay.ceph.image.tar ]]
-then
- echo -e "\033[33mWARNING:\033[0m run ./get_ceph_image.sh to get an updated ceph-ci/ceph image with correct deps."
- exit
-fi
-
-if [[ $OSDS -eq 0 ]]
-then
- SKIP_LOOP=1
-fi
-
-if [[ $SKIP_LOOP -eq 0 ]]
-then
- source setup_loop.sh
- create_loops $OSDS
-fi
-
-
-if [[ $SKIP_BOOTSTRAP -eq 0 ]]
-then
- # loops should be created before starting docker-compose or else docker could
- # not find lvs
- docker-compose down
- docker-compose up --scale hosts=$HOSTS -d
- sleep 3
-
- IPS=$(docker-ips | grep "box_hosts" | awk '{ print $1 }')
- echo "IPS: "
- echo $IPS
-
- sudo sysctl net.ipv4.conf.all.forwarding=1
- sudo iptables -P FORWARD ACCEPT
-
- for ((i=1;i<=$HOSTS;i++))
- do
- docker-compose exec --index=$i hosts /cephadm/box/setup_ssh.sh run-sshd
- done
-
- docker-compose exec -e NUM_OSDS=${OSDS} seed /cephadm/box/start
-
- docker-compose exec -e HOST_IPS="${IPS}" seed /cephadm/box/setup_ssh.sh copy-cluster-ssh-key
-fi
ctx.image = _get_default_image(ctx)
return func(ctx)
+
return cast(FuncT, _default_image)
# type: (CephadmContext, str, str, Union[int, str], int, int, Optional[str], Optional[str]) -> None
data_dir = make_data_dir(ctx, fsid, daemon_type, daemon_id, uid=uid, gid=gid)
make_log_dir(ctx, fsid, uid=uid, gid=gid)
+
if config:
config_path = os.path.join(data_dir, 'config')
with open(config_path, 'w') as f:
raise RuntimeError('uid/gid not found')
-def validate_osd_data_dir(data_dir):
- # type: (str) -> None
- required_files = ['keyring', 'block', 'type', 'config']
- current_files = os.listdir(data_dir)
- error_msg = ''
- for file in required_files:
- if file not in current_files:
- error_msg += f'File {file} not found in {data_dir}\n'
- if error_msg:
- raise RuntimeError(error_msg)
-
-
-def configure_osd_data_dir(ctx, fsid, daemon_id, uid, gid):
- # type: (CephadmContext, str, Union[int, str], int, int) -> None
- daemon_type = 'osd'
- data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
-
- # Ensure user:group is the expected
- for f in os.listdir(data_dir):
- os.chown(os.path.join(data_dir, f), uid, gid)
-
- # Create minimal config
- touch(os.path.join(data_dir, 'config'), uid, gid)
- mounts = get_container_mounts(ctx, fsid, daemon_type, daemon_id, no_config=True)
- mounts[data_dir] = '/var/lib/ceph/osd/ceph-%s' % daemon_id
- mounts['/etc/ceph/ceph.conf'] = '/etc/ceph/ceph.conf:z'
- mounts['/etc/ceph/ceph.client.admin.keyring'] = '/etc/ceph/ceph.keyring:z'
-
- CephContainer(
- ctx,
- image=ctx.image,
- entrypoint='/usr/bin/ceph',
- args=[
- 'config', 'generate-minimal-conf',
- '-o', '/var/lib/ceph/osd/ceph-%s/config' % daemon_id
- ],
- privileged=True,
- volume_mounts=mounts
- ).run()
-
- # Create keyring and then import
- key = CephContainer(
- ctx,
- image=ctx.image,
- entrypoint='/usr/bin/ceph-authtool',
- args=['--gen-print-key'],
- ).run().strip()
-
- keyring = ('[%s.%s]\n'
- '\tkey = %s\n'
- '\tcaps osd = allow *\n'
- '\tcaps mon = allow *\n'
- % (daemon_type, daemon_id, key))
- with open(os.path.join(data_dir, 'keyring'), 'w+') as f:
- os.fchmod(f.fileno(), 0o600)
- os.fchown(f.fileno(), uid, gid)
- f.write(keyring)
- CephContainer(
- ctx,
- image=ctx.image,
- entrypoint='/usr/bin/ceph',
- args=[
- 'auth', 'import',
- '-i', '/var/lib/ceph/osd/ceph-%s/keyring' % daemon_id
- ],
- privileged=True,
- volume_mounts=mounts
- ).run()
-
- # Validate we have needed files
- validate_osd_data_dir(data_dir)
-
-
def deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid,
config=None, keyring=None,
osd_fsid=None,
uid, gid,
config, keyring)
- if daemon_type == 'osd':
- configure_osd_data_dir(ctx, fsid, daemon_id, uid, gid)
-
if not reconfig:
if daemon_type == CephadmAgent.daemon_type:
if ctx.config_json == '-':
) -> None:
# cmd
data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
-
- # if osd then try to read parameters if not provided
- if daemon_type == 'osd':
- osd_fsid_path = os.path.join(data_dir, 'fsid')
- if 'fsid' in os.listdir(data_dir) and not osd_fsid:
- with open(osd_fsid_path, 'r') as f:
- osd_fsid = f.read()
-
with open(data_dir + '/unit.run.new', 'w') as f, \
open(data_dir + '/unit.meta.new', 'w') as metaf:
f.write('set -e\n')
verbosity=CallVerbosity.DEBUG)
if enable:
call_throws(ctx, ['systemctl', 'enable', unit_name])
-
if start:
-
clean_cgroup(ctx, fsid, unit_name)
call_throws(ctx, ['systemctl', 'start', unit_name])
@default_image
-@infer_fsid
def command_deploy(ctx):
# type: (CephadmContext) -> None
- assert ctx.fsid
daemon_type, daemon_id = ctx.name.split('.', 1)
lock = FileLock(ctx, ctx.fsid)
mounts[pathify(ctx.config)] = '/etc/ceph/ceph.conf:z'
if ctx.keyring:
mounts[pathify(ctx.keyring)] = '/etc/ceph/ceph.keyring:z'
-
if ctx.mount:
for _mount in ctx.mount:
split_src_dst = _mount.split(':')
(uid, gid) = (0, 0) # ceph-volume runs as root
mounts = get_container_mounts(ctx, ctx.fsid, 'osd', None)
+
tmp_config = None
tmp_keyring = None
tmp_config = write_tmp(config, uid, gid)
mounts[tmp_config.name] = '/etc/ceph/ceph.conf:z'
- # Ceph-volume uses the bootstrap-osd key in order to do its operations.
- # This function retrieves the keyring so it can be provided.
-
- def get_bootstrap_osd_keyring() -> Optional[str]:
- if not ctx.keyring and os.path.exists(SHELL_DEFAULT_KEYRING):
- ctx.keyring = SHELL_DEFAULT_KEYRING
- (config, keyring) = get_config_and_keyring(ctx)
-
- mounts = get_container_mounts(ctx, ctx.fsid, 'osd', None,
- no_config=True if ctx.config else False)
- if ctx.config:
- mounts[pathify(ctx.config)] = '/etc/ceph/ceph.conf:z'
- if ctx.keyring:
- mounts[pathify(ctx.keyring)] = '/etc/ceph/ceph.keyring:z'
- c = CephContainer(
- ctx,
- image=ctx.image,
- entrypoint='/usr/bin/ceph',
- args='auth get client.bootstrap-osd'.split(),
- volume_mounts=mounts,
- )
- out, err, code = call_throws(ctx, c.run_cmd())
- if not code:
- return out
- else:
- return None
-
- if not keyring:
- keyring = get_bootstrap_osd_keyring()
-
if keyring:
# tmp keyring file
tmp_keyring = write_tmp(keyring, uid, gid)
mounts[tmp_keyring.name] = '/var/lib/ceph/bootstrap-osd/ceph.keyring:z'
- # If ceph-volume creates osd data directories which won't be persisted
- # so we use a tmp dir for that.
- # FIXME: probably we can use /var/lib/ceph/{fsid}?
- with tempfile.TemporaryDirectory() as tmp_osd_dir:
- # match /var/lib/ceph/osd/ dir permissions
- os.chown(tmp_osd_dir, uid, gid)
- os.chmod(tmp_osd_dir, 0o755)
- # store newly created osds here
- mounts[tmp_osd_dir] = '/var/lib/ceph/osd/:z'
-
- c = CephContainer(
- ctx,
- image=ctx.image,
- entrypoint='/usr/sbin/ceph-volume',
- envs=ctx.env,
- args=ctx.command,
- privileged=True,
- volume_mounts=mounts,
- )
+ c = CephContainer(
+ ctx,
+ image=ctx.image,
+ entrypoint='/usr/sbin/ceph-volume',
+ envs=ctx.env,
+ args=ctx.command,
+ privileged=True,
+ volume_mounts=mounts,
+ )
- out, err, code = call_throws(ctx, c.run_cmd())
- if not code:
- print(out)
- else:
- print(err)
- # If osds were created move osd's data directories
- for osd_folder_name in os.listdir(tmp_osd_dir):
- if 'ceph-' in osd_folder_name[:5]:
- osd_id = osd_folder_name[5:]
- osd_data_dir = os.path.join(tmp_osd_dir, osd_folder_name)
- copy_tree(ctx, [osd_data_dir], f'/var/lib/ceph/{ctx.fsid}/osd.{osd_id}', uid=uid, gid=gid)
+ out, err, code = call_throws(ctx, c.run_cmd())
+ if not code:
+ print(out)
##################################
help='cluster FSID')
parser_ceph_volume.add_argument(
'--config-json',
- help='JSON file with config and (client.bootstrap-osd) key')
+        help='JSON file with config and (client.bootstrap-osd) key')
parser_ceph_volume.add_argument(
'--config', '-c',
help='ceph conf file')
parser_ceph_volume.add_argument(
'command', nargs=argparse.REMAINDER,
help='command')
- parser_ceph_volume.add_argument(
- '--shared_ceph_folder',
- metavar='CEPH_SOURCE_FOLDER',
- help='Development mode. Several folders in containers are volumes mapped to different sub-folders in the ceph source folder')
parser_zap_osds = subparsers.add_parser(
'zap-osds', help='zap all OSDs associated with a particular fsid')
help='daemon name (type.id)')
parser_deploy.add_argument(
'--fsid',
+ required=True,
help='cluster FSID')
parser_deploy.add_argument(
'--config', '-c',