BuildArch: noarch
Requires: lvm2
Requires: python%{python3_pkgversion}
+Requires: openssh-server
+Requires: which
%if 0%{?weak_deps}
Recommends: podman >= 2.0.2
%endif
'type': int,
'default': 1,
},
+ '--no-tmpfs': {
+ 'action': 'store_true',
+ 'dest': 'no_tmpfs',
+ 'help': ('Disable tmpfs osd data directory with bluestore. '
+ 'Useful if you want to run lvm prepare from cephadm.'),
+ },
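+ # Example (a sketch; matches the invocation used by the cephadm box 'start' script in this change):
+ #   ceph-volume lvm create --bluestore --no-tmpfs --data /dev/vg1/lv0 --no-systemd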
}
filestore_args = {
)
-def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid):
+def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid, tmpfs=True):
"""
:param block: The name of the logical volume for the bluestore data
:param wal: a regular/plain disk or logical volume, to be used for block.wal
db = prepare_dmcrypt(key, db, 'db', tags)
# create the directory
- prepare_utils.create_osd_path(osd_id, tmpfs=True)
+ prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)
# symlink the block
prepare_utils.link_block(block, osd_id)
# get the latest monmap
tags['ceph.type'] = 'block'
block_lv.set_tags(tags)
+ tmpfs = not self.args.no_tmpfs
+
prepare_bluestore(
block_lv.lv_path,
wal_device,
tags,
self.osd_id,
osd_fsid,
+ tmpfs=tmpfs
)
def main(self):
--- /dev/null
+# https://developers.redhat.com/blog/2014/05/05/running-systemd-within-docker-container/
+FROM centos:8 as centos-systemd
+ENV container docker
+ENV CEPHADM_PATH=/usr/local/sbin/cephadm
+#RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
+#rm -f /lib/systemd/system/multi-user.target.wants/*;\
+#rm -f /etc/systemd/system/*.wants/*;\
+#rm -f /lib/systemd/system/local-fs.target.wants/*; \
+#rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
+#rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
+#rm -f /lib/systemd/system/basic.target.wants/*;\
+#rm -f /lib/systemd/system/anaconda.target.wants/*;
+RUN dnf -y install chrony firewalld lvm2 \
+ openssh-server openssh-clients python3 \
+ yum-utils sudo which && dnf clean all
+
+RUN systemctl enable chronyd firewalld sshd
+
+
+FROM centos-systemd as centos-systemd-docker
+# To cache cephadm images
+RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+RUN dnf -y install docker-ce && \
+ dnf clean all && systemctl enable docker
+
+# ssh utilities
+RUN dnf install epel-release -y && dnf makecache && dnf install sshpass -y
+
+EXPOSE 8443
+EXPOSE 22
+
+FROM centos-systemd-docker
+WORKDIR /root
+COPY start /usr/local/bin
+
+CMD [ "/usr/sbin/init" ]
--- /dev/null
+#!/bin/bash
+
+OSDS=1
+HOSTS=0
+SKIP_LOOP=0
+SKIP_BOOTSTRAP=0
+
+function print_usage() {
+ echo "./bootstrap.sh [OPTIONS]"
+ echo "options:"
+ echo " --hosts n: number of hosts to add"
+ echo " --osds n: number of osds to add"
+ echo " --update-ceph-image: create/update ceph image"
+ echo " --update-box-image: create/update cephadm box image"
+ echo " --skip-create-loop: skip creating loopback device"
+ echo " --skip-bootstrap: skip deploying the containers"
+ echo " -l | --list-hosts: list available cephad-box hosts/seed"
+ echo " -h | --help: this help :)"
+}
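+
+# Example (a sketch): bring up the seed plus three extra hosts backed by three OSD LVs:
+#   ./bootstrap.sh --hosts 3 --osds 3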
+
+function docker-ips() {
+ docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}} %tab% {{.Name}} %tab% {{.Config.Hostname}}' $(docker ps -aq) | sed 's#%tab%#\t#g' | sed 's#/##g' | sort -t . -k 1,1n -k 2,2n -k 3,3n -k 4,4n
+}
+
+while [ $# -ge 1 ]; do
+case $1 in
+ -h | --help)
+ print_usage
+ exit
+ ;;
+ -l | --list-hosts) # TODO remove when ceph-ci updated
+ echo -e "IP\t\tName\t\t Hostname"
+ docker-ips | grep box
+ exit
+ ;;
+ --update-box-image)
+ echo Updating box image
+ docker build -t cephadm-box -f Dockerfile .
+ ;;
+ --update-ceph-image) # TODO remove when ceph-ci updated
+ echo Updating ceph image
+ source ./get_ceph_image.sh
+ ;;
+ --hosts)
+ HOSTS="$2"
+ echo "number of hosts: $HOSTS"
+ shift
+ ;;
+ --osds)
+ OSDS="$2"
+ echo "number of osds: $OSDS"
+ shift
+ ;;
+ --skip-create-loop)
+ echo Skipping loop creation
+ SKIP_LOOP=1
+ ;;
+ --skip-bootstrap)
+ echo Skipping bootstrap of containers
+ SKIP_BOOTSTRAP=1
+ ;;
+esac
+shift
+done
+
+# TODO: remove when ceph-ci image has required deps
+if [[ ! -a docker/ceph/image/quay.ceph.image.tar ]]
+then
+ echo -e "\033[33mWARNING:\033[0m run ./get_ceph_image.sh to get an updated ceph-ci/ceph image with correct deps."
+ exit
+fi
+
+if [[ $OSDS -eq 0 ]]
+then
+ SKIP_LOOP=1
+fi
+
+if [[ $SKIP_LOOP -eq 0 ]]
+then
+ source setup_loop.sh
+ create_loops $OSDS
+fi
+
+
+if [[ $SKIP_BOOTSTRAP -eq 0 ]]
+then
+ # loops must be created before starting docker-compose, otherwise docker
+ # will not find the lvs
+ docker-compose down
+ docker-compose up --scale hosts=$HOSTS -d
+ sleep 3
+
+ IPS=$(docker-ips | grep "box_hosts" | awk '{ print $1 }')
+ echo "IPS: "
+ echo $IPS
+
+ sudo sysctl net.ipv4.conf.all.forwarding=1
+ sudo iptables -P FORWARD ACCEPT
+
+ for ((i=1;i<=$HOSTS;i++))
+ do
+ docker-compose exec --index=$i hosts /cephadm/box/setup_ssh.sh run-sshd
+ done
+
+ docker-compose exec -e NUM_OSDS=${OSDS} seed /cephadm/box/start
+
+ docker-compose exec -e HOST_IPS="${IPS}" seed /cephadm/box/setup_ssh.sh copy-cluster-ssh-key
+fi
--- /dev/null
+version: "2.4"
+services:
+ cephadm-host-base:
+ build:
+ context: .
+ environment:
+ - CEPH_BRANCH=master
+ image: cephadm-box
+ # probably not needed with rootless Docker and cgroups v2
+ privileged: true
+ cap_add:
+ - SYS_ADMIN
+ - NET_ADMIN
+ - SYS_TIME
+ - MKNOD
+ stop_signal: RTMIN+3
+ volumes:
+ - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
+ - ../../../:/ceph
+ - ..:/cephadm
+ networks:
+ - public
+ mem_limit: "20g"
+ devices:
+ - /dev/loop0:/dev/ttt:rwm
+ scale: -1
+ seed:
+ extends:
+ service: cephadm-host-base
+ ports:
+ - "3000:3000"
+ - "8443:8443"
+ - "9095:9095"
+ scale: 1
+ hosts:
+ extends:
+ service: cephadm-host-base
+ scale: 3
+
+
+volumes:
+ var-lib-docker:
+networks:
+ public:
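+
+# Note: the seed keeps a single replica while plain hosts are scaled at run time
+# by bootstrap.sh, e.g.: docker-compose up --scale hosts=3 -d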
--- /dev/null
+FROM quay.ceph.io/ceph-ci/ceph:master
+EXPOSE 8443
--- /dev/null
+LANG="en_US.UTF-8"
+LC_ALL="en_US.UTF-8"
--- /dev/null
+#!/bin/bash
+
+set -ex
+
+IMAGE=quay.ceph.io/ceph-ci/ceph:master
+docker pull $IMAGE
+# update image with deps
+docker build -t $IMAGE docker/ceph
+# save the image so it can be loaded inside the box containers later
+mkdir -p docker/ceph/image
+rm -f docker/ceph/image/quay.ceph.image.tar
+docker save quay.ceph.io/ceph-ci/ceph:master -o docker/ceph/image/quay.ceph.image.tar
--- /dev/null
+#!/bin/bash
+
+set -e
+
+function create_loops() {
+
+ NUM_OSDS=$1
+ if [[ -z $NUM_OSDS ]]; then
+ echo "Call setup_loop <num_osds> to setup with more osds"
+ echo "Using default number of osds: 1."
+ NUM_OSDS=1
+ fi
+
+ # minimum 5 GB for each osd
+ SIZE=$(expr $NUM_OSDS \* 5)
+ # extra space just in case
+ SIZE=$(expr $SIZE + 2)
+
+ echo "Using ${SIZE} GB of space"
+
+ # look for an available loop device
+ avail_loop=$(sudo losetup -f)
+ loop_name=$(basename -- $avail_loop)
+
+ if [[ ! -e $avail_loop ]]
+ then
+ # in case we have to create the loop, find the minor device number.
+ num_loops=$(lsmod | grep loop | awk '{print $3}')
+ num_loops=$((num_loops + 1))
+ echo creating loop $avail_loop minor: $num_loops
+ mknod $avail_loop b 7 $num_loops
+ fi
+
+ if mountpoint -q $avail_loop
+ then
+ sudo umount $avail_loop
+ fi
+
+ if [[ ! -z $(losetup -l | grep $avail_loop) ]]
+ then
+ sudo losetup -d $avail_loop
+ fi
+
+ if [[ ! -e loop-images ]]
+ then
+ mkdir -p loop-images
+ fi
+ sudo rm -f loop-images/*
+ sudo dd if=/dev/zero of="loop-images/disk${loop_name}.img" bs=1G count=$SIZE
+ sudo losetup $avail_loop "loop-images/disk${loop_name}.img"
+
+ if [[ ! -z $(sudo vgs | grep vg1) ]]
+ then
+ sudo lvm vgremove -f -y vg1
+ fi
+ sudo pvcreate $avail_loop
+ sudo vgcreate vg1 $avail_loop
+
+ for ((i=0;i<$NUM_OSDS;i++)); do
+ sudo vgchange --refresh
+ sudo lvcreate --size 5G --name "lv${i}" "vg1"
+ done;
+}
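+
+# Example (a sketch, mirroring what bootstrap.sh does): source this file and
+# create one loop-backed 5G LV per OSD:
+#   source setup_loop.sh && create_loops 3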
--- /dev/null
+#!/usr/bin/env bash
+
+set -e
+
+function run-sshd() {
+ echo "Creating sshd server on $(hostname):$(hostname -i)"
+ # SSH
+ if [[ ! -f "/root/.ssh/id_rsa" ]]; then
+ mkdir -p ~/.ssh
+ chmod 700 ~/.ssh
+ ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""
+ fi
+
+ cat ~/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
+ if [[ ! -f "/root/.ssh/known_hosts" ]]; then
+ ssh-keygen -A
+ fi
+
+ # change password
+ echo "root:root" | chpasswd
+ echo "PermitRootLogin yes" >> /etc/ssh/sshd_config
+ echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config
+
+ /usr/sbin/sshd
+ echo "sshd finished"
+}
+
+function copy-cluster-ssh-key() {
+ echo "Adding cluster ssh key to all hosts: ${HOST_IPS}"
+ HOST_IPS=$(echo $HOST_IPS)
+ for ip in $(echo $HOST_IPS)
+ do
+ if [[ ! $ip == $(hostname -i) ]]
+ then
+ echo $ip
+ # copy cluster key
+ sshpass -p "root" ssh-copy-id -f -o StrictHostKeyChecking=no -i /etc/ceph/ceph.pub "root@${ip}"
+ fi
+ done
+ echo "Finished adding keys, you can now add existing hosts containers to the cluster!"
+}
+
+case $1 in
+ run-sshd)
+ run-sshd
+ ;;
+ copy-cluster-ssh-key)
+ copy-cluster-ssh-key
+ ;;
+esac
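+
+# Usage sketch (these are the invocations bootstrap.sh issues inside the containers):
+#   ./setup_ssh.sh run-sshd
+#   HOST_IPS="<space-separated host IPs>" ./setup_ssh.sh copy-cluster-ssh-key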
--- /dev/null
+#!/usr/bin/env bash
+set -euxo pipefail
+
+# link so we can debug cephadm
+ln -s -f /cephadm/cephadm $CEPHADM_PATH
+chmod +x $CEPHADM_PATH
+
+tail -f /var/log/ceph/cephadm.log 1>&2 &
+
+EXTRA_ARGS=()
+if [[ -n "${SHARED_CEPH_FOLDER-}" ]]; then
+ EXTRA_ARGS+=(--shared_ceph_folder "$SHARED_CEPH_FOLDER")
+fi
+
+docker load < /cephadm/box/docker/ceph/image/quay.ceph.image.tar
+
+# cephadm sometimes errors out because it tries to use quay.ceph.io/ceph-ci/ceph:<none>
+# instead of master's tag, so pin the image explicitly
+export CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master
+echo "export CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master" >> ~/.bashrc
+
+if [[ -n "$CEPHADM_IMAGE" ]]; then
+ EXTRA_ARGS+=(--skip-pull)
+fi
+
+export CEPH_SOURCE_FOLDER=/ceph
+$CEPHADM_PATH --verbose bootstrap \
+ --mon-ip "$(hostname -i)" \
+ --allow-fqdn-hostname \
+ --initial-dashboard-password admin \
+ --dashboard-password-noupdate \
+ --shared_ceph_folder /ceph \
+ "${EXTRA_ARGS[@]}"
+
+# make sure vg and lvs are visible
+vgchange --refresh
+for((i=0;i<$NUM_OSDS;i++)); do
+ echo "Creating osd.${i}"
+ # create osd folder
+ $CEPHADM_PATH ceph-volume --shared_ceph_folder /ceph lvm create --bluestore --no-tmpfs --data "/dev/vg1/lv${i}" --no-systemd
+ echo "Deploying osd.${i}..."
+ # deploy osd with osd data folder
+ $CEPHADM_PATH deploy --name "osd.${i}"
+ # FIXME: this command should replace 'lvm create' + 'deploy' above, but ceph-osd fails with a 'type' file not found error
+ # $CEPHADM_PATH shell -- ceph orch daemon add osd "$(hostname):/dev/vg1/lv${i}"
+ echo "osd.${i} deployed!"
+done;
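+
+# Note: this script runs inside the seed container; bootstrap.sh invokes it as:
+#   docker-compose exec -e NUM_OSDS=${OSDS} seed /cephadm/box/start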
ctx.image = _get_default_image(ctx)
return func(ctx)
-
return cast(FuncT, _default_image)
raise RuntimeError('uid/gid not found')
+
+
def deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid,
config=None, keyring=None,
osd_fsid=None,
) -> None:
# cmd
data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
+
+ # for OSDs, read the osd fsid from the data dir if it was not provided
+ if daemon_type == 'osd':
+ osd_fsid_path = os.path.join(data_dir, 'fsid')
+ if 'fsid' in os.listdir(data_dir) and not osd_fsid:
+ with open(osd_fsid_path, 'r') as f:
+ osd_fsid = f.read().strip()
+
with open(data_dir + '/unit.run.new', 'w') as f, \
open(data_dir + '/unit.meta.new', 'w') as metaf:
f.write('set -e\n')
verbosity=CallVerbosity.DEBUG)
if enable:
call_throws(ctx, ['systemctl', 'enable', unit_name])
+
if start:
+
clean_cgroup(ctx, fsid, unit_name)
call_throws(ctx, ['systemctl', 'start', unit_name])
@default_image
+@infer_fsid
def command_deploy(ctx):
# type: (CephadmContext) -> None
+ assert ctx.fsid
daemon_type, daemon_id = ctx.name.split('.', 1)
lock = FileLock(ctx, ctx.fsid)
mounts[pathify(ctx.config)] = '/etc/ceph/ceph.conf:z'
if ctx.keyring:
mounts[pathify(ctx.keyring)] = '/etc/ceph/ceph.keyring:z'
+
if ctx.mount:
for _mount in ctx.mount:
split_src_dst = _mount.split(':')
##################################
+def configure_osd_data_dir(ctx, fsid, daemon_id, uid, gid):
+ # type: (CephadmContext, str, Union[int, str], int, int) -> None
+ daemon_type = 'osd'
+ data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
+ logger.debug('Configuring OSD data dir %s' % data_dir)
+
+ # Ensure ownership matches the expected user:group
+ for f in os.listdir(data_dir):
+ os.chown(os.path.join(data_dir, f), uid, gid)
+
+ # Create minimal config
+ touch(os.path.join(data_dir, 'config'), uid, gid)
+ mounts = get_container_mounts(ctx, fsid, daemon_type, daemon_id, no_config=True)
+ mounts[data_dir] = '/var/lib/ceph/osd/ceph-%s' % daemon_id
+ mounts['/etc/ceph/ceph.conf'] = '/etc/ceph/ceph.conf:z'
+ mounts['/etc/ceph/ceph.client.admin.keyring'] = '/etc/ceph/ceph.keyring:z'
+
+ CephContainer(
+ ctx,
+ image=ctx.image,
+ entrypoint='/usr/bin/ceph',
+ args=[
+ 'config', 'generate-minimal-conf',
+ '-o', '/var/lib/ceph/osd/ceph-%s/config' % daemon_id
+ ],
+ privileged=True,
+ volume_mounts=mounts
+ ).run()
+
+ # Create keyring and then import
+ key = CephContainer(
+ ctx,
+ image=ctx.image,
+ entrypoint='/usr/bin/ceph-authtool',
+ args=['--gen-print-key'],
+ ).run().strip()
+
+ keyring = ('[%s.%s]\n'
+ '\tkey = %s\n'
+ '\tcaps osd = allow *\n'
+ '\tcaps mon = allow *\n'
+ % (daemon_type, daemon_id, key))
+ with open(os.path.join(data_dir, 'keyring'), 'w+') as f:
+ os.fchmod(f.fileno(), 0o600)
+ os.fchown(f.fileno(), uid, gid)
+ f.write(keyring)
+ CephContainer(
+ ctx,
+ image=ctx.image,
+ entrypoint='/usr/bin/ceph',
+ args=[
+ 'auth', 'import',
+ '-i', '/var/lib/ceph/osd/ceph-%s/keyring' % daemon_id
+ ],
+ privileged=True,
+ volume_mounts=mounts
+ ).run()
+
+
@infer_fsid
@infer_image
@validate_fsid
(uid, gid) = (0, 0) # ceph-volume runs as root
mounts = get_container_mounts(ctx, ctx.fsid, 'osd', None)
-
tmp_config = None
tmp_keyring = None
tmp_config = write_tmp(config, uid, gid)
mounts[tmp_config.name] = '/etc/ceph/ceph.conf:z'
+ # ceph-volume uses the bootstrap-osd key for its operations.
+ # This helper retrieves that keyring so it can be provided when none was given.
+
+ def get_bootstrap_osd_keyring() -> Optional[str]:
+ if not ctx.keyring and os.path.exists(SHELL_DEFAULT_KEYRING):
+ ctx.keyring = SHELL_DEFAULT_KEYRING
+ (config, keyring) = get_config_and_keyring(ctx)
+
+ mounts = get_container_mounts(ctx, ctx.fsid, 'osd', None,
+ no_config=bool(ctx.config))
+ if ctx.config:
+ mounts[pathify(ctx.config)] = '/etc/ceph/ceph.conf:z'
+ if ctx.keyring:
+ mounts[pathify(ctx.keyring)] = '/etc/ceph/ceph.keyring:z'
+ c = CephContainer(
+ ctx,
+ image=ctx.image,
+ entrypoint='/usr/bin/ceph',
+ args='auth get client.bootstrap-osd'.split(),
+ volume_mounts=mounts,
+ )
+ out, err, code = call_throws(ctx, c.run_cmd())
+ if not code:
+ return out
+ else:
+ return None
+
+ if not keyring:
+ keyring = get_bootstrap_osd_keyring()
+
if keyring:
# tmp keyring file
tmp_keyring = write_tmp(keyring, uid, gid)
mounts[tmp_keyring.name] = '/var/lib/ceph/bootstrap-osd/ceph.keyring:z'
- c = get_ceph_volume_container(
- ctx,
- envs=ctx.env,
- args=ctx.command,
- volume_mounts=mounts,
- )
+ # OSD data directories created by ceph-volume inside the container would not
+ # be persisted, so we mount a temporary directory for them and copy the
+ # results out afterwards.
+ # FIXME: could we use /var/lib/ceph/{fsid} instead?
+ with tempfile.TemporaryDirectory() as tmp_osd_dir:
+ # match /var/lib/ceph/osd/ dir permissions
+ os.chown(tmp_osd_dir, uid, gid)
+ os.chmod(tmp_osd_dir, 0o755)
+ # store newly created osds here
+ mounts[tmp_osd_dir] = '/var/lib/ceph/osd/:z'
- out, err, code = call_throws(ctx, c.run_cmd())
- if not code:
- print(out)
+ c = get_ceph_volume_container(
+ ctx,
+ envs=ctx.env,
+ args=ctx.command,
+ volume_mounts=mounts,
+ )
+
+ out, err, code = call_throws(ctx, c.run_cmd())
+ if not code:
+ print(out)
+ else:
+ print(err)
+ # If OSDs were created, move their data directories into place
+ for osd_folder_name in os.listdir(tmp_osd_dir):
+ if osd_folder_name.startswith('ceph-'):
+ osd_id = osd_folder_name[5:]
+ osd_data_dir = os.path.join(tmp_osd_dir, osd_folder_name)
+ copy_tree(ctx, [osd_data_dir],
+ f'/var/lib/ceph/{ctx.fsid}/osd.{osd_id}',
+ uid=uid, gid=gid)
+ (uid, gid) = extract_uid_gid(ctx)
+ # add missing data
+ configure_osd_data_dir(ctx, ctx.fsid, osd_id, uid, gid)
##################################
help='cluster FSID')
parser_ceph_volume.add_argument(
'--config-json',
- help='JSON file with config and (client.bootrap-osd) key')
+ help='JSON file with config and (client.bootstrap-osd) key')
parser_ceph_volume.add_argument(
'--config', '-c',
help='ceph conf file')
parser_ceph_volume.add_argument(
'command', nargs=argparse.REMAINDER,
help='command')
+ parser_ceph_volume.add_argument(
+ '--shared_ceph_folder',
+ metavar='CEPH_SOURCE_FOLDER',
+ help='Development mode. Several container directories are bind-mounted to sub-folders of the given Ceph source tree')
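+ # Example (a sketch; this is how the box 'start' script invokes it):
+ #   cephadm ceph-volume --shared_ceph_folder /ceph lvm create --bluestore --no-tmpfs --data /dev/vg1/lv0 --no-systemd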
parser_zap_osds = subparsers.add_parser(
'zap-osds', help='zap all OSDs associated with a particular fsid')
help='daemon name (type.id)')
parser_deploy.add_argument(
'--fsid',
- required=True,
help='cluster FSID')
parser_deploy.add_argument(
'--config', '-c',