cephadm/box: Cephadm Docker in Docker dev box
author Pere Diaz Bou <pdiazbou@redhat.com>
Thu, 23 Sep 2021 11:12:09 +0000 (13:12 +0200)
committer Pere Diaz Bou <pdiazbou@redhat.com>
Wed, 15 Dec 2021 14:36:33 +0000 (15:36 +0100)
Signed-off-by: Pere Diaz Bou <pdiazbou@redhat.com>
14 files changed:
ceph.spec.in
src/ceph-volume/ceph_volume/devices/lvm/common.py
src/ceph-volume/ceph_volume/devices/lvm/prepare.py
src/cephadm/box/Dockerfile [new file with mode: 0644]
src/cephadm/box/bootstrap.sh [new file with mode: 0755]
src/cephadm/box/docker-compose.yml [new file with mode: 0644]
src/cephadm/box/docker/ceph/.bashrc [new file with mode: 0644]
src/cephadm/box/docker/ceph/Dockerfile [new file with mode: 0644]
src/cephadm/box/docker/ceph/locale.conf [new file with mode: 0644]
src/cephadm/box/get_ceph_image.sh [new file with mode: 0755]
src/cephadm/box/setup_loop.sh [new file with mode: 0755]
src/cephadm/box/setup_ssh.sh [new file with mode: 0755]
src/cephadm/box/start [new file with mode: 0755]
src/cephadm/cephadm

index 092053a155554890eb6e99e51ee42d59985a9b52..66e968bc2ee4038ec795bb750625a1f63f7aa939 100644 (file)
@@ -464,6 +464,8 @@ Summary:        Utility to bootstrap Ceph clusters
 BuildArch:      noarch
 Requires:       lvm2
 Requires:       python%{python3_pkgversion}
+Requires:       openssh-server
+Requires:       which
 %if 0%{?weak_deps}
 Recommends:     podman >= 2.0.2
 %endif
index 05f83383f0ea13300e8a06abc117afa55295f914..fba659b8328b3ce2068955df24a9e1e9db270b95 100644 (file)
@@ -123,6 +123,12 @@ bluestore_args = {
         'type': int,
         'default': 1,
     },
+    '--no-tmpfs': {
+        'action': 'store_true',
+        'dest': 'no_tmpfs',
+        'help': ('Disable tmpfs osd data directory with bluestore. '
+            'Useful if you want to run lvm prepare from cephadm'),
+    },
 }
 
 filestore_args = {
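For illustration, the new flag would be passed to ceph-volume like this (the device path is a placeholder; the box's start script below drives the same flag through cephadm):

    ceph-volume lvm prepare --bluestore --no-tmpfs --data /dev/vg1/lv0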
index 2f715fdba122c8a87fc2a097a34df4c66c0766a1..6567c78d9f5ec790cbc449e0f4ebb198f444edea 100644 (file)
@@ -81,7 +81,7 @@ def prepare_filestore(device, journal, secrets, tags, osd_id, fsid):
         )
 
 
-def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid):
+def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid, tmpfs=True):
     """
     :param block: The name of the logical volume for the bluestore data
     :param wal: a regular/plain disk or logical volume, to be used for block.wal
@@ -104,7 +104,7 @@ def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid):
         db = prepare_dmcrypt(key, db, 'db', tags)
 
     # create the directory
-    prepare_utils.create_osd_path(osd_id, tmpfs=True)
+    prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)
     # symlink the block
     prepare_utils.link_block(block, osd_id)
     # get the latest monmap
@@ -384,6 +384,8 @@ class Prepare(object):
             tags['ceph.type'] = 'block'
             block_lv.set_tags(tags)
 
+            tmpfs = not self.args.no_tmpfs
+
             prepare_bluestore(
                 block_lv.lv_path,
                 wal_device,
@@ -392,6 +394,7 @@ class Prepare(object):
                 tags,
                 self.osd_id,
                 osd_fsid,
+                tmpfs=tmpfs
             )
 
     def main(self):
diff --git a/src/cephadm/box/Dockerfile b/src/cephadm/box/Dockerfile
new file mode 100644 (file)
index 0000000..3b685a2
--- /dev/null
@@ -0,0 +1,36 @@
+# https://developers.redhat.com/blog/2014/05/05/running-systemd-within-docker-container/
+FROM centos:8 as centos-systemd
+ENV container docker
+ENV CEPHADM_PATH=/usr/local/sbin/cephadm
+#RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
+#rm -f /lib/systemd/system/multi-user.target.wants/*;\
+#rm -f /etc/systemd/system/*.wants/*;\
+#rm -f /lib/systemd/system/local-fs.target.wants/*; \
+#rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
+#rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
+#rm -f /lib/systemd/system/basic.target.wants/*;\
+#rm -f /lib/systemd/system/anaconda.target.wants/*;
+RUN dnf -y install chrony firewalld lvm2 \
+  openssh-server openssh-clients python3 \
+  yum-utils sudo which && dnf clean all
+
+RUN systemctl enable chronyd firewalld sshd
+
+
+FROM centos-systemd as centos-systemd-docker
+# To cache cephadm images
+RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+RUN dnf -y install docker-ce && \
+  dnf clean all && systemctl enable docker
+
+# ssh utilities
+RUN dnf install epel-release -y && dnf makecache && dnf install sshpass -y
+
+EXPOSE 8443
+EXPOSE 22
+
+FROM centos-systemd-docker
+WORKDIR /root
+COPY start /usr/local/bin
+
+CMD [ "/usr/sbin/init" ]
diff --git a/src/cephadm/box/bootstrap.sh b/src/cephadm/box/bootstrap.sh
new file mode 100755 (executable)
index 0000000..41ad554
--- /dev/null
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+OSDS=1
+HOSTS=0
+SKIP_LOOP=0
+SKIP_BOOTSTRAP=0
+
+function print_usage() {
+       echo "./bootstrap.sh [OPTIONS]"
+       echo "options:"
+       echo "    --hosts n: number of hosts to add"
+       echo "    --osds n: number of osds to add"
+       echo "    --update-ceph-image: create/update ceph image"
+       echo "    --update-box-image: create/update cephadm box image"
+       echo "    --skip-create-loop: skip creating loopback device"
+       echo "    --skip-bootstrap: skip deploying the containers"
+       echo "    -l | --list-hosts: list available cephad-box hosts/seed"
+       echo "    -h | --help: this help :)"
+}
+
+function docker-ips() {
+       docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}} %tab% {{.Name}} %tab% {{.Config.Hostname}}' $(docker ps -aq) | sed 's#%tab%#\t#g' | sed 's#/##g' | sort -t . -k 1,1n -k 2,2n -k 3,3n -k 4,4n
+}
+
+while [ $# -ge 1 ]; do
+case $1 in
+    -h | --help)
+       print_usage
+       exit
+       ;;
+    -l | --list-hosts) # TODO remove when ceph-ci updated
+       echo -e "IP\t\tName\t\t Hostname"
+       docker-ips | grep box
+       exit
+        ;;
+    --update-box-image)
+       echo Updating box image
+       docker build -t cephadm-box -f Dockerfile .
+        ;;
+    --update-ceph-image) # TODO remove when ceph-ci updated
+       echo Updating ceph image
+       source ./get_ceph_image.sh
+        ;;
+    --hosts)
+        HOSTS="$2"
+       echo "number of hosts: $HOSTS"
+       shift
+        ;;
+    --osds)
+        OSDS="$2"
+       echo "number of osds: $OSDS"
+       shift
+        ;;
+    --skip-create-loop)
+       echo Skipping loop creation
+        SKIP_LOOP=1
+        ;;
+    --skip-bootstrap)
+       echo Skipping bootstrap of containers
+        SKIP_BOOTSTRAP=1
+        ;;
+esac
+shift
+done
+
+# TODO: remove when ceph-ci image has required deps
+if [[ ! -e docker/ceph/image/quay.ceph.image.tar ]]
+then
+       echo -e "\033[33mWARNING:\033[0m run ./get_ceph_image.sh to get an updated ceph-ci/ceph image with correct deps."
+       exit
+fi
+
+if [[ $OSDS -eq 0 ]]
+then
+       SKIP_LOOP=1
+fi
+
+if [[ $SKIP_LOOP -eq 0 ]]
+then
+       source setup_loop.sh
+       create_loops $OSDS
+fi
+
+
+if [[ $SKIP_BOOTSTRAP -eq 0 ]]
+then
+       # loops must be created before starting docker-compose, or else docker
+       # will not find the lvs
+       docker-compose down
+       docker-compose up --scale hosts=$HOSTS -d
+       sleep 3
+
+       IPS=$(docker-ips | grep "box_hosts" | awk '{ print $1 }')
+       echo "IPS: "
+       echo $IPS
+
+       sudo sysctl net.ipv4.conf.all.forwarding=1
+       sudo iptables -P FORWARD ACCEPT
+
+       for ((i=1;i<=$HOSTS;i++))
+       do
+               docker-compose exec --index=$i hosts /cephadm/box/setup_ssh.sh run-sshd
+       done
+
+       docker-compose exec -e NUM_OSDS=${OSDS} seed /cephadm/box/start
+
+       docker-compose exec -e HOST_IPS="${IPS}" seed /cephadm/box/setup_ssh.sh copy-cluster-ssh-key
+fi
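An illustrative end-to-end run (host/osd counts are arbitrary):

    ./get_ceph_image.sh                  # fetch and patch the ceph-ci image
    ./bootstrap.sh --hosts 3 --osds 3    # bring up the seed plus 3 hosts with 3 osds
    ./bootstrap.sh --list-hosts          # print container IPs/hostnames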
diff --git a/src/cephadm/box/docker-compose.yml b/src/cephadm/box/docker-compose.yml
new file mode 100644 (file)
index 0000000..7927176
--- /dev/null
@@ -0,0 +1,44 @@
+version: "2.4"
+services:
+  cephadm-host-base:
+    build:
+      context: .
+    environment:
+      - CEPH_BRANCH=master
+    image: cephadm-box
+    # probably not needed with rootless Docker and cgroups v2
+    privileged: true
+    cap_add:
+      - SYS_ADMIN
+      - NET_ADMIN
+      - SYS_TIME
+      - MKNOD
+    stop_signal: RTMIN+3
+    volumes:
+      - "/sys/fs/cgroup:/sys/fs/cgroup:ro"
+      - ../../../:/ceph
+      - ..:/cephadm
+    networks:
+      - public
+    mem_limit: "20g"
+    devices:
+      - /dev/loop0:/dev/ttt:rwm
+    scale: -1
+  seed:
+    extends:
+      service: cephadm-host-base
+    ports:
+      - "3000:3000"
+      - "8443:8443"
+      - "9095:9095"
+    scale: 1
+  hosts:
+    extends:
+      service: cephadm-host-base
+    scale: 3
+
+
+volumes:
+  var-lib-docker:
+networks:
+  public:
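Since the hosts service is scalable, the host count can be overridden at compose time, which is what bootstrap.sh does; for example:

    docker-compose up --scale hosts=5 -d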
diff --git a/src/cephadm/box/docker/ceph/.bashrc b/src/cephadm/box/docker/ceph/.bashrc
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/cephadm/box/docker/ceph/Dockerfile b/src/cephadm/box/docker/ceph/Dockerfile
new file mode 100644 (file)
index 0000000..b18aee4
--- /dev/null
@@ -0,0 +1,2 @@
+FROM quay.ceph.io/ceph-ci/ceph:master
+EXPOSE 8443
diff --git a/src/cephadm/box/docker/ceph/locale.conf b/src/cephadm/box/docker/ceph/locale.conf
new file mode 100644 (file)
index 0000000..00d76c8
--- /dev/null
@@ -0,0 +1,2 @@
+LANG="en_US.UTF-8"
+LC_ALL="en_US.UTF-8"
diff --git a/src/cephadm/box/get_ceph_image.sh b/src/cephadm/box/get_ceph_image.sh
new file mode 100755 (executable)
index 0000000..3c431c8
--- /dev/null
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -ex
+
+IMAGE=quay.ceph.io/ceph-ci/ceph:master
+docker pull $IMAGE
+# update image with deps
+docker build -t $IMAGE docker/ceph
+# store to later load within docker
+mkdir -p docker/ceph/image
+rm -f docker/ceph/image/quay.ceph.image.tar
+docker save quay.ceph.io/ceph-ci/ceph:master -o docker/ceph/image/quay.ceph.image.tar
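The resulting tarball is what the seed's start script later loads; to load it manually inside a box container:

    docker load < /cephadm/box/docker/ceph/image/quay.ceph.image.tar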
diff --git a/src/cephadm/box/setup_loop.sh b/src/cephadm/box/setup_loop.sh
new file mode 100755 (executable)
index 0000000..661715e
--- /dev/null
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+set -e
+
+function create_loops() {
+
+       NUM_OSDS=$1
+       if [[ -z $NUM_OSDS ]]; then
+         echo "Call setup_loop <num_osds> to setup with more osds"
+         echo "Using default number of osds: 1."
+         NUM_OSDS=1
+       fi
+
+       # minimum 5 GB for each osd
+       SIZE=$(expr $NUM_OSDS \* 5)
+       # extra space just in case
+       SIZE=$(expr $SIZE + 2)
+
+       echo "Using ${SIZE} GB of space"
+
+       # look for an available loop device
+       avail_loop=$(sudo losetup -f)
+       loop_name=$(basename -- $avail_loop)
+
+       if [[ ! -e $avail_loop ]]
+       then
+               # in case we have to create the loop, find the minor device number.
+               num_loops=$(lsmod | grep loop | awk '{print $3}')
+               num_loops=$((num_loops + 1))
+               echo creating loop $avail_loop minor: $num_loops
+               mknod $avail_loop b 7 $num_loops
+       fi
+
+       if mountpoint -q $avail_loop
+       then
+               sudo umount $avail_loop
+       fi
+
+       if [[ ! -z $(losetup -l | grep $avail_loop) ]]
+       then
+               sudo losetup -d $avail_loop
+       fi
+
+       if [[ ! -e loop-images ]]
+       then
+               mkdir -p loop-images
+       fi
+       sudo rm -f loop-images/*
+       sudo dd if=/dev/zero of="loop-images/disk${loop_name}.img" bs=1G count=$SIZE
+       sudo losetup $avail_loop "loop-images/disk${loop_name}.img"
+
+       if [[ ! -z $(sudo vgs | grep vg1) ]]
+       then
+               sudo lvm vgremove -f -y vg1
+       fi
+       sudo pvcreate $avail_loop
+       sudo vgcreate vg1 $avail_loop
+
+       for ((i=0;i<$NUM_OSDS;i++)); do
+         sudo vgchange --refresh
+         sudo lvcreate --size 5G --name "lv${i}" "vg1"
+       done;
+}
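A quick sanity check after create_loops (illustrative; loop device names vary):

    sudo losetup -l | grep loop-images   # backing file should be listed
    sudo vgs vg1                         # one vg on the loop device
    sudo lvs vg1                         # one 5G lv per osd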
diff --git a/src/cephadm/box/setup_ssh.sh b/src/cephadm/box/setup_ssh.sh
new file mode 100755 (executable)
index 0000000..5b81c2c
--- /dev/null
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+set -e
+
+function run-sshd() {
+       echo "Creating sshd server on $(hostname):$(hostname -i)"
+       # SSH
+       if [[ ! -f "/root/.ssh/id_rsa" ]]; then
+               mkdir -p ~/.ssh
+               chmod 700 ~/.ssh
+               ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""
+       fi
+
+       cat ~/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
+       if [[ ! -f "/root/.ssh/known_hosts" ]]; then
+               ssh-keygen -A
+       fi
+
+       # change password
+       echo "root:root" | chpasswd
+       echo "PermitRootLogin yes" >> /etc/ssh/sshd_config
+       echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config
+
+       /usr/sbin/sshd
+       echo "sshd finished"
+}
+
+function copy-cluster-ssh-key() {
+       echo "Adding cluster ssh key to all hosts: ${HOST_IPS}"
+       for ip in ${HOST_IPS}
+       do
+               if [[ ! $ip == $(hostname -i) ]]
+               then
+                       echo $ip
+                       # copy cluster key
+                       sshpass -p "root" ssh-copy-id -f -o StrictHostKeyChecking=no -i /etc/ceph/ceph.pub "root@${ip}"
+               fi
+       done
+       echo "Finished adding keys, you can now add existing hosts containers to the cluster!"
+}
+
+case $1 in
+       run-sshd)
+               run-sshd
+               ;;
+       copy-cluster-ssh-key)
+               copy-cluster-ssh-key
+               ;;
+esac
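bootstrap.sh drives both entry points; to invoke them by hand (illustrative; HOST_IPS is a space-separated list of container IPs):

    docker-compose exec --index=1 hosts /cephadm/box/setup_ssh.sh run-sshd
    docker-compose exec -e HOST_IPS="<ips>" seed /cephadm/box/setup_ssh.sh copy-cluster-ssh-key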
diff --git a/src/cephadm/box/start b/src/cephadm/box/start
new file mode 100755 (executable)
index 0000000..ba38f3c
--- /dev/null
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+set -euxo pipefail
+
+# link so we can debug cephadm
+ln -s -f /cephadm/cephadm $CEPHADM_PATH
+chmod +x $CEPHADM_PATH
+
+tail -f /var/log/ceph/cephadm.log 1>&2 &
+
+EXTRA_ARGS=()
+if [[ -n "${SHARED_CEPH_FOLDER-}" ]]; then
+    EXTRA_ARGS+=(--shared_ceph_folder "$SHARED_CEPH_FOLDER")
+fi
+
+docker load < /cephadm/box/docker/ceph/image/quay.ceph.image.tar
+
+# work around a cephadm uid/gid error: it sometimes tries to use
+# quay.ceph.io/ceph-ci/ceph:<none> instead of master's tag
+export CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master
+echo "export CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master" >> ~/.bashrc
+
+if [[ -n "$CEPHADM_IMAGE" ]]; then
+       EXTRA_ARGS+=(--skip-pull)
+fi
+
+export CEPH_SOURCE_FOLDER=/ceph
+$CEPHADM_PATH --verbose bootstrap \
+  --mon-ip "$(hostname -i)" \
+  --allow-fqdn-hostname \
+  --initial-dashboard-password admin \
+  --dashboard-password-noupdate \
+  --shared_ceph_folder /ceph \
+  "${EXTRA_ARGS[@]}"
+
+# make sure vg and lvs are visible
+vgchange --refresh
+for((i=0;i<$NUM_OSDS;i++)); do
+       echo "Creating osd.${i}"
+       # create osd folder
+       $CEPHADM_PATH ceph-volume --shared_ceph_folder /ceph lvm create --bluestore --no-tmpfs --data "/dev/vg1/lv${i}" --no-systemd
+       echo "Deploying osd.${i}..."
+       # deploy osd with osd data folder
+       $CEPHADM_PATH deploy --name "osd.${i}"
+       # FIXME: this command should replace lvm create + deploy, but ceph-osd fails with a 'type' file not found error
+       # $CEPHADM_PATH shell -- ceph orch daemon add osd "$(hostname):/dev/vg1/lv${i}"
+       echo "osd.${i} deployed!"
+done;
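Once start finishes on the seed, the cluster can be inspected from the host (illustrative):

    docker-compose exec seed cephadm shell -- ceph -s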
index 90232f890a90d18a225005be0d813cec94311034..6ab5e2310388a572bfd12f99d68fa12515487fa6 100755 (executable)
@@ -1783,7 +1783,6 @@ def default_image(func: FuncT) -> FuncT:
                 ctx.image = _get_default_image(ctx)
 
         return func(ctx)
-
     return cast(FuncT, _default_image)
 
 
@@ -2646,6 +2645,8 @@ def extract_uid_gid(ctx, img='', file_path='/var/lib/ceph'):
     raise RuntimeError('uid/gid not found')
 
 
+
+
 def deploy_daemon(ctx, fsid, daemon_type, daemon_id, c, uid, gid,
                   config=None, keyring=None,
                   osd_fsid=None,
@@ -2823,6 +2824,14 @@ def deploy_daemon_units(
 ) -> None:
     # cmd
     data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
+
+    # if osd then try to read parameters if not provided
+    if daemon_type == 'osd':
+        osd_fsid_path = os.path.join(data_dir, 'fsid')
+        if 'fsid' in os.listdir(data_dir) and not osd_fsid:
+            with open(osd_fsid_path, 'r') as f:
+                osd_fsid = f.read()
+
     with open(data_dir + '/unit.run.new', 'w') as f, \
             open(data_dir + '/unit.meta.new', 'w') as metaf:
         f.write('set -e\n')
@@ -2968,7 +2977,9 @@ def deploy_daemon_units(
          verbosity=CallVerbosity.DEBUG)
     if enable:
         call_throws(ctx, ['systemctl', 'enable', unit_name])
+
     if start:
+
         clean_cgroup(ctx, fsid, unit_name)
         call_throws(ctx, ['systemctl', 'start', unit_name])
 
@@ -4996,8 +5007,10 @@ def extract_uid_gid_monitoring(ctx, daemon_type):
 
 
 @default_image
+@infer_fsid
 def command_deploy(ctx):
     # type: (CephadmContext) -> None
+    assert ctx.fsid
     daemon_type, daemon_id = ctx.name.split('.', 1)
 
     lock = FileLock(ctx, ctx.fsid)
@@ -5179,6 +5192,7 @@ def command_shell(ctx):
         mounts[pathify(ctx.config)] = '/etc/ceph/ceph.conf:z'
     if ctx.keyring:
         mounts[pathify(ctx.keyring)] = '/etc/ceph/ceph.keyring:z'
+
     if ctx.mount:
         for _mount in ctx.mount:
             split_src_dst = _mount.split(':')
@@ -5262,6 +5276,65 @@ def command_enter(ctx):
 ##################################
 
 
+def configure_osd_data_dir(ctx, fsid, daemon_id, uid, gid):
+    # type: (CephadmContext, str, Union[int, str], int, int) -> None
+    daemon_type = 'osd'
+    data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
+
+    # Ensure user:group ownership is as expected
+    for f in os.listdir(data_dir):
+        os.chown(os.path.join(data_dir, f), uid, gid)
+
+    # Create minimal config
+    touch(os.path.join(data_dir, 'config'), uid, gid)
+    mounts = get_container_mounts(ctx, fsid, daemon_type, daemon_id, no_config=True)
+    mounts[data_dir] = '/var/lib/ceph/osd/ceph-%s' % daemon_id
+    mounts['/etc/ceph/ceph.conf'] = '/etc/ceph/ceph.conf:z'
+    mounts['/etc/ceph/ceph.client.admin.keyring'] = '/etc/ceph/ceph.keyring:z'
+
+    CephContainer(
+        ctx,
+        image=ctx.image,
+        entrypoint='/usr/bin/ceph',
+        args=[
+            'config', 'generate-minimal-conf',
+            '-o', '/var/lib/ceph/osd/ceph-%s/config' % daemon_id
+        ],
+        privileged=True,
+        volume_mounts=mounts
+    ).run()
+
+    # Create keyring and then import
+    key = CephContainer(
+        ctx,
+        image=ctx.image,
+        entrypoint='/usr/bin/ceph-authtool',
+        args=['--gen-print-key'],
+    ).run().strip()
+
+    keyring = ('[%s.%s]\n'
+               '\tkey = %s\n'
+               '\tcaps osd = allow *\n'
+               '\tcaps mon = allow *\n'
+               % (daemon_type, daemon_id, key))
+    with open(os.path.join(data_dir, 'keyring'), 'w+') as f:
+        os.fchmod(f.fileno(), 0o600)
+        os.fchown(f.fileno(), uid, gid)
+        f.write(keyring)
+    CephContainer(
+        ctx,
+        image=ctx.image,
+        entrypoint='/usr/bin/ceph',
+        args=[
+            'auth', 'import',
+            '-i', '/var/lib/ceph/osd/ceph-%s/keyring' % daemon_id
+        ],
+        privileged=True,
+        volume_mounts=mounts
+    ).run()
+
+
 @infer_fsid
 @infer_image
 @validate_fsid
@@ -5280,7 +5353,6 @@ def command_ceph_volume(ctx):
 
     (uid, gid) = (0, 0)  # ceph-volume runs as root
     mounts = get_container_mounts(ctx, ctx.fsid, 'osd', None)
-
     tmp_config = None
     tmp_keyring = None
 
@@ -5291,21 +5363,74 @@ def command_ceph_volume(ctx):
         tmp_config = write_tmp(config, uid, gid)
         mounts[tmp_config.name] = '/etc/ceph/ceph.conf:z'
 
+    # ceph-volume uses the bootstrap-osd key for its operations.
+    # This helper retrieves the keyring so it can be provided.
+
+    def get_bootstrap_osd_keyring() -> Optional[str]:
+        if not ctx.keyring and os.path.exists(SHELL_DEFAULT_KEYRING):
+            ctx.keyring = SHELL_DEFAULT_KEYRING
+        (config, keyring) = get_config_and_keyring(ctx)
+
+        mounts = get_container_mounts(ctx, ctx.fsid, 'osd', None,
+                                      no_config=True if ctx.config else False)
+        if ctx.config:
+            mounts[pathify(ctx.config)] = '/etc/ceph/ceph.conf:z'
+        if ctx.keyring:
+            mounts[pathify(ctx.keyring)] = '/etc/ceph/ceph.keyring:z'
+        c = CephContainer(
+            ctx,
+            image=ctx.image,
+            entrypoint='/usr/bin/ceph',
+            args='auth get client.bootstrap-osd'.split(),
+            volume_mounts=mounts,
+        )
+        out, err, code = call_throws(ctx, c.run_cmd())
+        if not code:
+            return out
+        else:
+            return None
+
+    if not keyring:
+        keyring = get_bootstrap_osd_keyring()
+
     if keyring:
         # tmp keyring file
         tmp_keyring = write_tmp(keyring, uid, gid)
         mounts[tmp_keyring.name] = '/var/lib/ceph/bootstrap-osd/ceph.keyring:z'
 
-    c = get_ceph_volume_container(
-        ctx,
-        envs=ctx.env,
-        args=ctx.command,
-        volume_mounts=mounts,
-    )
+    # ceph-volume may create osd data directories which won't be persisted,
+    # so we use a tmp dir to capture them.
+    # FIXME: probably we can use /var/lib/ceph/{fsid}?
+    with tempfile.TemporaryDirectory() as tmp_osd_dir:
+        # match /var/lib/ceph/osd/ dir permissions
+        os.chown(tmp_osd_dir, uid, gid)
+        os.chmod(tmp_osd_dir, 0o755)
+        # store newly created osds here
+        mounts[tmp_osd_dir] = '/var/lib/ceph/osd/:z'
 
-    out, err, code = call_throws(ctx, c.run_cmd())
-    if not code:
-        print(out)
+        c = get_ceph_volume_container(
+            ctx,
+            envs=ctx.env,
+            args=ctx.command,
+            volume_mounts=mounts,
+        )
+
+        out, err, code = call_throws(ctx, c.run_cmd())
+        if not code:
+            print(out)
+        else:
+            print(err)
+        # If osds were created, move their data directories into place
+        for osd_folder_name in os.listdir(tmp_osd_dir):
+            if osd_folder_name.startswith('ceph-'):
+                osd_id = osd_folder_name[5:]
+                osd_data_dir = os.path.join(tmp_osd_dir, osd_folder_name)
+                copy_tree(ctx, [osd_data_dir],
+                          f'/var/lib/ceph/{ctx.fsid}/osd.{osd_id}',
+                          uid=uid, gid=gid)
+                (uid, gid) = extract_uid_gid(ctx)
+                # add missing data
+                configure_osd_data_dir(ctx, ctx.fsid, osd_id, uid, gid)
 
 ##################################
 
@@ -7864,7 +7989,7 @@ def _get_parser():
         help='cluster FSID')
     parser_ceph_volume.add_argument(
         '--config-json',
-        help='JSON file with config and (client.bootrap-osd) key')
+        help='JSON file with config and (client.bootstrap-osd) key')
     parser_ceph_volume.add_argument(
         '--config', '-c',
         help='ceph conf file')
@@ -7874,6 +7999,10 @@ def _get_parser():
     parser_ceph_volume.add_argument(
         'command', nargs=argparse.REMAINDER,
         help='command')
+    parser_ceph_volume.add_argument(
+        '--shared_ceph_folder',
+        metavar='CEPH_SOURCE_FOLDER',
+        help='Development mode. Several container folders are mapped as volumes to sub-folders of the ceph source folder')
 
     parser_zap_osds = subparsers.add_parser(
         'zap-osds', help='zap all OSDs associated with a particular fsid')
@@ -8095,7 +8224,6 @@ def _get_parser():
         help='daemon name (type.id)')
     parser_deploy.add_argument(
         '--fsid',
-        required=True,
         help='cluster FSID')
     parser_deploy.add_argument(
         '--config', '-c',