cephadm/box: cleanup
author    Pere Diaz Bou <pdiazbou@redhat.com>
          Mon, 29 Nov 2021 11:50:35 +0000 (12:50 +0100)
committer Pere Diaz Bou <pdiazbou@redhat.com>
          Wed, 15 Dec 2021 14:38:08 +0000 (15:38 +0100)
Signed-off-by: Pere Diaz Bou <pdiazbou@redhat.com>
src/cephadm/box/Dockerfile
src/cephadm/box/__init__.py [new file with mode: 0644]
src/cephadm/box/bootstrap.sh [deleted file]
src/cephadm/box/box.py
src/cephadm/box/daemon.json [new file with mode: 0644]
src/cephadm/box/docker-compose.yml
src/cephadm/box/get_ceph_image.sh [deleted file]
src/cephadm/box/osd.py
src/cephadm/box/setup_loop.sh [deleted file]
src/cephadm/box/util.py
src/cephadm/cephadm

diff --git a/src/cephadm/box/Dockerfile b/src/cephadm/box/Dockerfile
index 3b685a22e9cae4d6466ce802aac174364f4afe8a..7b1afd8952f9d9ac01c984a6ac8960cf57325c9e 100644
@@ -2,14 +2,6 @@
 FROM centos:8 as centos-systemd
 ENV container docker
 ENV CEPHADM_PATH=/usr/local/sbin/cephadm
-#RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
-#rm -f /lib/systemd/system/multi-user.target.wants/*;\
-#rm -f /etc/systemd/system/*.wants/*;\
-#rm -f /lib/systemd/system/local-fs.target.wants/*; \
-#rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
-#rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
-#rm -f /lib/systemd/system/basic.target.wants/*;\
-#rm -f /lib/systemd/system/anaconda.target.wants/*;
 RUN dnf -y install chrony firewalld lvm2 \
   openssh-server openssh-clients python3 \
   yum-utils sudo which && dnf clean all
@@ -21,7 +13,7 @@ FROM centos-systemd as centos-systemd-docker
 # To cache cephadm images
 RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
 RUN dnf -y install docker-ce && \
-  dnf clean all && systemctl enable docker
+    dnf clean all && systemctl enable docker
 
 # ssh utilities
 RUN dnf install epel-release -y && dnf makecache && dnf install sshpass -y
@@ -31,6 +23,7 @@ EXPOSE 22
 
 FROM centos-systemd-docker
 WORKDIR /root
+# VOLUME /var/run/docker.sock
 COPY start /usr/local/bin
 
 CMD [ "/usr/sbin/init" ]
diff --git a/src/cephadm/box/__init__.py b/src/cephadm/box/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/cephadm/box/bootstrap.sh b/src/cephadm/box/bootstrap.sh
deleted file mode 100755
index f2e39ed..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/bin/bash
-set -x
-
-OSDS=1
-HOSTS=0
-SKIP_LOOP=0
-SKIP_BOOTSTRAP=0
-
-function print_usage() {
-       echo "./bootstrap.sh [OPTIONS]"
-       echo "options:"
-       echo "    --hosts n: number of hosts to add"
-       echo "    --osds n: number of osds to add"
-       echo "    --update-ceph-image: create/update ceph image"
-       echo "    --update-box-image: create/update cephadm box image"
-       echo "    --skip-create-loop: skip creating loopback device"
-       echo "    --skip-bootstrap: skip deploying the containers"
-       echo "    -l | --list-hosts: list available cephad-box hosts/seed"
-       echo "    -h | --help: this help :)"
-}
-
-function docker-ips() {
-       docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}} %tab% {{.Name}} %tab% {{.Config.Hostname}}' $(docker ps -aq) | sed 's#%tab%#\t#g' | sed 's#/##g' | sort -t . -k 1,1n -k 2,2n -k 3,3n -k 4,4n
-}
-
-while [ $# -ge 1 ]; do
-case $1 in
-    -h | --help)
-       print_usage
-       exit
-       ;;
-    -l | --list-hosts) # TODO remove when ceph-ci updated
-       echo -e "IP\t\tName\t\t Hostname"
-       docker-ips | grep box
-       exit
-        ;;
-    --update-box-image)
-       echo Updating box image
-       docker build -t cephadm-box -f Dockerfile .
-        ;;
-    --update-ceph-image) # TODO remove when ceph-ci updated
-       echo Updating ceph image
-       source ./get_ceph_image.sh
-        ;;
-    --hosts)
-        HOSTS="$2"
-       echo "number of hosts: $HOSTS"
-       shift
-        ;;
-    --osds)
-        OSDS="$2"
-       echo "number of osds: $OSDS"
-       shift
-        ;;
-    --skip-create-loop)
-       echo Skiping loop creation
-        SKIP_LOOP=1
-        ;;
-    --skip-bootstrap)
-       echo Skiping bootstrap of containers
-        SKIP_BOOTSTRAP=1
-        ;;
-esac
-shift
-done
-
-# TODO: remove when ceph-ci image has required deps
-if [[ ! -a docker/ceph/image/quay.ceph.image.tar ]]
-then
-       echo -e "\033[33mWARNING:\033[0m run ./get_ceph_image.sh to get an updated ceph-ci/ceph image with correct deps."
-       exit
-fi
-
-if [[ $OSDS -eq 0 ]]
-then
-       SKIP_LOOP=1
-fi
-
-if [[ $SKIP_LOOP -eq 0 ]]
-then
-       source setup_loop.sh
-       create_loops $OSDS
-fi
-
-
-if [[ $SKIP_BOOTSTRAP -eq 0 ]]
-then
-       # loops should be created before starting docker-compose or else docker could
-       # not find lvs
-       docker-compose down
-       DCFLAGS="-f docker-compose.yml"
-        if [[ ! -f /sys/fs/cgroup/cgroup.controllers ]]; then
-            DCFLAGS+=" -f docker-compose.cgroup1.yml"
-        fi
-
-       docker-compose $DCFLAGS up --scale hosts=$HOSTS -d
-       sleep 3
-
-       IPS=$(docker-ips | grep "box_hosts" | awk '{ print $1 }')
-       echo "IPS: "
-       echo $IPS
-
-       sudo sysctl net.ipv4.conf.all.forwarding=1
-       sudo iptables -P FORWARD ACCEPT
-
-       for ((i=1;i<=$HOSTS;i++))
-       do
-               docker-compose exec --index=$i hosts /cephadm/box/setup_ssh.sh run-sshd
-       done
-
-       docker-compose exec -e NUM_OSDS=${OSDS} seed /cephadm/box/start
-
-       docker-compose exec -e HOST_IPS="${IPS}" seed /cephadm/box/setup_ssh.sh copy-cluster-ssh-key
-fi
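
Note: bootstrap.sh is removed and its responsibilities (listing box hosts, scaling containers, copying the cluster ssh key) move into box.py below. For orientation, a minimal Python sketch of what the deleted docker-ips() bash helper computed -- hypothetical code, not part of the commit:

    import subprocess

    def docker_ips() -> str:
        # Hypothetical port of the deleted docker-ips() helper: one line of
        # "IP <tab> name <tab> hostname" per container (lexicographic sort
        # here, unlike the numeric per-octet sort in the script).
        fmt = ('{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'
               '\t{{.Name}}\t{{.Config.Hostname}}')
        ids = subprocess.check_output(['docker', 'ps', '-aq'], text=True).split()
        if not ids:
            return ''
        out = subprocess.check_output(
            ['docker', 'inspect', '-f', fmt, *ids], text=True)
        return '\n'.join(sorted(out.replace('/', '').splitlines()))
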
diff --git a/src/cephadm/box/box.py b/src/cephadm/box/box.py
index e9d26b4ddf2656493f305449ce8d09a3f6c94787..f05f0e2a688d3976de02d8e666ff4c72a27668a7 100755
@@ -15,6 +15,9 @@ from util import Config, run_shell_command, run_cephadm_shell_command, \
 CEPH_IMAGE = 'quay.ceph.io/ceph-ci/ceph:master'
 BOX_IMAGE = 'cephadm-box:latest'
 
+def cleanup_box() -> None:
+    osd.cleanup()
+
 def image_exists(image_name: str):
     # extract_tag
     assert image_name.find(':')
@@ -48,9 +51,11 @@ def get_box_image():
     run_shell_command('docker build -t cephadm-box -f Dockerfile .')
     print('Box image added')
 
+
+
 class Cluster:
     _help = 'Manage docker cephadm boxes'
-    actions = ['bootstrap', 'start', 'down', 'list', 'sh', 'setup']
+    actions = ['bootstrap', 'start', 'down', 'list', 'sh', 'setup', 'cleanup']
     parser = None
 
     @staticmethod
@@ -63,6 +68,8 @@ class Cluster:
         parser.add_argument('--hosts', type=int, default=1, help='Number of hosts')
         parser.add_argument('--skip_deploy_osds', action='store_true', help='skip deploy osd')
         parser.add_argument('--skip_create_loop', action='store_true', help='skip create loopback device')
+        parser.add_argument('--skip_monitoring_stack', action='store_true', help='skip monitoring stack')
+        parser.add_argument('--skip_dashboard', action='store_true', help='skip dashboard')
 
     def __init__(self, argv):
         self.argv = argv
@@ -72,11 +79,17 @@ class Cluster:
         get_ceph_image()
         get_box_image()
 
+    @ensure_outside_container
+    def cleanup(self):
+        cleanup_box()
+
     @ensure_inside_container
     def bootstrap(self):
         print('Running bootstrap on seed')
         cephadm_path = os.environ.get('CEPHADM_PATH')
         os.symlink('/cephadm/cephadm', cephadm_path)
+        run_shell_command('systemctl restart docker') # restart to ensure docker is using daemon.json
+
         st = os.stat(cephadm_path)
         os.chmod(cephadm_path, st.st_mode | stat.S_IEXEC)
 
@@ -91,17 +104,25 @@ class Cluster:
 
         shared_ceph_folder = os.environ.get('SHARED_CEPH_FOLDER')
         if shared_ceph_folder:
-            extra_args.extend(['--shared_ceph_folder', 'shared_ceph_folder'])
+            extra_args.extend(['--shared_ceph_folder', shared_ceph_folder])
 
-        cephadm_image = os.environ.get('CEPHADM_IMAGE')
-        if shared_ceph_folder:
-            extra_args.append('--skip-pull')
+        extra_args.append('--skip-pull')
 
 # cephadm prints warnings to stderr; redirect them to the output so
 # shell_command doesn't complain
         extra_args.append('2>&0')
 
-        extra_args = ''.join(extra_args)
+        extra_args = ' '.join(extra_args)
+        skip_monitoring_stack = '--skip_monitoring_stack' if Config.get('skip_monitoring_stack') else ''
+        skip_dashboard = '--skip_dashboard' if Config.get('skip_dashboard') else ''
+
+        fsid = Config.get('fsid')
+        config_folder = Config.get('config_folder')
+        config = Config.get('config')
+        mon_config = Config.get('mon_config')
+        keyring = Config.get('keyring')
+        if not os.path.exists(config_folder):
+            os.mkdir(config_folder)
 
         cephadm_bootstrap_command = (
             '$CEPHADM_PATH --verbose bootstrap '
@@ -110,6 +131,13 @@
             '--initial-dashboard-password admin '
             '--dashboard-password-noupdate '
             '--shared_ceph_folder /ceph '
+            '--allow-overwrite '
+            f'--output-config {config} '
+            f'--output-keyring {keyring} '
+            f'--fsid "{fsid}" '
+            '--log-to-file '
+            f'{skip_dashboard} '
+            f'{skip_monitoring_stack} '
             f'{extra_args} '
         )
 
@@ -119,13 +148,20 @@ class Cluster:
 
 
         run_shell_command('sudo vgchange --refresh')
+        run_shell_command('cephadm ls')
+        run_shell_command('ln -s /ceph/src/cephadm/box/box.py /usr/bin/box')
 
+        hostname = run_shell_command('hostname')
+        # NOTE: sometimes cephadm in the box takes a while to update the
+        # containers running in the cluster and cannot deploy the osds. In
+        # that case, run: box -v osd deploy --vg vg1 to deploy the osds again.
         if not Config.get('skip_deploy_osds'):
             print('Deploying osds...')
             osds = Config.get('osds')
             for o in range(osds):
-                osd.deploy_osd(f'/dev/vg1/lv{o}')
+                osd.deploy_osd(f'vg1/lv{o}', hostname)
             print('Osds deployed')
+        run_cephadm_shell_command('ceph -s')
         print('Bootstrap completed!')
 
 
@@ -135,6 +171,9 @@ class Cluster:
         osds = Config.get('osds')
         hosts = Config.get('hosts')
 
+        # ensure boxes don't exist
+        run_shell_command('docker-compose down')
+
         print('Checking docker images')
         if not image_exists(CEPH_IMAGE):
             get_ceph_image()
@@ -147,8 +186,6 @@ class Cluster:
             print(f'Added {osds} logical volumes in a loopback device')
 
         print('Starting containers')
-        # ensure boxes don't exist
-        run_shell_command('docker-compose down')
 
         dcflags = '-f docker-compose.yml'
         if not os.path.exists('/sys/fs/cgroup/cgroup.controllers'):
@@ -166,6 +203,16 @@ class Cluster:
 
         verbose = '-v' if Config.get('verbose') else ''
         skip_deploy = '--skip_deploy_osds' if Config.get('skip_deploy_osds') else ''
+        skip_monitoring_stack = '--skip_monitoring_stack' if Config.get('skip_monitoring_stack') else ''
+        skip_dashboard = '--skip_dashboard' if Config.get('skip_dashboard') else ''
+        box_bootstrap_command = (
+            f'/cephadm/box/box.py {verbose} cluster bootstrap '
+            f'--osds {osds} '
+            f'--hosts {hosts} '
+            f'{skip_deploy} '
+            f'{skip_dashboard} '
+            f'{skip_monitoring_stack} '
+        )
-        run_dc_shell_command(f'/cephadm/box/box.py {verbose} cluster bootstrap --osds {osds} --hosts {hosts} {skip_deploy}', 1, 'seed')
+        run_dc_shell_command(box_bootstrap_command, 1, 'seed')
 
         host._copy_cluster_ssh_key(ips)
@@ -175,6 +222,7 @@ class Cluster:
     @ensure_outside_container
     def down(self):
         run_shell_command('docker-compose down')
+        cleanup_box()
         print('Successfully killed all boxes')
 
     @ensure_outside_container
@@ -185,6 +233,8 @@ class Cluster:
 
     @ensure_outside_container
     def sh(self):
+        # we need verbose to see the prompt after running shell command
+        Config.set('verbose', True)
         print('Seed bash')
         run_shell_command('docker-compose exec seed bash')
 
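
Note: the bootstrap path above forwards the new --skip_monitoring_stack and --skip_dashboard options from the outer `box.py cluster start` invocation into the inner bootstrap run on the seed container. A minimal standalone sketch of that flag-forwarding pattern, assuming a plain dict in place of box's Config store:

    def forward_skip_flags(cfg: dict) -> str:
        # Sketch: every enabled skip_* option becomes a literal flag on the
        # inner 'box.py cluster bootstrap' command; disabled ones vanish.
        flags = ('skip_deploy_osds', 'skip_monitoring_stack', 'skip_dashboard')
        return ' '.join(f'--{name}' for name in flags if cfg.get(name))

    assert forward_skip_flags({'skip_dashboard': True}) == '--skip_dashboard'
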
diff --git a/src/cephadm/box/daemon.json b/src/cephadm/box/daemon.json
new file mode 100644
index 0000000..5cfcaa8
--- /dev/null
@@ -0,0 +1,3 @@
+{
+    "storage-driver": "fuse-overlayfs"
+}
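
Note: daemon.json configures the dockerd running inside the box. The usual reason for this setting is that the box container's filesystem is itself an overlay, where the kernel overlay2 driver cannot stack, so the nested daemon falls back to fuse-overlayfs. A sketch (assumption: run from the host checkout) of regenerating the file that docker-compose.yml bind-mounts to /etc/docker/daemon.json:

    import json
    import pathlib

    # Write the storage-driver override consumed by the nested dockerd; the
    # bootstrap code restarts docker inside the seed so this takes effect.
    pathlib.Path('daemon.json').write_text(
        json.dumps({'storage-driver': 'fuse-overlayfs'}, indent=4) + '\n')
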
diff --git a/src/cephadm/box/docker-compose.yml b/src/cephadm/box/docker-compose.yml
index 2151811919dc88d464afbdea222853f42c916759..3d9d3ea9ac0f0cc3de9a00228505192ca9f244b5 100644
@@ -8,20 +8,22 @@ services:
     image: cephadm-box
     # probably not needed with rootless Docker and cgroups v2
     privileged: true
-    cap_add:
-      - SYS_ADMIN
-      - NET_ADMIN
-      - SYS_TIME
-      - MKNOD
+    cap_add:
+      - SYS_ADMIN
+      - NET_ADMIN
+      - SYS_TIME
+      - MKNOD
     stop_signal: RTMIN+3
     volumes:
       - ../../../:/ceph
       - ..:/cephadm
+      - ./daemon.json:/etc/docker/daemon.json
+      # dangerous, maybe just map the loopback
+      # https://stackoverflow.com/questions/36880565/why-dont-my-udev-rules-work-inside-of-a-running-docker-container
+      - /dev:/dev
     networks:
       - public
     mem_limit: "20g"
-    devices:
-      - /dev/loop0:/dev/ttt:rwm
     scale: -1
   seed:
     extends:
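
Note: mapping all of /dev replaces the old single /dev/loop0 device entry, so loop devices that box.py creates on the host after the containers are up become visible inside them (the linked Stack Overflow thread covers why udev events don't reach containers). A hypothetical check one could run inside a box container:

    import os
    import stat

    def visible_loop_devices() -> list:
        # Hypothetical helper: with the /dev:/dev bind mount, every host loop
        # block device should be listed here (loop-control is a char device
        # and is filtered out).
        devs = []
        for name in sorted(os.listdir('/dev')):
            path = os.path.join('/dev', name)
            if name.startswith('loop') and stat.S_ISBLK(os.stat(path).st_mode):
                devs.append(path)
        return devs
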
diff --git a/src/cephadm/box/get_ceph_image.sh b/src/cephadm/box/get_ceph_image.sh
deleted file mode 100755
index 3c431c8..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-IMAGE=quay.ceph.io/ceph-ci/ceph:master
-docker pull $IMAGE
-# update image with deps
-docker build -t $IMAGE docker/ceph
-# store to later load within docker
-mkdir -p docker/ceph/image
-rm -f docker/ceph/image/quay.ceph.image.tar
-docker save quay.ceph.io/ceph-ci/ceph:master -o docker/ceph/image/quay.ceph.image.tar
diff --git a/src/cephadm/box/osd.py b/src/cephadm/box/osd.py
index 5454e115a46f200eabefe58bdf119445a7d7eff2..74e977d513f9ed884f043d3ccd701d255895f855 100644
@@ -1,9 +1,16 @@
 from typing import Dict
 import os
 import argparse
+import json
 from util import ensure_inside_container, ensure_outside_container, run_shell_command, \
     run_cephadm_shell_command, Config
 
+
+def remove_loop_img() -> None:
+    loop_image = Config.get('loop_img')
+    if os.path.exists(loop_image):
+        os.remove(loop_image)
+
 @ensure_outside_container
 def create_loopback_devices(osds: int) -> None:
     assert osds
@@ -21,28 +28,30 @@ def create_loopback_devices(osds: int) -> None:
     if os.path.ismount(avail_loop):
         os.umount(avail_loop)
 
-    if run_shell_command(f'losetup -l | grep {avail_loop}', expect_error=True):
-        run_shell_command(f'sudo losetup -d {avail_loop}')
+    loop_devices = json.loads(run_shell_command(f'losetup -l -J', expect_error=True))
+    for dev in loop_devices['loopdevices']:
+        if dev['name'] == avail_loop:
+            run_shell_command(f'sudo losetup -d {avail_loop}')
 
     if not os.path.exists('./loop-images'):
         os.mkdir('loop-images')
 
-    loop_image = 'loop-images/loop.img'
-    if os.path.exists(loop_image):
-        os.remove(loop_image)
+    remove_loop_img()
 
-    run_shell_command(f'sudo dd if=/dev/zero of={loop_image} bs=1G count={size}')
+    loop_image = Config.get('loop_img')
+    run_shell_command(f'sudo dd if=/dev/zero of={loop_image} bs=1 count=0 seek={size}G')
     run_shell_command(f'sudo losetup {avail_loop} {loop_image}')
 
-    vgs = run_shell_command('sudo vgs | grep vg1', expect_error=True)
-    if vgs:
-        run_shell_command('sudo lvm vgremove -f -y vg1')
+    # cleanup last call
+    cleanup()
 
-    run_shell_command(f'sudo pvcreate {avail_loop}')
+    run_shell_command(f'sudo pvcreate {avail_loop} ')
     run_shell_command(f'sudo vgcreate vg1 {avail_loop}')
+
+    p = int(100 / osds)
     for i in range(osds):
         run_shell_command('sudo vgchange --refresh')
-        run_shell_command(f'sudo lvcreate --size 5G --name lv{i} vg1')
+        run_shell_command(f'sudo lvcreate -l {p}%VG --name lv{i} vg1')
 
 def get_lvm_osd_data(data: str) -> Dict[str, str]:
     osd_lvm_info = run_cephadm_shell_command(f'ceph-volume lvm list {data}')
@@ -60,15 +69,24 @@ def get_lvm_osd_data(data: str) -> Dict[str, str]:
     return osd_data
 
 @ensure_inside_container
-def deploy_osd(data: str):
-    assert data
-    out = run_shell_command(f'cephadm ceph-volume lvm zap {data}')
-    out = run_shell_command(f'cephadm ceph-volume --shared_ceph_folder /ceph lvm prepare --data {data} --no-systemd --no-tmpfs')
-
-    osd_data = get_lvm_osd_data(data)
+def deploy_osd(data: str, hostname: str):
+    run_cephadm_shell_command(f'ceph orch daemon add osd "{hostname}:{data}"')
+
+def cleanup() -> None:
+    vg = 'vg1'
+    pvs = json.loads(run_shell_command('sudo pvs --reportformat json'))
+    for pv in pvs['report'][0]['pv']:
+        if pv['vg_name'] == vg:
+            device = pv['pv_name']
+            run_shell_command(f'sudo vgremove -f --yes {vg}')
+            run_shell_command(f'sudo losetup -d {device}')
+            run_shell_command(f'sudo wipefs -af {device}')
+            # FIX: this can fail with excluded filter
+            run_shell_command(f'sudo pvremove -f --yes {device}', expect_error=True)
+            break
+
+    remove_loop_img()
 
-    osd = 'osd.' + osd_data['osd_id']
-    run_shell_command(f'cephadm deploy --name {osd}')
 class Osd:
     _help = '''
     Deploy osds and create needed block devices with loopback devices:
@@ -90,12 +108,26 @@ class Osd:
         parser = Osd.parser
         parser.add_argument('action', choices=Osd.actions)
         parser.add_argument('--data', type=str, help='path to a block device')
+        parser.add_argument('--hostname', type=str, help='host to deploy osd')
         parser.add_argument('--osds', type=int, default=0, help='number of osds')
+        parser.add_argument('--vg', type=str, help='Deploy with all LVs from the given volume group')
 
     @ensure_inside_container
     def deploy(self):
         data = Config.get('data')
-        deploy_osd(data)
+        hostname = Config.get('hostname')
+        vg = Config.get('vg')
+        if not hostname:
+            # assume this host
+            hostname = run_shell_command('hostname')
+        if vg:
+            # deploy with vg
+            lvs = json.loads(run_shell_command('lvs --reportformat json'))
+            for lv in lvs['report'][0]['lv']:
+                if lv['vg_name'] == vg:
+                    deploy_osd(f'{vg}/{lv["lv_name"]}', hostname)
+        else:
+            deploy_osd(data, hostname)
 
     @ensure_outside_container
     def create_loop(self):
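
Note: two changes in create_loopback_devices() above are easy to miss: the dd invocation now allocates the backing file sparsely, and the volume group is split into equal percentage slices instead of fixed 5G volumes, so the image size no longer has to track the OSD count. A minimal sketch of both, assuming the same names as the diff:

    import subprocess

    def create_sparse_image(path: str, size_gb: int) -> None:
        # bs=1 count=0 seek=<size>G sets the file length without writing
        # size_gb of zeros, so creation is instant and space is lazily used.
        subprocess.run(['sudo', 'dd', 'if=/dev/zero', f'of={path}',
                        'bs=1', 'count=0', f'seek={size_gb}G'], check=True)

    def lv_slice_percent(osds: int) -> int:
        # Matches p = int(100 / osds): each lvcreate -l {p}%VG gets an equal
        # share of vg1; truncation can leave a little of the VG unallocated.
        return int(100 / osds)
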
diff --git a/src/cephadm/box/setup_loop.sh b/src/cephadm/box/setup_loop.sh
deleted file mode 100755
index 661715e..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-
-set -e
-
-function create_loops() {
-
-       NUM_OSDS=$1
-       if [[ -z $NUM_OSDS ]]; then
-         echo "Call setup_loop <num_osds> to setup with more osds"
-         echo "Using default number of osds: 1."
-         NUM_OSDS=1
-       fi
-
-       # minimum 5 GB for each osd
-       SIZE=$(expr $NUM_OSDS \* 5)
-       # extra space just in case
-       SIZE=$(expr $SIZE + 2)
-
-       echo "Using ${SIZE} GB of space"
-
-       # look for an available loop device
-       avail_loop=$(sudo losetup -f)
-       loop_name=$(basename -- $avail_loop)
-
-       if [[ ! -e $avail_loop ]]
-       then
-               # in case we have to create the loop, find the minor device number.
-               num_loops=$(lsmod | grep loop | awk '{print $3}')
-               num_loops=$((num_loops + 1))
-               echo creating loop $avail_loop minor: $num_loops
-               mknod $avail_loop b 7 $num_loops
-       fi
-
-       if mountpoint -q $avail_loop
-       then
-               sudo umount $avail_loop
-       fi
-
-       if [[ ! -z $(losetup -l | grep $avail_loop) ]]
-       then
-               sudo losetup -d $avail_loop
-       fi
-
-       if [[ ! -e loop-images ]]
-       then
-               mkdir -p loop-images
-       fi
-       sudo rm -f loop-images/*
-       sudo dd if=/dev/zero of="loop-images/disk${loop_name}.img" bs=1G count=$SIZE
-       sudo losetup $avail_loop "loop-images/disk${loop_name}.img"
-
-       if [[ ! -z $(sudo vgs | grep vg1) ]]
-       then
-               sudo lvm vgremove -f -y vg1
-       fi
-       sudo pvcreate $avail_loop
-       sudo vgcreate vg1 $avail_loop
-
-       for ((i=0;i<$NUM_OSDS;i++)); do
-         sudo vgchange --refresh
-         sudo lvcreate --size 5G --name "lv${i}" "vg1"
-       done;
-}
diff --git a/src/cephadm/box/util.py b/src/cephadm/box/util.py
index e006ddd2cd50a3ce75e443f915626da88abd6158..3fcce5d811375e95b6cc26107fdb4e83a2d67909 100644
@@ -5,7 +5,16 @@ import os
 import sys
 
 class Config:
-    args = {}
+    args = {
+        'fsid': '00000000-0000-0000-0000-0000deadbeef',
+        'config_folder': '/etc/ceph/',
+        'config': '/etc/ceph/ceph.conf',
+        'keyring': '/etc/ceph/ceph.keyring',
+        'loop_img': 'loop-images/loop.img',
+    }
+    @staticmethod
+    def set(key, value):
+        Config.args[key] = value
 
     @staticmethod
     def get(key):
@@ -64,7 +73,11 @@ def run_shell_command(command: str, expect_error=False) -> str:
 
 @ensure_inside_container
 def run_cephadm_shell_command(command: str, expect_error=False) -> str:
-    out = run_shell_command(f'cephadm shell -- {command}', expect_error)
+    config = Config.get('config')
+    keyring = Config.get('keyring')
+
+    with_cephadm_image = 'CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master'
+    out = run_shell_command(f'{with_cephadm_image} cephadm --verbose shell --config {config} --keyring {keyring} -- {command}', expect_error)
     return out
 
 def run_dc_shell_command(command: str, index: int, box_type: str, expect_error=False) -> str:
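
Note: run_cephadm_shell_command() now pins CEPHADM_IMAGE and passes the config and keyring that bootstrap wrote (see the new Config defaults above), so `cephadm shell` neither pulls a fresh image nor has to infer the cluster files. A standalone sketch of the resulting invocation, with subprocess standing in for box's run_shell_command:

    import subprocess

    def run_cephadm_shell(command: str,
                          config: str = '/etc/ceph/ceph.conf',
                          keyring: str = '/etc/ceph/ceph.keyring') -> str:
        # Pin the image so the shell reuses the ceph-ci image already loaded
        # in the box, and point it at the bootstrap-generated cluster files.
        cmd = ('CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master '
               f'cephadm --verbose shell --config {config} '
               f'--keyring {keyring} -- {command}')
        return subprocess.check_output(cmd, shell=True, text=True)
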
diff --git a/src/cephadm/cephadm b/src/cephadm/cephadm
index 6ab5e2310388a572bfd12f99d68fa12515487fa6..8aa6836d7441d659f6905ec12339141c485922b9 100755
@@ -2825,13 +2825,6 @@ def deploy_daemon_units(
     # cmd
     data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
 
-    # if osd then try to read parameters if not provided
-    if daemon_type == 'osd':
-        osd_fsid_path = os.path.join(data_dir, 'fsid')
-        if 'fsid' in os.listdir(data_dir) and not osd_fsid:
-            with open(osd_fsid_path, 'r') as f:
-                osd_fsid = f.read()
-
     with open(data_dir + '/unit.run.new', 'w') as f, \
             open(data_dir + '/unit.meta.new', 'w') as metaf:
         f.write('set -e\n')
@@ -5007,10 +5000,8 @@ def extract_uid_gid_monitoring(ctx, daemon_type):
 
 
 @default_image
-@infer_fsid
 def command_deploy(ctx):
     # type: (CephadmContext) -> None
-    assert ctx.fsid
     daemon_type, daemon_id = ctx.name.split('.', 1)
 
     lock = FileLock(ctx, ctx.fsid)
@@ -5275,66 +5266,6 @@ def command_enter(ctx):
 
 ##################################
 
-
-def configure_osd_data_dir(ctx, fsid, daemon_id, uid, gid):
-    # type: (CephadmContext, str, Union[int, str], int, int) -> None
-    daemon_type = 'osd'
-    data_dir = get_data_dir(fsid, ctx.data_dir, daemon_type, daemon_id)
-    print(data_dir)
-
-    # Ensure user:group is the expected
-    for f in os.listdir(data_dir):
-        os.chown(os.path.join(data_dir, f), uid, gid)
-
-    # Create minimal config
-    touch(os.path.join(data_dir, 'config'), uid, gid)
-    mounts = get_container_mounts(ctx, fsid, daemon_type, daemon_id, no_config=True)
-    mounts[data_dir] = '/var/lib/ceph/osd/ceph-%s' % daemon_id
-    mounts['/etc/ceph/ceph.conf'] = '/etc/ceph/ceph.conf:z'
-    mounts['/etc/ceph/ceph.client.admin.keyring'] = '/etc/ceph/ceph.keyring:z'
-
-    CephContainer(
-        ctx,
-        image=ctx.image,
-        entrypoint='/usr/bin/ceph',
-        args=[
-            'config', 'generate-minimal-conf',
-            '-o', '/var/lib/ceph/osd/ceph-%s/config' % daemon_id
-        ],
-        privileged=True,
-        volume_mounts=mounts
-    ).run()
-
-    # Create keyring and then import
-    key = CephContainer(
-        ctx,
-        image=ctx.image,
-        entrypoint='/usr/bin/ceph-authtool',
-        args=['--gen-print-key'],
-    ).run().strip()
-
-    keyring = ('[%s.%s]\n'
-               '\tkey = %s\n'
-               '\tcaps osd = allow *\n'
-               '\tcaps mon = allow *\n'
-               % (daemon_type, daemon_id, key))
-    with open(os.path.join(data_dir, 'keyring'), 'w+') as f:
-        os.fchmod(f.fileno(), 0o600)
-        os.fchown(f.fileno(), uid, gid)
-        f.write(keyring)
-    CephContainer(
-        ctx,
-        image=ctx.image,
-        entrypoint='/usr/bin/ceph',
-        args=[
-            'auth', 'import',
-            '-i', '/var/lib/ceph/osd/ceph-%s/keyring' % daemon_id
-        ],
-        privileged=True,
-        volume_mounts=mounts
-    ).run()
-
-
 @infer_fsid
 @infer_image
 @validate_fsid
@@ -5353,6 +5284,7 @@ def command_ceph_volume(ctx):
 
     (uid, gid) = (0, 0)  # ceph-volume runs as root
     mounts = get_container_mounts(ctx, ctx.fsid, 'osd', None)
+
     tmp_config = None
     tmp_keyring = None
 
@@ -5362,75 +5294,21 @@ def command_ceph_volume(ctx):
         # tmp config file
         tmp_config = write_tmp(config, uid, gid)
         mounts[tmp_config.name] = '/etc/ceph/ceph.conf:z'
-
-    # Ceph-volume uses the bootstrap-osd key in order to do its operations.
-    # This function retrieves the keyring so it can be provided.
-
-    def get_bootstrap_osd_keyring() -> Optional[str]:
-        if not ctx.keyring and os.path.exists(SHELL_DEFAULT_KEYRING):
-            ctx.keyring = SHELL_DEFAULT_KEYRING
-        (config, keyring) = get_config_and_keyring(ctx)
-
-        mounts = get_container_mounts(ctx, ctx.fsid, 'osd', None,
-                                      no_config=True if ctx.config else False)
-        if ctx.config:
-            mounts[pathify(ctx.config)] = '/etc/ceph/ceph.conf:z'
-        if ctx.keyring:
-            mounts[pathify(ctx.keyring)] = '/etc/ceph/ceph.keyring:z'
-        c = CephContainer(
-            ctx,
-            image=ctx.image,
-            entrypoint='/usr/bin/ceph',
-            args='auth get client.bootstrap-osd'.split(),
-            volume_mounts=mounts,
-        )
-        out, err, code = call_throws(ctx, c.run_cmd())
-        if not code:
-            return out
-        else:
-            return None
-
-    if not keyring:
-        keyring = get_bootstrap_osd_keyring()
-
     if keyring:
         # tmp keyring file
         tmp_keyring = write_tmp(keyring, uid, gid)
         mounts[tmp_keyring.name] = '/var/lib/ceph/bootstrap-osd/ceph.keyring:z'
 
-    # If ceph-volume creates osd data directories which won't be persisted
-    # so we use a tmp dir for that.
-    # FIXME: probably we can use /var/lib/ceph/{fsid}?
-    with tempfile.TemporaryDirectory() as tmp_osd_dir:
-        # match /var/lib/ceph/osd/ dir permissions
-        os.chown(tmp_osd_dir, uid, gid)
-        os.chmod(tmp_osd_dir, 0o755)
-        # store newly created osds here
-        mounts[tmp_osd_dir] = '/var/lib/ceph/osd/:z'
-
-        c = get_ceph_volume_container(
-            ctx,
-            envs=ctx.env,
-            args=ctx.command,
-            volume_mounts=mounts,
-        )
+    c = get_ceph_volume_container(
+        ctx,
+        envs=ctx.env,
+        args=ctx.command,
+        volume_mounts=mounts,
+    )
 
-        out, err, code = call_throws(ctx, c.run_cmd())
-        if not code:
-            print(out)
-        else:
-            print(err)
-        # If osds were created move osd's data directories
-        for osd_folder_name in os.listdir(tmp_osd_dir):
-            if 'ceph-' in osd_folder_name[:5]:
-                osd_id = osd_folder_name[5:]
-                osd_data_dir = os.path.join(tmp_osd_dir, osd_folder_name)
-                copy_tree(ctx, [osd_data_dir],
-                          f'/var/lib/ceph/{ctx.fsid}/osd.{osd_id}',
-                          uid=uid, gid=gid)
-                (uid, gid) = extract_uid_gid(ctx)
-                # add missing data
-                configure_osd_data_dir(ctx, ctx.fsid, osd_id, uid, gid)
+    out, err, code = call_throws(ctx, c.run_cmd())
+    if not code:
+        print(out)
 
 ##################################
 
@@ -7999,10 +7877,6 @@ def _get_parser():
     parser_ceph_volume.add_argument(
         'command', nargs=argparse.REMAINDER,
         help='command')
-    parser_ceph_volume.add_argument(
-        '--shared_ceph_folder',
-        metavar='CEPH_SOURCE_FOLDER',
-        help='Development mode. Several folders in containers are volumes mapped to different sub-folders in the ceph source folder')
 
     parser_zap_osds = subparsers.add_parser(
         'zap-osds', help='zap all OSDs associated with a particular fsid')
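
Note: the cephadm changes are the flip side of the osd.py rewrite: since OSDs are now created through `ceph orch daemon add osd`, command_ceph_volume no longer fabricates OSD data directories in a tmp dir or fetches the bootstrap-osd keyring, and command_deploy drops the fsid inference that the removed path relied on. End to end, deploying an OSD from inside the box reduces to one orchestrator call; a hedged usage sketch, with 'seed' and 'vg1/lv0' assumed from the box defaults in this diff:

    import subprocess

    # What osd.deploy_osd() boils down to after this commit: hand the host and
    # logical volume to the orchestrator and let cephadm do the rest.
    subprocess.run(
        'cephadm shell -- ceph orch daemon add osd "seed:vg1/lv0"',
        shell=True, check=True)
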