From dd1b5eb38ce891c9d0786b48c42152c6cade9b62 Mon Sep 17 00:00:00 2001 From: Pere Diaz Bou Date: Mon, 7 Feb 2022 13:16:08 +0100 Subject: [PATCH] cephadm/box: default add hosts Signed-off-by: Pere Diaz Bou --- doc/dev/cephadm/developing-cephadm.rst | 13 ++- src/cephadm/box/Dockerfile | 2 +- src/cephadm/box/box.py | 114 +++++++++++++++---------- src/cephadm/box/host.py | 86 +++++++++++++++---- src/cephadm/box/osd.py | 74 +++++++++++----- src/cephadm/box/util.py | 96 +++++++++++++-------- 6 files changed, 268 insertions(+), 117 deletions(-) diff --git a/doc/dev/cephadm/developing-cephadm.rst b/doc/dev/cephadm/developing-cephadm.rst index 5b9ab747cf175..64f29c9a1f69c 100644 --- a/doc/dev/cephadm/developing-cephadm.rst +++ b/doc/dev/cephadm/developing-cephadm.rst @@ -264,9 +264,18 @@ In order to setup Cephadm's box run:: .. note:: It is recommended to run box with verbose (-v). -After getting all needed images we can run:: +After getting all needed images we can create a simple cluster without osds and hosts with:: - sudo box -v cluster start --osds 3 --hosts 3 + sudo box -v cluster start +If you want to deploy the cluster with more osds and hosts:: # 3 osds and 2 hosts by default sudo box -v cluster start --expanded # explicitly change number of hosts and osds sudo box -v cluster start --expanded --osds 5 --hosts 5 +Without the expanded option, explicitly adding either more hosts or osds won't change the state +of the cluster. .. note:: Cluster start will try to set up even if cluster setup was not called. .. 
note:: Osds are created with loopback devices and hence, sudo is needed to diff --git a/src/cephadm/box/Dockerfile b/src/cephadm/box/Dockerfile index d3bd9c28a88e4..e927bcb70793c 100644 --- a/src/cephadm/box/Dockerfile +++ b/src/cephadm/box/Dockerfile @@ -5,7 +5,7 @@ ENV CEPHADM_PATH=/usr/local/sbin/cephadm # Centos met EOL and the content of the CentOS 8 repos has been moved to vault.centos.org RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* -RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* +RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=https://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* RUN dnf -y install chrony firewalld lvm2 \ openssh-server openssh-clients python3 \ diff --git a/src/cephadm/box/box.py b/src/cephadm/box/box.py index da41e2a767518..000e9adb88965 100755 --- a/src/cephadm/box/box.py +++ b/src/cephadm/box/box.py @@ -6,10 +6,16 @@ import sys import host import osd -from util import (Config, Target, ensure_inside_container, - ensure_outside_container, get_boxes_container_info, - get_host_ips, inside_container, run_cephadm_shell_command, - run_dc_shell_command, run_shell_command) +from util import ( + Config, + Target, + ensure_inside_container, + ensure_outside_container, + get_boxes_container_info, + run_cephadm_shell_command, + run_dc_shell_command, + run_shell_command, +) CEPH_IMAGE = 'quay.ceph.io/ceph-ci/ceph:master' BOX_IMAGE = 'cephadm-box:latest' @@ -19,14 +25,17 @@ BOX_IMAGE = 'cephadm-box:latest' # image yourself with `box cluster setup` CEPH_IMAGE_TAR = 'docker/ceph/image/quay.ceph.image.tar' + def remove_ceph_image_tar(): if os.path.exists(CEPH_IMAGE_TAR): os.remove(CEPH_IMAGE_TAR) + def cleanup_box() -> None: osd.cleanup() remove_ceph_image_tar() + def image_exists(image_name: str): # extract_tag assert image_name.find(':') @@ -42,6 +51,7 @@ def image_exists(image_name: str): return True return False + def get_ceph_image(): 
print('Getting ceph image') run_shell_command(f'docker pull {CEPH_IMAGE}') @@ -55,24 +65,29 @@ def get_ceph_image(): run_shell_command(f'docker save {CEPH_IMAGE} -o {CEPH_IMAGE_TAR}') print('Ceph image added') + def get_box_image(): print('Getting box image') run_shell_command('docker build -t cephadm-box -f Dockerfile .') print('Box image added') - + class Cluster(Target): _help = 'Manage docker cephadm boxes' actions = ['bootstrap', 'start', 'down', 'list', 'sh', 'setup', 'cleanup'] def set_args(self): - self.parser.add_argument('action', choices=Cluster.actions, help='Action to perform on the box') - self.parser.add_argument('--osds', type=int, default=1, help='Number of osds') - self.parser.add_argument('--hosts', type=int, default=1, help='Number of hosts') - self.parser.add_argument('--skip_deploy_osds', action='store_true', help='skip deploy osd') - self.parser.add_argument('--skip_create_loop', action='store_true', help='skip create loopback device' ) - self.parser.add_argument('--skip_monitoring_stack', action='store_true', help='skip monitoring stack') - self.parser.add_argument('--skip_dashboard', action='store_true', help='skip dashboard') + self.parser.add_argument( + 'action', choices=Cluster.actions, help='Action to perform on the box' + ) + self.parser.add_argument('--osds', type=int, default=3, help='Number of osds') + + self.parser.add_argument('--hosts', type=int, default=2, help='Number of hosts') + self.parser.add_argument('--skip-deploy-osds', action='store_true', help='skip deploy osd') + self.parser.add_argument('--skip-create-loop', action='store_true', help='skip create loopback device') + self.parser.add_argument('--skip-monitoring-stack', action='store_true', help='skip monitoring stack') + self.parser.add_argument('--skip-dashboard', action='store_true', help='skip dashboard') + self.parser.add_argument('--expanded', action='store_true', help='deploy 3 hosts and 3 osds') @ensure_outside_container def setup(self): @@ -88,7 +103,9 @@ 
class Cluster(Target): print('Running bootstrap on seed') cephadm_path = os.environ.get('CEPHADM_PATH') os.symlink('/cephadm/cephadm', cephadm_path) - run_shell_command('systemctl restart docker') # restart to ensure docker is using daemon.json + run_shell_command( + 'systemctl restart docker' + ) # restart to ensure docker is using daemon.json st = os.stat(cephadm_path) os.chmod(cephadm_path, st.st_mode | stat.S_IEXEC) @@ -98,7 +115,9 @@ class Cluster(Target): # instead of master's tag run_shell_command('export CEPH_SOURCE_FOLDER=/ceph') run_shell_command('export CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master') - run_shell_command('echo "export CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master" >> ~/.bashrc') + run_shell_command( + 'echo "export CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master" >> ~/.bashrc' + ) extra_args = [] @@ -109,13 +128,14 @@ class Cluster(Target): extra_args.append('2>&0') extra_args = ' '.join(extra_args) - skip_monitoring_stack = '--skip_monitoring_stack' if Config.get('skip_monitoring_stack') else '' - skip_dashboard = '--skip_dashboard' if Config.get('skip_dashboard') else '' + skip_monitoring_stack = ( + '--skip-monitoring-stack' if Config.get('skip-monitoring-stack') else '' + ) + skip_dashboard = '--skip-dashboard' if Config.get('skip-dashboard') else '' fsid = Config.get('fsid') config_folder = Config.get('config_folder') config = Config.get('config') - mon_config = Config.get('mon_config') keyring = Config.get('keyring') if not os.path.exists(config_folder): os.mkdir(config_folder) @@ -142,26 +162,16 @@ class Cluster(Target): run_shell_command(cephadm_bootstrap_command) print('Cephadm bootstrap complete') - run_shell_command('sudo vgchange --refresh') run_shell_command('cephadm ls') run_shell_command('ln -s /ceph/src/cephadm/box/box.py /usr/bin/box') - hostname = run_shell_command('hostname') # NOTE: sometimes cephadm in the box takes a while to update the containers # running in the cluster and it cannot deploy the osds. 
In this case # run: box -v osd deploy --vg vg1 to deploy osds again. - if not Config.get('skip_deploy_osds'): - print('Deploying osds...') - osds = Config.get('osds') - for o in range(osds): - osd.deploy_osd(f'vg1/lv{o}', hostname) - print('Osds deployed') run_cephadm_shell_command('ceph -s') print('Bootstrap completed!') - - @ensure_outside_container def start(self): osds = Config.get('osds') @@ -192,27 +202,40 @@ class Cluster(Target): run_shell_command('sudo iptables -P FORWARD ACCEPT') print('Seting up host ssh servers') - ips = get_host_ips() - print(ips) for h in range(hosts): - host._setup_ssh(h+1) + host._setup_ssh(h + 1) verbose = '-v' if Config.get('verbose') else '' - skip_deploy = '--skip_deploy_osds' if Config.get('skip_deploy_osds') else '' - skip_monitoring_stack = '--skip_monitoring_stack' if Config.get('skip_monitoring_stack') else '' - skip_dashboard = '--skip_dashboard' if Config.get('skip_dashboard') else '' + skip_deploy = '--skip-deploy-osds' if Config.get('skip-deploy-osds') else '' + skip_monitoring_stack = ( + '--skip-monitoring-stack' if Config.get('skip-monitoring-stack') else '' + ) + skip_dashboard = '--skip-dashboard' if Config.get('skip-dashboard') else '' box_bootstrap_command = ( f'/cephadm/box/box.py {verbose} cluster bootstrap ' - '--osds {osds} ' - '--hosts {hosts} ' + f'--osds {osds} ' + f'--hosts {hosts} ' f'{skip_deploy} ' f'{skip_dashboard} ' f'{skip_monitoring_stack} ' ) - run_dc_shell_command(f'/cephadm/box/box.py {verbose} cluster bootstrap --osds {osds} --hosts {hosts} {skip_deploy}', 1, 'seed') + run_dc_shell_command(box_bootstrap_command, 1, 'seed') + info = get_boxes_container_info() + ips = info['ips'] + hostnames = info['hostnames'] + print(ips) host._copy_cluster_ssh_key(ips) + expanded = Config.get('expanded') + if expanded: + host._add_hosts(ips, hostnames) + + if expanded and not Config.get('skip-deploy-osds'): + print('Deploying osds... 
This could take up to minutes') + osd.deploy_osds_in_vg('vg1') + print('Osds deployed') + print('Bootstrap finished successfully') @ensure_outside_container @@ -223,9 +246,12 @@ class Cluster(Target): @ensure_outside_container def list(self): - info = get_boxes_container_info() - for container in info: - print('\t'.join(container)) + info = get_boxes_container_info(with_seed=True) + for i in range(info['size']): + ip = info['ips'][i] + name = info['container_names'][i] + hostname = info['hostnames'][i] + print(f'{name} \t{ip} \t{hostname}') @ensure_outside_container def sh(self): @@ -235,17 +261,18 @@ class Cluster(Target): run_shell_command('docker-compose exec seed bash') - - targets = { 'cluster': Cluster, 'osd': osd.Osd, 'host': host.Host, } + def main(): parser = argparse.ArgumentParser() - parser.add_argument('-v', action='store_true', dest='verbose', help='be more verbose') + parser.add_argument( + '-v', action='store_true', dest='verbose', help='be more verbose' + ) subparsers = parser.add_subparsers() target_instances = {} @@ -257,7 +284,7 @@ def main(): instance = target_instances[arg] if hasattr(instance, 'main'): instance.argv = sys.argv[count:] - instance.set_args() + instance.set_args() args = parser.parse_args() Config.add_args(vars(args)) instance.main() @@ -265,5 +292,6 @@ def main(): parser.print_help() + if __name__ == '__main__': main() diff --git a/src/cephadm/box/host.py b/src/cephadm/box/host.py index df56fb53d7e45..d7907812d6ec8 100644 --- a/src/cephadm/box/host.py +++ b/src/cephadm/box/host.py @@ -1,9 +1,15 @@ -import argparse import os -from typing import List +from typing import List, Union -from util import (Config, Target, inside_container, run_dc_shell_command, - run_shell_command) +from util import ( + Config, + Target, + get_boxes_container_info, + inside_container, + run_cephadm_shell_command, + run_dc_shell_command, + run_shell_command, +) def _setup_ssh(container_index): @@ -18,43 +24,89 @@ def _setup_ssh(container_index): 
f.flush() run_shell_command('/usr/sbin/sshd') else: - print('Redirecting to _setup_ssh to container') + print('Redirecting to _setup_ssh to container') verbose = '-v' if Config.get('verbose') else '' - run_dc_shell_command(f'/cephadm/box/box.py {verbose} host setup_ssh {container_index}', container_index, 'hosts') - + run_dc_shell_command( + f'/cephadm/box/box.py {verbose} host setup_ssh {container_index}', + container_index, + 'hosts', + ) -def _copy_cluster_ssh_key(ips: List[str]): + +def _add_hosts(ips: Union[List[str], str], hostnames: Union[List[str], str]): + if inside_container(): + assert len(ips) == len(hostnames) + for i in range(len(ips)): + run_cephadm_shell_command(f'ceph orch host add {hostnames[i]} {ips[i]}') + else: + print('Redirecting to _add_hosts to container') + verbose = '-v' if Config.get('verbose') else '' + print(ips) + ips = ' '.join(ips) + ips = f'{ips}' + hostnames = ' '.join(hostnames) + hostnames = f'{hostnames}' + run_dc_shell_command( + f'/cephadm/box/box.py {verbose} host add_hosts 1 --ips {ips} --hostnames {hostnames}', + 1, + 'seed', + ) + + +def _copy_cluster_ssh_key(ips: Union[List[str], str]): if inside_container(): local_ip = run_shell_command('hostname -i') for ip in ips: if ip != local_ip: - run_shell_command(('sshpass -p "root" ssh-copy-id -f ' - f'-o StrictHostKeyChecking=no -i /etc/ceph/ceph.pub "root@{ip}"')) + run_shell_command( + ( + 'sshpass -p "root" ssh-copy-id -f ' + f'-o StrictHostKeyChecking=no -i /etc/ceph/ceph.pub "root@{ip}"' + ) + ) else: - print('Redirecting to _copy_cluster_ssh to container') + print('Redirecting to _copy_cluster_ssh to container') verbose = '-v' if Config.get('verbose') else '' print(ips) ips = ' '.join(ips) - ips = f"{ips}" + ips = f'{ips}' # assume we only have one seed - run_dc_shell_command(f'/cephadm/box/box.py {verbose} host copy_cluster_ssh_key 1 --ips {ips}', - 1, 'seed') + run_dc_shell_command( + f'/cephadm/box/box.py {verbose} host copy_cluster_ssh_key 1 --ips {ips}', + 1, + 
'seed', + ) + + class Host(Target): _help = 'Run seed/host related commands' - actions = ['setup_ssh', 'copy_cluster_ssh_key'] + actions = ['setup_ssh', 'copy_cluster_ssh_key', 'add_hosts'] def set_args(self): self.parser.add_argument('action', choices=Host.actions) - self.parser.add_argument('host_container_index', type=str, help='box_host_{index}') + self.parser.add_argument( + 'host_container_index', type=str, help='box_host_{index}' + ) self.parser.add_argument('--ips', nargs='*', help='List of host ips') + self.parser.add_argument( + '--hostnames', nargs='*', help='List of hostnames ips(relative to ip list)' + ) def setup_ssh(self): _setup_ssh(Config.get('host_container_index')) + def add_hosts(self): + ips = Config.get('ips') + if not ips: + ips = get_boxes_container_info()['ips'] + hostnames = Config.get('hostnames') + if not hostnames: + hostnames = get_boxes_container_info()['hostnames'] + _add_hosts(ips, hostnames) def copy_cluster_ssh_key(self): ips = Config.get('ips') if not ips: - ips = get_host_ips() + ips = get_boxes_container_info()['ips'] _copy_cluster_ssh_key(ips) diff --git a/src/cephadm/box/osd.py b/src/cephadm/box/osd.py index eaf520e77eb04..72693ac85bf58 100644 --- a/src/cephadm/box/osd.py +++ b/src/cephadm/box/osd.py @@ -1,11 +1,18 @@ -import argparse import json import os from typing import Dict -from util import (Config, Target, ensure_inside_container, - ensure_outside_container, run_cephadm_shell_command, - run_shell_command) +from util import ( + Config, + Target, + ensure_inside_container, + ensure_outside_container, + get_orch_hosts, + inside_container, + run_cephadm_shell_command, + run_dc_shell_command, + run_shell_command, +) def remove_loop_img() -> None: @@ -13,24 +20,24 @@ def remove_loop_img() -> None: if os.path.exists(loop_image): os.remove(loop_image) + @ensure_outside_container def create_loopback_devices(osds: int) -> None: assert osds size = (5 * osds) + 1 print(f'Using {size}GB of data to store osds') avail_loop = 
run_shell_command('sudo losetup -f') - base_name = os.path.basename(avail_loop) # create loop if we cannot find it if not os.path.exists(avail_loop): - num_loops = int(run_shell_command('lsmod | grep loop | awk \'{print $3}\'')) + num_loops = int(run_shell_command("lsmod | grep loop | awk '{print $3}'")) num_loops += 1 run_shell_command(f'mknod {avail_loop} b 7 {num_loops}') if os.path.ismount(avail_loop): os.umount(avail_loop) - loop_devices = json.loads(run_shell_command(f'losetup -l -J', expect_error=True)) + loop_devices = json.loads(run_shell_command('losetup -l -J', expect_error=True)) for dev in loop_devices['loopdevices']: if dev['name'] == avail_loop: run_shell_command(f'sudo losetup -d {avail_loop}') @@ -55,6 +62,7 @@ def create_loopback_devices(osds: int) -> None: run_shell_command('sudo vgchange --refresh') run_shell_command(f'sudo lvcreate -l {p}%VG --name lv{i} vg1') + def get_lvm_osd_data(data: str) -> Dict[str, str]: osd_lvm_info = run_cephadm_shell_command(f'ceph-volume lvm list {data}') osd_data = {} @@ -70,9 +78,12 @@ def get_lvm_osd_data(data: str) -> Dict[str, str]: osd_data[key] = line[-1] return osd_data + @ensure_inside_container -def deploy_osd(data: str, hostname: str): - run_cephadm_shell_command(f'ceph orch daemon add osd "{hostname}:{data}"') +def deploy_osd(data: str, hostname: str) -> bool: + out = run_cephadm_shell_command(f'ceph orch daemon add osd "{hostname}:{data}"') + return 'Created osd(s)' in out + def cleanup() -> None: vg = 'vg1' @@ -89,14 +100,43 @@ def cleanup() -> None: remove_loop_img() + +def deploy_osds_in_vg(vg: str): + """ + rotate host will deploy each osd in a different host + + deploying osds will not succeed with starting services so this + makes another process to run on the background + """ + if inside_container(): + lvs = json.loads(run_shell_command('lvs --reportformat json')) + # distribute osds per host + hosts = get_orch_hosts() + host_index = 0 + for lv in lvs['report'][0]['lv']: + if lv['vg_name'] == vg: 
+ deployed = False + while not deployed: + deployed = deploy_osd( + f'{vg}/{lv["lv_name"]}', hosts[host_index]['hostname'] + ) + host_index = (host_index + 1) % len(hosts) + else: + verbose = '-v' if Config.get('verbose') else '' + print('Redirecting deploy osd in vg to inside container') + run_dc_shell_command( + f'/cephadm/box/box.py {verbose} osd deploy --vg {vg}', 1, 'seed' + ) + + class Osd(Target): - _help = ''' + _help = """ Deploy osds and create needed block devices with loopback devices: Actions: - deploy: Deploy an osd given a block device - create_loop: Create needed loopback devices and block devices in logical volumes for a number of osds. - ''' + """ actions = ['deploy', 'create_loop'] def set_args(self): @@ -104,9 +144,10 @@ class Osd(Target): self.parser.add_argument('--data', type=str, help='path to a block device') self.parser.add_argument('--hostname', type=str, help='host to deploy osd') self.parser.add_argument('--osds', type=int, default=0, help='number of osds') - self.parser.add_argument('--vg', type=str, help='Deploy with all lv from virtual group') + self.parser.add_argument( + '--vg', type=str, help='Deploy with all lv from virtual group' + ) - @ensure_inside_container def deploy(self): data = Config.get('data') hostname = Config.get('hostname') @@ -115,11 +156,7 @@ class Osd(Target): # assume this host hostname = run_shell_command('hostname') if vg: - # deploy with vg - lvs = json.loads(run_shell_command('lvs --reportformat json')) - for lv in lvs['report'][0]['lv']: - if lv['vg_name'] == vg: - deploy_osd(f'{vg}/{lv["lv_name"]}', hostname) + deploy_osds_in_vg(vg) else: deploy_osd(data, hostname) @@ -128,4 +165,3 @@ class Osd(Target): osds = Config.get('osds') create_loopback_devices(osds) print('Successfully added logical volumes in loopback devices') - diff --git a/src/cephadm/box/util.py b/src/cephadm/box/util.py index 01ca3dc702a93..6b939b6befc6e 100644 --- a/src/cephadm/box/util.py +++ b/src/cephadm/box/util.py @@ -1,8 +1,8 @@ 
-import argparse +import json import os import subprocess import sys -from typing import Dict, List +from typing import Any, Callable, Dict class Config: @@ -13,6 +13,7 @@ class Config: 'keyring': '/etc/ceph/ceph.keyring', 'loop_img': 'loop-images/loop.img', } + @staticmethod def set(key, value): Config.args[key] = value @@ -24,14 +25,16 @@ class Config: return None @staticmethod - def add_args(args: Dict[str, str]) -> argparse.ArgumentParser: + def add_args(args: Dict[str, str]) -> None: Config.args.update(args) + class Target: def __init__(self, argv, subparsers): self.argv = argv - self.parser = subparsers.add_parser(self.__class__.__name__.lower(), - help=self.__class__._help) + self.parser = subparsers.add_parser( + self.__class__.__name__.lower(), help=self.__class__._help + ) def set_args(self): """ @@ -50,33 +53,39 @@ class Target: function = getattr(self, args.action) function() -def ensure_outside_container(func) -> bool: + +def ensure_outside_container(func) -> Callable: def wrapper(*args, **kwargs): if not inside_container(): return func(*args, **kwargs) else: raise RuntimeError('This command should be ran outside a container') + return wrapper - + + def ensure_inside_container(func) -> bool: def wrapper(*args, **kwargs): if inside_container(): return func(*args, **kwargs) else: raise RuntimeError('This command should be ran inside a container') + return wrapper def run_shell_command(command: str, expect_error=False) -> str: if Config.get('verbose'): print(f'Running command: {command}') - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen( + command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) out = '' # let's read when output comes so it is in real time while True: # TODO: improve performance of this part, I think this part is a problem - pout = process.stdout.read(1).decode('latin1') + pout = process.stdout.read(1).decode('latin1') if pout == '' and 
process.poll() is not None: break if pout: @@ -87,7 +96,9 @@ def run_shell_command(command: str, expect_error=False) -> str: process.wait() # no last break line - err = process.stderr.read().decode().rstrip() # remove trailing whitespaces and new lines + err = ( + process.stderr.read().decode().rstrip() + ) # remove trailing whitespaces and new lines out = out.strip() if process.returncode != 0 and not expect_error: @@ -95,42 +106,57 @@ def run_shell_command(command: str, expect_error=False) -> str: sys.exit(1) return out + @ensure_inside_container def run_cephadm_shell_command(command: str, expect_error=False) -> str: config = Config.get('config') keyring = Config.get('keyring') with_cephadm_image = 'CEPHADM_IMAGE=quay.ceph.io/ceph-ci/ceph:master' - out = run_shell_command(f'{with_cephadm_image} cephadm --verbose shell --config {config} --keyring {keyring} -- {command}', expect_error) + out = run_shell_command( + f'{with_cephadm_image} cephadm --verbose shell --config {config} --keyring {keyring} -- {command}', + expect_error, + ) return out -def run_dc_shell_command(command: str, index: int, box_type: str, expect_error=False) -> str: - out = run_shell_command(f'docker-compose exec --index={index} {box_type} {command}', expect_error) + +def run_dc_shell_command( + command: str, index: int, box_type: str, expect_error=False +) -> str: + out = run_shell_command( + f'docker-compose exec --index={index} {box_type} {command}', expect_error + ) return out + def inside_container() -> bool: return os.path.exists('/.dockerenv') -@ensure_outside_container -def get_host_ips() -> List[List[str]]: - containers_info = get_boxes_container_info() - if Config.get('verbose'): - print(containers_info) - ips = [] - for container in containers_info: - if container[1][:len('box_hosts')] == 'box_hosts': - ips.append(container[0]) - return ips - -@ensure_outside_container -def get_boxes_container_info() -> List[List[str]]: - ips_query = "docker inspect -f '{{range 
.NetworkSettings.Networks}}{{.IPAddress}}{{end}} %tab% {{.Name}} %tab% {{.Config.Hostname}}' $(docker ps -aq) | sed 's#%tab%#\t#g' | sed 's#/##g' | sort -t . -k 1,1n -k 2,2n -k 3,3n -k 4,4n" - out = run_shell_command(ips_query) - info = [] - for line in out.split('\n'): - container = line.split() - if container[1].strip()[:4] == 'box_': - info.append(container) - return info - +@ensure_outside_container +def get_boxes_container_info(with_seed: bool = False) -> Dict[str, Any]: + # NOTE: this could be cached + IP = 0 + CONTAINER_NAME = 1 + HOSTNAME = 2 + ips_query = "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}} %tab% {{.Name}} %tab% {{.Config.Hostname}}' $(docker ps -aq) | sed 's#%tab%#\t#g' | sed 's#/##g' | sort -t . -k 1,1n -k 2,2n -k 3,3n -k 4,4n" + out = run_shell_command(ips_query) + # FIXME: if things get more complex a class representing a container info might be useful, + # for now representing data this way is faster. + info = {'size': 0, 'ips': [], 'container_names': [], 'hostnames': []} + for line in out.split('\n'): + container = line.split() + # Most commands use hosts only + name_filter = 'box_' if with_seed else 'box_hosts' + if container[1].strip()[: len(name_filter)] == name_filter: + info['size'] += 1 + info['ips'].append(container[IP]) + info['container_names'].append(container[CONTAINER_NAME]) + info['hostnames'].append(container[HOSTNAME]) + return info + + +def get_orch_hosts(): + orch_host_ls_out = run_cephadm_shell_command('ceph orch host ls --format json') + hosts = json.loads(orch_host_ls_out) + return hosts -- 2.39.5