From 35c6363a1eaf5011e26be8561ffcd4e9324d13a2 Mon Sep 17 00:00:00 2001
From: Dan Mick
Date: Mon, 13 Jul 2015 20:48:33 -0700
Subject: [PATCH] calamari_setup: handle new structure in 1.3.0 (MON and OSD
 repos)

Also, clean up the command handling; failure will throw an exception,
so there is no need to accumulate the errors in an int.

Fixes: #12228
Signed-off-by: Dan Mick
---
 tasks/calamari_setup.py | 60 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 50 insertions(+), 10 deletions(-)

diff --git a/tasks/calamari_setup.py b/tasks/calamari_setup.py
index c39b3fa74c749..431e3699393d3 100644
--- a/tasks/calamari_setup.py
+++ b/tasks/calamari_setup.py
@@ -295,22 +295,64 @@ def deploy_ceph(ctx, cal_svr):
     osd_to_name = {}
     all_machines = set()
     all_mons = set()
+    all_osds = set()
+
+    # collect which remotes are osds and which are mons
     for remote in ctx.cluster.remotes:
         all_machines.add(remote.shortname)
         roles = ctx.cluster.remotes[remote]
         for role in roles:
             daemon_type, number = role.split('.')
             if daemon_type == 'osd':
+                all_osds.add(remote.shortname)
                 osd_to_name[number] = remote.shortname
             if daemon_type == 'mon':
                 all_mons.add(remote.shortname)
-    first_cmds = [['new'] + list(all_mons), ['install'] + list(all_machines),
-                  ['mon', 'create-initial']]
-    ret = True
-    for entry in first_cmds:
-        arg_list = ['ceph-deploy'] + entry
-        log.info('Running: %s' % ' '.join(arg_list))
-        ret &= cal_svr.run(args=arg_list).exitstatus
+
+    # figure out whether we're in "1.3+" mode: prior to 1.3, there was
+    # only one Ceph repo, and it was all installed on every Ceph host.
+    # With 1.3, we've split that into MON and OSD repos (in order to
+    # be able to separately track subscriptions per node).  This
+    # requires new switches to ceph-deploy to select which locally-served
+    # repo is connected to which cluster host.
+    #
+    # (TODO: A further issue is that the installation/setup may not have
+    # created local repos at all, but that is the subject of a future
+    # change.)
+
+    r = cal_svr.run(args='/usr/bin/test -d /mnt/MON', check_status=False)
+    use_install_repo = (r.returncode == 0)
+
+    # pre-1.3:
+    #   ceph-deploy new
+    #   ceph-deploy install
+    #   ceph-deploy mon create-initial
+    #
+    # 1.3 and later:
+    #   ceph-deploy new
+    #   ceph-deploy install --repo --release=ceph-mon
+    #   ceph-deploy install --mon
+    #   ceph-deploy install --repo --release=ceph-osd
+    #   ceph-deploy install --osd
+    #   ceph-deploy mon create-initial
+
+    cmds = ['ceph-deploy new ' + ' '.join(all_mons)]
+
+    if use_install_repo:
+        cmds.append('ceph-deploy install --repo --release=ceph-mon ' +
+                    ' '.join(all_mons))
+        cmds.append('ceph-deploy install --mon ' + ' '.join(all_mons))
+        cmds.append('ceph-deploy install --repo --release=ceph-osd ' +
+                    ' '.join(all_osds))
+        cmds.append('ceph-deploy install --osd ' + ' '.join(all_osds))
+    else:
+        cmds.append('ceph-deploy install ' + ' '.join(all_machines))
+
+    cmds.append('ceph-deploy mon create-initial')
+
+    for cmd in cmds:
+        cal_svr.run(args=cmd)
+
     disk_labels = '_dcba'
     # NEEDS WORK assumes disks start with vd (need to check this somewhere)
     for cmd_pts in [['disk', 'zap'], ['osd', 'prepare'], ['osd', 'activate']]:
@@ -325,9 +367,7 @@ def deploy_ceph(ctx, cal_svr):
             if 'activate' in cmd_pts:
                 disk_id += '1'
             arg_list.append(disk_id)
-            log.info('Running: %s' % ' '.join(arg_list))
-            ret &= cal_svr.run(args=arg_list).exitstatus
-    return ret
+            cal_svr.run(args=arg_list)


 def undeploy_ceph(ctx, cal_svr):
-- 
2.39.5
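
For reference, the branching introduced by the patch can be sketched as a
small standalone helper that runs without a teuthology cluster. The helper
name build_ceph_deploy_cmds and the sample host names below are illustrative
assumptions only; they are not part of the patch or of the teuthology API.

def build_ceph_deploy_cmds(use_install_repo, mons, osds, machines):
    """Sketch of the pre-1.3 vs. 1.3-and-later ceph-deploy sequences.

    Illustrative only: the real task derives use_install_repo from the
    presence of /mnt/MON on the Calamari server.
    """
    cmds = ['ceph-deploy new ' + ' '.join(mons)]
    if use_install_repo:
        # 1.3+: separate locally-served MON and OSD repos, selected per role
        cmds.append('ceph-deploy install --repo --release=ceph-mon ' +
                    ' '.join(mons))
        cmds.append('ceph-deploy install --mon ' + ' '.join(mons))
        cmds.append('ceph-deploy install --repo --release=ceph-osd ' +
                    ' '.join(osds))
        cmds.append('ceph-deploy install --osd ' + ' '.join(osds))
    else:
        # pre-1.3: a single repo, installed on every Ceph host
        cmds.append('ceph-deploy install ' + ' '.join(machines))
    cmds.append('ceph-deploy mon create-initial')
    return cmds

if __name__ == '__main__':
    # Hypothetical host names, for illustration only.
    for cmd in build_ceph_deploy_cmds(True, ['mon0'], ['osd0', 'osd1'],
                                      ['mon0', 'osd0', 'osd1']):
        print(cmd)

With use_install_repo=True the sketch prints the six 1.3-style commands; with
False it prints the three pre-1.3 commands.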