exitstatus = proc.exitstatus
return exitstatus
+
@contextlib.contextmanager
def build_ceph_cluster(ctx, config):
"""Build a ceph cluster"""
- log.info('Building ceph cluster using ceph-deploy...')
- testdir = teuthology.get_testdir(ctx)
- ceph_branch = None
- if config.get('branch') is not None:
- cbranch = config.get('branch')
- for var, val in cbranch.iteritems():
- if var == 'testing':
- ceph_branch = '--{var}'.format(var=var)
- ceph_branch = '--{var}={val}'.format(var=var, val=val)
- node_dev_list = []
- all_nodes = get_all_nodes(ctx, config)
- mds_nodes = get_nodes_using_roles(ctx, config, 'mds')
- mds_nodes = " ".join(mds_nodes)
- mon_node = get_nodes_using_roles(ctx, config, 'mon')
- mon_nodes = " ".join(mon_node)
- new_mon = './ceph-deploy new'+" "+mon_nodes
- install_nodes = './ceph-deploy install '+ceph_branch+" "+all_nodes
- purge_nodes = './ceph-deploy purge'+" "+all_nodes
- purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
- mon_hostname = mon_nodes.split(' ')[0]
- mon_hostname = str(mon_hostname)
- gather_keys = './ceph-deploy gatherkeys'+" "+mon_hostname
- deploy_mds = './ceph-deploy mds create'+" "+mds_nodes
- no_of_osds = 0
-
- if mon_nodes is None:
- raise RuntimeError("no monitor nodes in the config file")
-
- estatus_new = execute_ceph_deploy(ctx, config, new_mon)
- if estatus_new != 0:
- raise RuntimeError("ceph-deploy: new command failed")
-
- log.info('adding config inputs...')
- testdir = teuthology.get_testdir(ctx)
- conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
- first_mon = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(first_mon).remotes.keys()
-
- lines = None
- if config.get('conf') is not None:
- confp = config.get('conf')
- for section, keys in confp.iteritems():
- lines = '[{section}]\n'.format(section=section)
- teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True)
- for key, value in keys.iteritems():
- log.info("[%s] %s = %s" % (section, key, value))
- lines = '{key} = {value}\n'.format(key=key, value=value)
- teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True)
-
- estatus_install = execute_ceph_deploy(ctx, config, install_nodes)
- if estatus_install != 0:
- raise RuntimeError("ceph-deploy: Failed to install ceph")
-
- mon_no = None
- mon_no = config.get('mon_initial_members')
- if mon_no is not None:
- i = 0
- mon1 = []
- while(i < mon_no):
- mon1.append(mon_node[i])
- i = i + 1
- initial_mons = " ".join(mon1)
- for k in range(mon_no, len(mon_node)):
- mon_create_nodes = './ceph-deploy mon create'+" "+initial_mons+" "+mon_node[k]
+
+ try:
+ log.info('Building ceph cluster using ceph-deploy...')
+ testdir = teuthology.get_testdir(ctx)
+ ceph_branch = None
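+        # Turn the optional 'branch' config into a ceph-deploy install
+        # flag, e.g. --dev=<branch>, --stable=<release>, or bare --testing.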
+ if config.get('branch') is not None:
+ cbranch = config.get('branch')
+ for var, val in cbranch.iteritems():
+                if var == 'testing':
+                    ceph_branch = '--{var}'.format(var=var)
+                else:
+                    ceph_branch = '--{var}={val}'.format(var=var, val=val)
+ all_nodes = get_all_nodes(ctx, config)
+ mds_nodes = get_nodes_using_roles(ctx, config, 'mds')
+ mds_nodes = " ".join(mds_nodes)
+ mon_node = get_nodes_using_roles(ctx, config, 'mon')
+ mon_nodes = " ".join(mon_node)
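+        # Pre-build the ceph-deploy command lines used by this task.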
+        new_mon = './ceph-deploy new ' + mon_nodes
+        if ceph_branch:
+            install_nodes = './ceph-deploy install ' + ceph_branch + \
+                ' ' + all_nodes
+        else:
+            install_nodes = './ceph-deploy install ' + all_nodes
+        purge_nodes = './ceph-deploy purge ' + all_nodes
+        purgedata_nodes = './ceph-deploy purgedata ' + all_nodes
+        mon_hostname = mon_nodes.split(' ')[0]
+        gather_keys = './ceph-deploy gatherkeys ' + mon_hostname
+        deploy_mds = './ceph-deploy mds create ' + mds_nodes
+ no_of_osds = 0
+
+        if not mon_nodes:
+ raise RuntimeError("no monitor nodes in the config file")
+
+ estatus_new = execute_ceph_deploy(ctx, config, new_mon)
+ if estatus_new != 0:
+ raise RuntimeError("ceph-deploy: new command failed")
+
+ log.info('adding config inputs...')
+ testdir = teuthology.get_testdir(ctx)
+ conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (remote,) = ctx.cluster.only(first_mon).remotes.keys()
+
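+        # Append any extra conf sections/keys from the task config to the
+        # ceph.conf generated by 'ceph-deploy new'.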
+ if config.get('conf') is not None:
+ confp = config.get('conf')
+ for section, keys in confp.iteritems():
+ lines = '[{section}]\n'.format(section=section)
+ teuthology.append_lines_to_file(remote, conf_path, lines,
+ sudo=True)
+ for key, value in keys.iteritems():
+ log.info("[%s] %s = %s" % (section, key, value))
+ lines = '{key} = {value}\n'.format(key=key, value=value)
+ teuthology.append_lines_to_file(remote, conf_path, lines,
+ sudo=True)
+
+ estatus_install = execute_ceph_deploy(ctx, config, install_nodes)
+ if estatus_install != 0:
+ raise RuntimeError("ceph-deploy: Failed to install ceph")
+
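+        # With 'mon_initial_members', bootstrap only that many monitors
+        # first, then grow the quorum one monitor at a time; otherwise
+        # create all monitors in one shot.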
+        mon_no = config.get('mon_initial_members')
+ if mon_no is not None:
+            initial_mons = " ".join(mon_node[:mon_no])
+ for k in range(mon_no, len(mon_node)):
+                mon_create_nodes = './ceph-deploy mon create ' + \
+                    initial_mons + ' ' + mon_node[k]
+ estatus_mon = execute_ceph_deploy(ctx, config,
+ mon_create_nodes)
+ if estatus_mon != 0:
+ raise RuntimeError("ceph-deploy: Failed to create monitor")
+ else:
+ mon_create_nodes = './ceph-deploy mon create-initial'
estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
if estatus_mon != 0:
- raise RuntimeError("ceph-deploy: Failed to create monitor")
- else:
- mon_create_nodes = './ceph-deploy mon create-initial'
- estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
- if estatus_mon != 0:
- raise RuntimeError("ceph-deploy: Failed to create monitors")
-
- estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
- max_gather_tries = 90
- gather_tries = 0
- while (estatus_gather != 0):
- gather_tries += 1
- if gather_tries >= max_gather_tries:
- msg = 'ceph-deploy was not able to gatherkeys after 15 minutes'
- raise RuntimeError(msg)
- estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
- time.sleep(10)
+ raise RuntimeError("ceph-deploy: Failed to create monitors")
- if mds_nodes:
- estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds)
- if estatus_mds != 0:
- raise RuntimeError("ceph-deploy: Failed to deploy mds")
-
- if config.get('test_mon_destroy') is not None:
- for d in range(1, len(mon_node)):
- mon_destroy_nodes = './ceph-deploy mon destroy'+" "+mon_node[d]
- estatus_mon_d = execute_ceph_deploy(ctx, config, mon_destroy_nodes)
- if estatus_mon_d != 0:
- raise RuntimeError("ceph-deploy: Failed to delete monitor")
-
- node_dev_list = get_dev_for_osd(ctx, config)
- for d in node_dev_list:
- osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d
- estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
- if estatus_osd == 0:
- log.info('successfully created osd')
- no_of_osds += 1
- else:
- zap_disk = './ceph-deploy disk zap'+" "+d
- execute_ceph_deploy(ctx, config, zap_disk)
+ estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
+ max_gather_tries = 90
+ gather_tries = 0
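+        # Keys only become available once the monitors have formed a
+        # quorum; retry every 10 seconds, up to 90 attempts (~15 minutes).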
+        while estatus_gather != 0:
+ gather_tries += 1
+ if gather_tries >= max_gather_tries:
+ msg = 'ceph-deploy was not able to gatherkeys after 15 minutes'
+ raise RuntimeError(msg)
+ estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
+ time.sleep(10)
+
+ if mds_nodes:
+ estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds)
+ if estatus_mds != 0:
+ raise RuntimeError("ceph-deploy: Failed to deploy mds")
+
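+        # Optionally exercise 'mon destroy' on every monitor except the
+        # first when 'test_mon_destroy' is set.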
+ if config.get('test_mon_destroy') is not None:
+ for d in range(1, len(mon_node)):
+                mon_destroy_nodes = './ceph-deploy mon destroy ' + mon_node[d]
+ estatus_mon_d = execute_ceph_deploy(ctx, config,
+ mon_destroy_nodes)
+ if estatus_mon_d != 0:
+ raise RuntimeError("ceph-deploy: Failed to delete monitor")
+
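+        # Create an OSD for each node:device entry; on failure, zap the
+        # disk explicitly and retry the create once before giving up.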
+ node_dev_list = get_dev_for_osd(ctx, config)
+ for d in node_dev_list:
+            osd_create_cmds = './ceph-deploy osd create --zap-disk ' + d
estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
if estatus_osd == 0:
log.info('successfully created osd')
no_of_osds += 1
else:
- raise RuntimeError("ceph-deploy: Failed to create osds")
-
- if config.get('wait-for-healthy', True) and no_of_osds >= 2:
- is_healthy(ctx=ctx, config=None)
-
- log.info('Setting up client nodes...')
- conf_path = '/etc/ceph/ceph.conf'
- admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
- conf_data = teuthology.get_file(
- remote=mon0_remote,
- path=conf_path,
- sudo=True,
- )
- admin_keyring = teuthology.get_file(
- remote=mon0_remote,
- path=admin_keyring_path,
- sudo=True,
- )
-
- clients = ctx.cluster.only(teuthology.is_type('client'))
- for remot, roles_for_host in clients.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
- client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
- mon0_remote.run(
- args=[
- 'cd',
- '{tdir}'.format(tdir=testdir),
- run.Raw('&&'),
- 'sudo','bash','-c',
- run.Raw('"'),'ceph',
- 'auth',
- 'get-or-create',
- 'client.{id}'.format(id=id_),
- 'mds', 'allow',
- 'mon', 'allow *',
- 'osd', 'allow *',
- run.Raw('>'),
- client_keyring,
- run.Raw('"'),
- ],
- )
- key_data = teuthology.get_file(
- remote=mon0_remote,
- path=client_keyring,
- sudo=True,
- )
- teuthology.sudo_write_file(
- remote=remot,
- path=client_keyring,
- data=key_data,
- perms='0644'
+                zap_disk = './ceph-deploy disk zap ' + d
+ execute_ceph_deploy(ctx, config, zap_disk)
+ estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
+ if estatus_osd == 0:
+ log.info('successfully created osd')
+ no_of_osds += 1
+ else:
+ raise RuntimeError("ceph-deploy: Failed to create osds")
+
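+        # Only wait for HEALTH_OK when at least two OSDs were created;
+        # fewer can never reach active+clean with default pool sizes.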
+ if config.get('wait-for-healthy', True) and no_of_osds >= 2:
+ is_healthy(ctx=ctx, config=None)
+
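+        # Push ceph.conf and keyrings from the first monitor out to every
+        # client node.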
+ log.info('Setting up client nodes...')
+ conf_path = '/etc/ceph/ceph.conf'
+ admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
+ conf_data = teuthology.get_file(
+ remote=mon0_remote,
+ path=conf_path,
+ sudo=True,
)
- teuthology.sudo_write_file(
- remote=remot,
- path=admin_keyring_path,
- data=admin_keyring,
- perms='0644'
+ admin_keyring = teuthology.get_file(
+ remote=mon0_remote,
+ path=admin_keyring_path,
+ sudo=True,
)
- teuthology.sudo_write_file(
- remote=remot,
- path=conf_path,
- data=conf_data,
- perms='0644'
- )
- else:
- raise RuntimeError("The cluster is NOT operational due to insufficient OSDs")
- try:
+ clients = ctx.cluster.only(teuthology.is_type('client'))
+ for remot, roles_for_host in clients.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
+ client_keyring = \
+ '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
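+                # Generate the client key on the first mon with
+                # 'ceph auth get-or-create' and capture it into a keyring.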
+ mon0_remote.run(
+ args=[
+ 'cd',
+ '{tdir}'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'sudo', 'bash', '-c',
+ run.Raw('"'), 'ceph',
+ 'auth',
+ 'get-or-create',
+ 'client.{id}'.format(id=id_),
+ 'mds', 'allow',
+ 'mon', 'allow *',
+ 'osd', 'allow *',
+ run.Raw('>'),
+ client_keyring,
+ run.Raw('"'),
+ ],
+ )
+ key_data = teuthology.get_file(
+ remote=mon0_remote,
+ path=client_keyring,
+ sudo=True,
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=client_keyring,
+ data=key_data,
+ perms='0644'
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=admin_keyring_path,
+ data=admin_keyring,
+ perms='0644'
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=conf_path,
+ data=conf_data,
+ perms='0644'
+ )
+ else:
+ raise RuntimeError(
+ "The cluster is NOT operational due to insufficient OSDs")
yield
finally:
log.info('Stopping ceph...')
- ctx.cluster.run(args=[
- 'sudo', 'stop', 'ceph-all',
- run.Raw('||'),
- 'sudo', 'service', 'ceph', 'stop'
- ])
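+        # 'stop ceph-all' covers upstart-managed nodes; fall back to
+        # 'service ceph stop' for sysvinit.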
+        ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
+                              'sudo', 'service', 'ceph', 'stop'])
if ctx.archive is not None:
# archive mon data, too
log.info('Purging data...')
execute_ceph_deploy(ctx, config, purgedata_nodes)
+
@contextlib.contextmanager
def task(ctx, config):
"""