git-server-git.apps.pok.os.sepia.ceph.com Git - teuthology.git/commitdiff
ceph-deploy: copy latest from master branch
author: Sage Weil <sage@inktank.com>
Sun, 28 Jul 2013 14:43:05 +0000 (07:43 -0700)
committer: Sage Weil <sage@inktank.com>
Sun, 28 Jul 2013 14:43:05 +0000 (07:43 -0700)
teuthology/task/ceph-deploy.py

index 5af93c632ac1199f5c97a7e30fd64dec77cd1577..d1c51ea60f22e6695e4d6171efe47790bf957bbc 100644 (file)
@@ -60,7 +60,6 @@ def is_healthy(ctx, config):
                 run.Raw('&&'),
                 'sudo', 'ceph',
                 'health',
-                '--concise',
                 ],
             stdout=StringIO(),
             logger=log.getChild('health'),
@@ -145,102 +144,161 @@ def build_ceph_cluster(ctx, config):
     install_nodes = './ceph-deploy install '+ceph_branch+" "+all_nodes
     purge_nodes = './ceph-deploy purge'+" "+all_nodes
     purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
-    mon_create_nodes = './ceph-deploy mon create'+" "+mon_nodes
     mon_hostname = mon_nodes.split(' ')[0]
     mon_hostname = str(mon_hostname)
     gather_keys = './ceph-deploy gatherkeys'+" "+mon_hostname
     deploy_mds = './ceph-deploy mds create'+" "+mds_nodes
+    no_of_osds = 0
 
-    if mon_nodes is not None:
-        estatus_new = execute_ceph_deploy(ctx, config, new_mon)
-        if estatus_new == 0:
-            estatus_install = execute_ceph_deploy(ctx, config, install_nodes)
-            if estatus_install==0:
-                estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
-                if estatus_mon==0:
-                    estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
-                    if estatus_gather != 0:
-                        while (estatus_gather != 0):
-                            execute_ceph_deploy(ctx, config, mon_create_nodes)
-                            estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
-                            if estatus_gather == 0:
-                                break
-                    estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds)
-                    if estatus_mds==0:
-                        node_dev_list = get_dev_for_osd(ctx, config)
-                        for d in node_dev_list:
-                            osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d
-                            estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
-                            if estatus_osd==0:
-                                log.info('success')
-                            else:
-                                zap_disk = './ceph-deploy zapdisk'+" "+d
-                                execute_ceph_deploy(ctx, config, zap_disk)
-                                estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
-                                if estatus_osd==0:
-                                    log.info('successfully created osd')
-                                else:
-                                    log.info('failed to create osd')
-                    else:
-                        log.info('failed to deploy mds')
-                else:
-                    log.info('failed to create monitors')
-            else:
-                  log.info('failed to install ceph')
-        else:
-            log.info('failed to create config file and monitor keyring')
-    else:
-        log.info('no monitor nodes in the config file')
+    if mon_nodes is None:
+        raise Exception("no monitor nodes in the config file")
+
+    estatus_new = execute_ceph_deploy(ctx, config, new_mon)
+    if estatus_new != 0:
+        raise Exception("ceph-deploy: new command failed")
 
-    log.info('Setting up client nodes...')
-    conf_path = '/etc/ceph/ceph.conf'
+    log.info('adding config inputs...')
+    testdir = teuthology.get_testdir(ctx)
+    conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
     first_mon = teuthology.get_first_mon(ctx, config)
-    (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
-    conf_data = teuthology.get_file(
-        remote=mon0_remote,
-        path=conf_path,
-        sudo=True,
-        )
+    (remote,) = ctx.cluster.only(first_mon).remotes.keys()
 
-    clients = ctx.cluster.only(teuthology.is_type('client'))
-    for remot, roles_for_host in clients.remotes.iteritems():
-        for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
-            client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
-            mon0_remote.run(
-                args=[
-                    'cd',
-                    '{tdir}'.format(tdir=testdir),
-                    run.Raw('&&'),
-                    'sudo','bash','-c',
-                    run.Raw('"'),'ceph',
-                    'auth',
-                    'get-or-create',
-                    'client.{id}'.format(id=id_),
-                    'mds', 'allow',
-                    'mon', 'allow *',
-                    'osd', 'allow *',
-                    run.Raw('>'),
-                    client_keyring,
-                    run.Raw('"'),
-                    ],
-                )
-            key_data = teuthology.get_file(
-                remote=mon0_remote,
-                path=client_keyring,
-                sudo=True,
-                )
-            teuthology.sudo_write_file(
-                remote=remot,
-                path=client_keyring,
-                data=key_data,
-                perms='0644'
+    lines = None
+    if config.get('conf') is not None:
+        confp = config.get('conf')
+        for section, keys in confp.iteritems():
+                lines = '[{section}]\n'.format(section=section)
+                teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True)
+                for key, value in keys.iteritems():
+                    log.info("[%s] %s = %s" % (section, key, value))
+                    lines = '{key} = {value}\n'.format(key=key, value=value)
+                    teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True)
+
+    estatus_install = execute_ceph_deploy(ctx, config, install_nodes)
+    if estatus_install != 0:
+        raise Exception("ceph-deploy: Failed to install ceph")
+
+    mon_no = None
+    mon_no = config.get('mon_initial_members')
+    if mon_no is not None:
+        i = 0
+        mon1 = []
+        while(i < mon_no):
+            mon1.append(mon_node[i])
+            i = i + 1
+        initial_mons = " ".join(mon1)
+        for k in range(mon_no, len(mon_node)):
+            mon_create_nodes = './ceph-deploy mon create'+" "+initial_mons+" "+mon_node[k]
+            estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
+            if estatus_mon != 0:
+                raise Exception("ceph-deploy: Failed to create monitor")
+    else:
+        mon_create_nodes = './ceph-deploy mon create'+" "+mon_nodes
+        estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
+        if estatus_mon != 0:
+            raise Exception("ceph-deploy: Failed to create monitors")
+
+    estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
+    while (estatus_gather != 0):
+        #mon_create_nodes = './ceph-deploy mon create'+" "+mon_node[0]
+        #execute_ceph_deploy(ctx, config, mon_create_nodes)
+        estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
+
+    estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds)
+    if estatus_mds != 0:
+        raise Exception("ceph-deploy: Failed to deploy mds")
+
+    if config.get('test_mon_destroy') is not None:
+        for d in range(1, len(mon_node)):
+            mon_destroy_nodes = './ceph-deploy mon destroy'+" "+mon_node[d]
+            estatus_mon_d = execute_ceph_deploy(ctx, config, mon_destroy_nodes)
+            if estatus_mon_d != 0:
+                raise Exception("ceph-deploy: Failed to delete monitor")
+
+    node_dev_list = get_dev_for_osd(ctx, config)
+    for d in node_dev_list:
+        osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d
+        estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
+        if estatus_osd == 0:
+            log.info('successfully created osd')
+            no_of_osds += 1
+        else:
+            zap_disk = './ceph-deploy disk zap'+" "+d
+            execute_ceph_deploy(ctx, config, zap_disk)
+            estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
+            if estatus_osd == 0:
+                log.info('successfully created osd')
+                no_of_osds += 1
+            else:
+                raise Exception("ceph-deploy: Failed to create osds")
+
+    if config.get('wait-for-healthy', True) and no_of_osds >= 2:
+        is_healthy(ctx=ctx, config=None)
+
+        log.info('Setting up client nodes...')
+        conf_path = '/etc/ceph/ceph.conf'
+        admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
+        first_mon = teuthology.get_first_mon(ctx, config)
+        (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
+        conf_data = teuthology.get_file(
+            remote=mon0_remote,
+            path=conf_path,
+            sudo=True,
             )
-            teuthology.sudo_write_file(
-                remote=remot,
-                path=conf_path,
-                data=conf_data,
-                perms='0644'
+        admin_keyring = teuthology.get_file(
+            remote=mon0_remote,
+            path=admin_keyring_path,
+            sudo=True,
             )
+
+        clients = ctx.cluster.only(teuthology.is_type('client'))
+        for remot, roles_for_host in clients.remotes.iteritems():
+            for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
+                client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+                mon0_remote.run(
+                    args=[
+                        'cd',
+                        '{tdir}'.format(tdir=testdir),
+                        run.Raw('&&'),
+                        'sudo','bash','-c',
+                        run.Raw('"'),'ceph',
+                        'auth',
+                        'get-or-create',
+                        'client.{id}'.format(id=id_),
+                        'mds', 'allow',
+                        'mon', 'allow *',
+                        'osd', 'allow *',
+                        run.Raw('>'),
+                        client_keyring,
+                        run.Raw('"'),
+                        ],
+                    )
+                key_data = teuthology.get_file(
+                    remote=mon0_remote,
+                    path=client_keyring,
+                    sudo=True,
+                    )
+                teuthology.sudo_write_file(
+                    remote=remot,
+                    path=client_keyring,
+                    data=key_data,
+                    perms='0644'
+                )
+                teuthology.sudo_write_file(
+                    remote=remot,
+                    path=admin_keyring_path,
+                    data=admin_keyring,
+                    perms='0644'
+                )
+                teuthology.sudo_write_file(
+                    remote=remot,
+                    path=conf_path,
+                    data=conf_data,
+                    perms='0644'
+                )
+    else:
+        raise Exception("The cluster is NOT operational due to insufficient OSDs")
+
     try:
         yield
 
@@ -317,6 +375,7 @@ def task(ctx, config):
         - ceph-deploy:
              branch:
                 stable: bobtail
+             mon_initial_members: 1
 
         tasks:
         - install:
@@ -325,6 +384,9 @@ def task(ctx, config):
         - ceph-deploy:
              branch:
                 dev: master
+             conf:
+                mon:
+                   debug mon = 20
 
         tasks:
         - install:
@@ -336,9 +398,16 @@ def task(ctx, config):
     """
     if config is None:
         config = {}
+
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
+
     assert isinstance(config, dict), \
         "task ceph-deploy only supports a dictionary for configuration"
 
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
+
     if config.get('branch') is not None:
         assert isinstance(config['branch'], dict), 'branch must be a dictionary'
 
@@ -346,9 +415,10 @@ def task(ctx, config):
          lambda: ceph_fn.ship_utilities(ctx=ctx, config=None),
          lambda: download_ceph_deploy(ctx=ctx, config=config),
          lambda: build_ceph_cluster(ctx=ctx, config=dict(
+                 conf=config.get('conf', {}),
                  branch=config.get('branch',{}),
+                 mon_initial_members=config.get('mon_initial_members', None),
+                 test_mon_destroy=config.get('test_mon_destroy', None),
                  )),
         ):
-        if config.get('wait-for-healthy', True):
-          is_healthy(ctx=ctx, config=None)
         yield