]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-deploy.git/commitdiff
osd.py: rename function parameter on functions prepare_disk/prepare
authorDanny Al-Gaaf <danny.al-gaaf@bisect.de>
Thu, 21 Mar 2013 13:56:35 +0000 (14:56 +0100)
committerDanny Al-Gaaf <danny.al-gaaf@bisect.de>
Thu, 21 Mar 2013 13:56:35 +0000 (14:56 +0100)
Renamed the function parameter that was redefining the name 'activate'
from the outer scope.

Signed-off-by: Danny Al-Gaaf <danny.al-gaaf@bisect.de>
40 files changed:
.gitignore [new file with mode: 0644]
LICENSE [new file with mode: 0644]
MANIFEST.in [new file with mode: 0644]
README.rst [new file with mode: 0644]
bootstrap [new file with mode: 0755]
ceph_deploy/__init__.py [new file with mode: 0644]
ceph_deploy/admin.py [new file with mode: 0644]
ceph_deploy/cli.py [new file with mode: 0644]
ceph_deploy/cliutil.py [new file with mode: 0644]
ceph_deploy/conf.py [new file with mode: 0644]
ceph_deploy/config.py [new file with mode: 0644]
ceph_deploy/discover.py [new file with mode: 0644]
ceph_deploy/exc.py [new file with mode: 0644]
ceph_deploy/forgetkeys.py [new file with mode: 0644]
ceph_deploy/gatherkeys.py [new file with mode: 0644]
ceph_deploy/install.py [new file with mode: 0644]
ceph_deploy/lsb.py [new file with mode: 0644]
ceph_deploy/mds.py [new file with mode: 0644]
ceph_deploy/memoize.py [new file with mode: 0644]
ceph_deploy/misc.py [new file with mode: 0644]
ceph_deploy/mon.py [new file with mode: 0644]
ceph_deploy/new.py [new file with mode: 0644]
ceph_deploy/osd.py [new file with mode: 0644]
ceph_deploy/sudo_pushy.py [new file with mode: 0644]
ceph_deploy/test/__init__.py [new file with mode: 0644]
ceph_deploy/test/conftest.py [new file with mode: 0644]
ceph_deploy/test/directory.py [new file with mode: 0644]
ceph_deploy/test/test_cli.py [new file with mode: 0644]
ceph_deploy/test/test_cli_install.py [new file with mode: 0644]
ceph_deploy/test/test_cli_mon.py [new file with mode: 0644]
ceph_deploy/test/test_cli_new.py [new file with mode: 0644]
ceph_deploy/test/test_cli_osd.py [new file with mode: 0644]
ceph_deploy/test/test_conf.py [new file with mode: 0644]
ceph_deploy/validate.py [new file with mode: 0644]
ceph_deploy/zapdisk.py [new file with mode: 0644]
requirements-dev.txt [new file with mode: 0644]
requirements.txt [new file with mode: 0644]
setup.cfg [new file with mode: 0644]
setup.py [new file with mode: 0644]
tox.ini [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..9594060
--- /dev/null
@@ -0,0 +1,18 @@
+*~
+.#*
+## the next line needs to start with a backslash to avoid looking like
+## a comment
+\#*#
+.*.swp
+
+*.pyc
+*.pyo
+*.egg-info
+/build
+/dist
+
+/virtualenv
+/.tox
+
+/ceph-deploy
+/*.conf
diff --git a/LICENSE b/LICENSE
new file mode 100644 (file)
index 0000000..26624cf
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2012 Inktank Storage, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644 (file)
index 0000000..bb37a27
--- /dev/null
@@ -0,0 +1 @@
+include *.rst
diff --git a/README.rst b/README.rst
new file mode 100644 (file)
index 0000000..8662a1c
--- /dev/null
@@ -0,0 +1,200 @@
+========================================================
+ ceph-deploy -- Deploy Ceph with minimal infrastructure
+========================================================
+
+``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to
+the servers, ``sudo``, and some Python. It runs fully on your
+workstation, requiring no servers, databases, or anything like that.
+
+If you set up and tear down Ceph clusters a lot, and want minimal
+extra bureaucracy, this is for you.
+
+It is not a generic deployment system, it is only for Ceph, and is designed
+for users who want to quickly get Ceph running with sensible initial settings
+without the overhead of installing Chef, Puppet or Juju.
+
+It does not handle client configuration beyond pushing the Ceph config file
+and users who want fine-control over security settings, partitions or directory 
+locations should use a tool such as Chef or Puppet.
+
+Setup
+=====
+
+To get the source tree ready for use, run this once::
+
+  ./bootstrap
+
+You can symlink the ``ceph-deploy`` script in this somewhere
+convenient (like ``~/bin``), or add the current directory to ``PATH``,
+or just always type the full path to ``ceph-deploy``.
+
+ceph-deploy at a minimum requires that the machine from which the script is
+being run can ssh as root without password into each Ceph node. 
+
+To enable this generate a new ssh keypair for the root user with no passphrase
+and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in:
+
+ /root/.ssh/authorized_keys
+
+and ensure that the following lines are in the sshd config:
+
+ PermitRootLogin yes
+ PermitEmptyPasswords yes
+
+The machine running ceph-deploy does not need to have the Ceph packages installed
+unless it needs to admin the cluster directly using the ``ceph`` command line tool.
+Managing an existing cluster
+============================
+
+You can use ceph-deploy to provision nodes for an existing cluster.
+To grab a copy of the cluster configuration file (normally
+``ceph.conf``)::
+
+ ceph-deploy discover HOST
+
+You will usually also want to gather the encryption keys used for that
+cluster:
+
+ ceph-deploy gatherkeys MONHOST
+
+At this point you can skip the steps below that create a new cluster
+(you already have one) and optionally skip installation and/or monitor
+creation, depending on what you are trying to accomplish.
+
+
+Creating a new cluster
+======================
+
+Creating a new configuration
+----------------------------
+
+To create a new configuration file and secret key, decide what hosts
+will run ``ceph-mon``, and run::
+
+  ceph-deploy new MON [MON..]
+
+listing the hostnames of the monitors.  Each ``MON`` can be
+
+ * a simple hostname.  It must be DNS resolvable without the fully
+   qualified domain name.
+ * a fully qualified domain name.  The hostname is assumed to be the
+   leading component up to the first ``.``.
+ * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified
+   domain name or IP address.  For example, ``foo``,
+   ``foo.example.com``, ``foo:something.example.com``, and
+   ``foo:1.2.3.4`` are all valid.  Note, however, that the hostname
+   should match that configured on the host ``foo``.
+
+The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your
+current directory.
+
+
+Edit initial cluster configuration
+----------------------------------
+
+You want to review the generated ``ceph.conf`` file and make sure that
+the ``mon_host`` setting contains the IP addresses you would like the
+monitors to bind to.  These are the IPs that clients will initially
+contact to authenticate to the cluster, and they need to be reachable
+both by external client-facing hosts and internal cluster daemons.
+
+Installing packages
+===================
+
+To install the Ceph software on the servers, run::
+
+  ceph-deploy install HOST [HOST..]
+
+This installs the current default *stable* release. You can choose a
+different release track with command line options, for example to use
+a release candidate::
+
+  ceph-deploy install --testing HOST 
+
+Or to test a development branch::
+
+  ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..] 
+
+
+Deploying monitors
+==================
+
+To actually deploy ``ceph-mon`` to the hosts you chose, run::
+
+  ceph-deploy mon create HOST [HOST..]
+
+Without explicit hosts listed, hosts in ``mon_initial_members`` in the
+config file are deployed. That is, the hosts you passed to
+``ceph-deploy new`` are the default value here.
+
+Gather keys
+===========
+
+To gather authentication keys (for administering the cluster and
+bootstrapping new nodes) to the local directory, run::
+
+  ceph-deploy gatherkeys HOST [HOST...]
+
+where ``HOST`` is one of the monitor hosts.
+
+Once these keys are in the local directory, you can provision new OSDs etc.
+
+
+Deploying OSDs
+==============
+
+To prepare a node for running OSDs, run::
+
+  ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...]
+
+After that, the hosts will be running OSDs for the given data disks.
+If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be
+created and GPT labels will be used to mark and automatically activate
+OSD volumes.  If an existing partition is specified, the partition
+table will not be modified.  If you want to destroy the existing
+partition table on DISK first, you can include the ``--zap-disk``
+option.
+
+If there is already a prepared disk or directory that is ready to become an
+OSD, you can also do:
+
+ ceph-deploy osd activate HOST:DIR[:JOURNAL] [...]
+
+This is useful when you are managing the mounting of volumes yourself.
+
+
+Admin hosts
+===========
+
+To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring``
+keyring so that it can administer the cluster, run::
+
+  ceph-deploy admin HOST [HOST ...]
+
+Forget keys
+===========
+
+The ``new`` and ``gatherkeys`` put some Ceph authentication keys in keyrings in
+the local directory.  If you are worried about them being there for security
+reasons, run::
+
+  ceph-deploy forgetkeys
+
+and they will be removed.  If you need them again later to deploy additional
+nodes, simply re-run::
+
+  ceph-deploy gatherkeys HOST [HOST...]
+
+and they will be retrieved from an existing monitor node.
+
+Multiple clusters
+=================
+
+All of the above commands take a ``--cluster=NAME`` option, allowing
+you to manage multiple clusters conveniently from one workstation.
+For example::
+
+  ceph-deploy --cluster=us-west new
+  vi us-west.conf
+  ceph-deploy --cluster=us-west mon
diff --git a/bootstrap b/bootstrap
new file mode 100755 (executable)
index 0000000..7d1a7de
--- /dev/null
+++ b/bootstrap
@@ -0,0 +1,42 @@
+#!/bin/sh
+set -e
+
+if command -v lsb_release >/dev/null 2>&1; then
+    case "$(lsb_release --id --short)" in
+       Ubuntu|Debian)
+           for package in python-virtualenv; do
+               if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then
+                    # add a space after old values
+                   missing="${missing:+$missing }$package"
+               fi
+           done
+           if [ -n "$missing" ]; then
+                       echo "$0: missing required packages, please install them:" 1>&2
+                       echo "  sudo apt-get install $missing"
+                       exit 1
+           fi
+           ;;
+    esac
+else
+       if [ -f /etc/redhat-release ]; then
+               case "$(cat /etc/redhat-release | awk '{print $1}')" in
+                       CentOS)
+                               for package in python-virtualenv; do
+                               if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then
+                                       missing="${missing:+$missing }$package"
+                               fi
+                               done
+                               if [ -n "$missing" ]; then
+                                       echo "$0: missing required packages, please install them:" 1>&2
+                                       echo "  sudo yum install $missing"
+                               exit 1
+                               fi
+                               ;;
+               esac
+       fi
+fi
+
+test -d virtualenv || virtualenv virtualenv
+./virtualenv/bin/python setup.py develop
+./virtualenv/bin/pip install -r requirements.txt -r requirements-dev.txt
+test -e ceph-deploy || ln -s virtualenv/bin/ceph-deploy .
diff --git a/ceph_deploy/__init__.py b/ceph_deploy/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/ceph_deploy/admin.py b/ceph_deploy/admin.py
new file mode 100644 (file)
index 0000000..0291747
--- /dev/null
@@ -0,0 +1,78 @@
+import logging
+
+from cStringIO import StringIO
+
+from . import exc
+from . import conf
+from .cliutil import priority
+
+
+LOG = logging.getLogger(__name__)
+
+def write_file(path, content):
+    try:
+        with file(path, 'w') as f:
+            f.write(content)
+    except:
+        pass        
+
+def admin(args):
+    cfg = conf.load(args)
+    conf_data = StringIO()
+    cfg.write(conf_data)
+
+    try:
+        with file('%s.client.admin.keyring' % args.cluster, 'rb') as f:
+            keyring = f.read()
+    except:
+        raise RuntimeError('%s.client.admin.keyring not found' %
+                           args.cluster)
+
+    errors = 0
+    for hostname in args.client:
+        LOG.debug('Pushing admin keys and conf to %s', hostname)
+        try:
+            sudo = args.pushy('ssh+sudo:{hostname}'.format(
+                    hostname=hostname,
+                    ))
+
+            write_conf_r = sudo.compile(conf.write_conf)
+            write_conf_r(
+                cluster=args.cluster,
+                conf=conf_data.getvalue(),
+                overwrite=args.overwrite_conf,
+                )
+
+            sudo = args.pushy('ssh+sudo:{hostname}'.format(
+                    hostname=hostname,
+                    ))
+            write_file_r = sudo.compile(write_file)
+            error = write_file_r(
+                '/etc/ceph/%s.client.admin.keyring' % args.cluster,
+                keyring
+                )
+            if error is not None:
+                raise exc.GenericError(error)
+
+        except RuntimeError as e:
+            LOG.error(e)
+            errors += 1
+
+    if errors:
+        raise exc.GenericError('Failed to configure %d admin hosts' % errors)
+
+
+@priority(70)
+def make(parser):
+    """
+    Push configuration and client.admin key to a remote host.
+    """
+    parser.add_argument(
+        'client',
+        metavar='HOST',
+        nargs='*',
+        help='host to configure for ceph administration',
+        )
+    parser.set_defaults(
+        func=admin,
+        )
diff --git a/ceph_deploy/cli.py b/ceph_deploy/cli.py
new file mode 100644 (file)
index 0000000..7ceee8e
--- /dev/null
@@ -0,0 +1,101 @@
+import pkg_resources
+import argparse
+import logging
+import pushy
+import sys
+
+from . import exc
+from . import validate
+from . import sudo_pushy
+
+
+LOG = logging.getLogger(__name__)
+
+
+def parse_args(args=None, namespace=None):
+    parser = argparse.ArgumentParser(
+        description='Deploy Ceph',
+        )
+    parser.add_argument(
+        '-v', '--verbose',
+        action='store_true', dest='verbose', default=True,
+        help='be more verbose',
+        )
+    parser.add_argument(
+        '-q', '--quiet',
+        action='store_false', dest='verbose',
+        help='be less verbose',
+        )
+    parser.add_argument(
+        '-n', '--dry-run',
+        action='store_true', dest='dry_run',
+        help='do not perform any action, but report what would be done',
+        )
+    parser.add_argument(
+        '--overwrite-conf',
+        action='store_true',
+        help='overwrite an existing conf file on remote host (if present)',
+        )
+    parser.add_argument(
+        '--cluster',
+        metavar='NAME',
+        help='name of the cluster',
+        type=validate.alphanumeric,
+        )
+    sub = parser.add_subparsers(
+        title='commands',
+        metavar='COMMAND',
+        help='description',
+        )
+    entry_points = [
+        (ep.name, ep.load())
+        for ep in pkg_resources.iter_entry_points('ceph_deploy.cli')
+        ]
+    entry_points.sort(
+        key=lambda (name, fn): getattr(fn, 'priority', 100),
+        )
+    for (name, fn) in entry_points:
+        p = sub.add_parser(
+            name,
+            description=fn.__doc__,
+            help=fn.__doc__,
+            )
+        # ugly kludge but i really want to have a nice way to access
+        # the program name, with subcommand, later
+        p.set_defaults(prog=p.prog)
+        fn(p)
+    parser.set_defaults(
+        # we want to hold on to this, for later
+        prog=parser.prog,
+
+        # unit tests can override this to mock pushy; no user-visible
+        # option sets this
+        pushy=pushy.connect,
+
+        cluster='ceph',
+        )
+    args = parser.parse_args(args=args, namespace=namespace)
+    return args
+
+
+def main(args=None, namespace=None):
+    args = parse_args(args=args, namespace=namespace)
+
+    loglevel = logging.INFO
+    if args.verbose:
+        loglevel = logging.DEBUG
+
+    logging.basicConfig(
+        level=loglevel,
+        )
+
+    sudo_pushy.patch()
+
+    try:
+        return args.func(args)
+    except exc.DeployError as e:
+        print >> sys.stderr, '{prog}: {msg}'.format(
+            prog=args.prog,
+            msg=e,
+            )
+        sys.exit(1)
diff --git a/ceph_deploy/cliutil.py b/ceph_deploy/cliutil.py
new file mode 100644 (file)
index 0000000..d273f31
--- /dev/null
@@ -0,0 +1,8 @@
+def priority(num):
+    """
+    Decorator to add a `priority` attribute to the function.
+    """
+    def add_priority(fn):
+        fn.priority = num
+        return fn
+    return add_priority
diff --git a/ceph_deploy/conf.py b/ceph_deploy/conf.py
new file mode 100644 (file)
index 0000000..1b67cfe
--- /dev/null
@@ -0,0 +1,56 @@
+import ConfigParser
+import contextlib
+
+from . import exc
+
+
+class _TrimIndentFile(object):
+    def __init__(self, fp):
+        self.fp = fp
+
+    def readline(self):
+        line = self.fp.readline()
+        return line.lstrip(' \t')
+
+
+def _optionxform(s):
+    s = s.replace('_', ' ')
+    s = '_'.join(s.split())
+    return s
+
+
+def parse(fp):
+    cfg = ConfigParser.RawConfigParser()
+    cfg.optionxform = _optionxform
+    ifp = _TrimIndentFile(fp)
+    cfg.readfp(ifp)
+    return cfg
+
+
+def load(args):
+    path = '{cluster}.conf'.format(cluster=args.cluster)
+    try:
+        f = file(path)
+    except IOError as e:
+        raise exc.ConfigError(e)
+    else:
+        with contextlib.closing(f):
+            return parse(f)
+
+
+def write_conf(cluster, conf, overwrite):
+    import os
+
+    path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster)
+    tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid())
+
+    if os.path.exists(path):
+        with file(path, 'rb') as f:
+            old = f.read()
+            if old != conf and not overwrite:
+                raise RuntimeError('config file %s exists with different content; use --overwrite-conf to overwrite' % path)
+    with file(tmp, 'w') as f:
+        f.write(conf)
+        f.flush()
+        os.fsync(f)
+    os.rename(tmp, path)
diff --git a/ceph_deploy/config.py b/ceph_deploy/config.py
new file mode 100644 (file)
index 0000000..749c554
--- /dev/null
@@ -0,0 +1,111 @@
+import logging
+
+from cStringIO import StringIO
+
+from . import exc
+from . import conf
+from .cliutil import priority
+
+LOG = logging.getLogger(__name__)
+
+def config_push(args):
+    cfg = conf.load(args)
+    conf_data = StringIO()
+    cfg.write(conf_data)
+
+    errors = 0
+    for hostname in args.client:
+        LOG.debug('Pushing config to %s', hostname)
+        try:
+            sudo = args.pushy('ssh+sudo:{hostname}'.format(
+                    hostname=hostname,
+                    ))
+
+            write_conf_r = sudo.compile(conf.write_conf)
+            write_conf_r(
+                cluster=args.cluster,
+                conf=conf_data.getvalue(),
+                overwrite=args.overwrite_conf,
+                )
+
+        except RuntimeError as e:
+            LOG.error(e)
+            errors += 1
+
+    if errors:
+        raise exc.GenericError('Failed to config %d hosts' % errors)
+
+
+def get_file(path):
+    """
+    Run on mon node, grab a file.
+    """
+    try:
+        with file(path, 'rb') as f:
+            return f.read()
+    except IOError:
+        pass
+
+def config_pull(args):
+    topath = '{cluster}.conf'.format(cluster=args.cluster)
+    frompath = '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster)
+
+    errors = 0
+    for hostname in args.client:
+        try:
+            LOG.debug('Checking %s for %s', hostname, frompath)
+            sudo = args.pushy('ssh+sudo:{hostname}'.format(hostname=hostname))
+            get_file_r = sudo.compile(get_file)
+            conf = get_file_r(path=frompath)
+            if conf is not None:
+                LOG.debug('Got %s from %s', frompath, hostname)
+                if os.path.exists(topath):
+                    with file(topath, 'rb') as f:
+                        existing = f.read()
+                        if existing != conf and not args.overwrite_conf:
+                            LOG.error('local config file %s exists with different content; use --overwrite-conf to overwrite' % topath)
+                            raise
+
+                with file(topath, 'w') as f:
+                    f.write(conf)
+                return
+            LOG.debug('Empty or missing %s on %s', frompath, hostname)
+        except:
+            LOG.error('Unable to pull %s from %s', frompath, hostname)
+        finally:
+            errors += 1
+
+    raise exc.GenericError('Failed to fetch config from %d hosts' % errors)
+
+
+def config(args):
+    if args.subcommand == 'push':
+        config_push(args)
+    elif args.subcommand == 'pull':
+        config_pull(args)
+    else:
+        LOG.error('subcommand %s not implemented', args.subcommand)
+
+@priority(70)
+def make(parser):
+    """
+    Push configuration file to a remote host.
+    """
+    parser.add_argument(
+        'subcommand',
+        metavar='SUBCOMMAND',
+        choices=[
+            'push',
+            'pull',
+            ],
+        help='push or pull',
+        )
+    parser.add_argument(
+        'client',
+        metavar='HOST',
+        nargs='*',
+        help='host to push/pull the config to/from',
+        )
+    parser.set_defaults(
+        func=config,
+        )
diff --git a/ceph_deploy/discover.py b/ceph_deploy/discover.py
new file mode 100644 (file)
index 0000000..a6b8755
--- /dev/null
@@ -0,0 +1,68 @@
+import logging
+import os.path
+
+from .cliutil import priority
+
+
+LOG = logging.getLogger(__name__)
+
+
+def get_file(path):
+    """
+    Run on remote node, grab a file.
+    """
+    try:
+        with file(path, 'rb') as f:
+            return f.read()
+    except IOError:
+        pass
+
+def fetch_file(args, frompath, topath, hosts):
+    # mon.
+    if os.path.exists(topath):
+        LOG.debug('Have %s', topath)
+        return True
+    else:
+        for hostname in hosts:
+            LOG.debug('Checking %s for %s', hostname, frompath)
+            sudo = args.pushy('ssh+sudo:{hostname}'.format(hostname=hostname))
+            get_file_r = sudo.compile(get_file)
+            key = get_file_r(path=frompath.format(hostname=hostname))
+            if key is not None:
+                LOG.debug('Got %s from %s, writing locally', topath, hostname)
+                if not args.dry_run:
+                    with file(topath, 'w') as f:
+                        f.write(key)
+                return True
+    LOG.warning('Unable to find %s on %s', frompath, hosts)
+    return False
+
+def discover(args):
+    ret = 0
+
+    # ceph.conf
+    r = fetch_file(
+        args=args,
+        frompath='/etc/ceph/{cluster}.conf'.format(cluster=args.cluster),
+        topath='{cluster}.conf'.format(cluster=args.cluster),
+        hosts=args.host,
+        )
+    if not r:
+        ret = 1
+
+    return ret
+
+@priority(10)
+def make(parser):
+    """
+    Gather cluster configuration from another host to CLUSTER.conf.
+    """
+    parser.add_argument(
+        'host',
+        metavar='HOST',
+        nargs='*',
+        help='host to pull cluster information from',
+        )
+    parser.set_defaults(
+        func=discover,
+        )
diff --git a/ceph_deploy/exc.py b/ceph_deploy/exc.py
new file mode 100644 (file)
index 0000000..47249ac
--- /dev/null
@@ -0,0 +1,60 @@
+class DeployError(Exception):
+    """
+    Unknown deploy error
+    """
+
+    def __str__(self):
+        doc = self.__doc__.strip()
+        return ': '.join([doc] + [str(a) for a in self.args])
+
+
+class UnableToResolveError(DeployError):
+    """
+    Unable to resolve host
+    """
+class ClusterExistsError(DeployError):
+    """
+    Cluster config exists already
+    """
+
+
+class ConfigError(DeployError):
+    """
+    Cannot load config
+    """
+
+
+class NeedHostError(DeployError):
+    """
+    No hosts specified to deploy to.
+    """
+
+
+class NeedMonError(DeployError):
+    """
+    Cannot find nodes with ceph-mon.
+    """
+
+
+class UnsupportedPlatform(DeployError):
+    """
+    Platform is not supported
+    """
+    def __init__(self, distro, codename):
+        self.distro = distro
+        self.codename = codename
+
+    def __str__(self):
+        return '{doc}: {distro} {codename}'.format(
+            doc=self.__doc__.strip(),
+            distro=self.distro,
+            codename=self.codename,
+            )
+
+
+class GenericError(DeployError):
+    def __init__(self, message):
+        self.message = message
+
+    def __str__(self):
+        return self.message
diff --git a/ceph_deploy/forgetkeys.py b/ceph_deploy/forgetkeys.py
new file mode 100644 (file)
index 0000000..98bc6ef
--- /dev/null
@@ -0,0 +1,29 @@
+import logging
+
+from .cliutil import priority
+
+
+LOG = logging.getLogger(__name__)
+
+
+def forgetkeys(args):
+    import os
+    for f in [
+        'mon',
+        'client.admin',
+        'bootstrap-osd',
+        'bootstrap-mds',
+        ]:
+        os.unlink('{cluster}.{what}.keyring'.format(
+                cluster=args.cluster,
+                what=f,
+                ))
+
+@priority(100)
+def make(parser):
+    """
+    Remove authentication keys from the local directory.
+    """
+    parser.set_defaults(
+        func=forgetkeys,
+        )
diff --git a/ceph_deploy/gatherkeys.py b/ceph_deploy/gatherkeys.py
new file mode 100644 (file)
index 0000000..a6715c2
--- /dev/null
@@ -0,0 +1,84 @@
+import os.path
+import logging
+
+from .cliutil import priority
+from . import misc
+
+LOG = logging.getLogger(__name__)
+
+def fetch_file(args, frompath, topath, hosts):
+    # mon.
+    if os.path.exists(topath):
+        LOG.debug('Have %s', topath)
+        return True
+    else:
+        for hostname in hosts:
+            LOG.debug('Checking %s for %s', hostname, frompath)
+            sudo = args.pushy('ssh+sudo:{hostname}'.format(hostname=hostname))
+            get_file_r = sudo.compile(misc.get_file)
+            key = get_file_r(path=frompath.format(hostname=hostname))
+            if key is not None:
+                LOG.debug('Got %s key from %s.', topath, hostname)
+                with file(topath, 'w') as f:
+                    f.write(key)
+                    return True
+    LOG.warning('Unable to find %s on %s', frompath, hosts)
+    return False
+
+def gatherkeys(args):
+    ret = 0
+
+    # client.admin
+    r = fetch_file(
+        args=args,
+        frompath='/etc/ceph/{cluster}.client.admin.keyring'.format(
+            cluster=args.cluster),
+        topath='{cluster}.client.admin.keyring'.format(
+            cluster=args.cluster),
+        hosts=args.mon,
+        )
+    if not r:
+        ret = 1
+
+    # mon.
+    fetch_file(
+        args=args,
+        frompath='/var/lib/ceph/mon/%s-{hostname}/keyring' % args.cluster,
+        topath='{cluster}.mon.keyring'.format(
+             cluster=args.cluster),
+        hosts=args.mon,
+        )
+    if not r:
+        ret = 1
+
+    # bootstrap
+    for what in ['osd', 'mds']:
+        r = fetch_file(
+            args=args,
+            frompath='/var/lib/ceph/bootstrap-{what}/{cluster}.keyring'.format(
+                cluster=args.cluster,
+                what=what),
+            topath='{cluster}.bootstrap-{what}.keyring'.format(
+                cluster=args.cluster,
+                what=what),
+            hosts=args.mon,
+            )
+        if not r:
+            ret = 1
+
+    return ret
+
+@priority(40)
+def make(parser):
+    """
+    Gather authentication keys for provisioning new nodes.
+    """
+    parser.add_argument(
+        'mon',
+        metavar='HOST',
+        nargs='+',
+        help='monitor host to pull keys from',
+        )
+    parser.set_defaults(
+        func=gatherkeys,
+        )
diff --git a/ceph_deploy/install.py b/ceph_deploy/install.py
new file mode 100644 (file)
index 0000000..7a986e5
--- /dev/null
@@ -0,0 +1,351 @@
+import argparse
+import logging
+
+from . import exc
+from . import lsb
+from .cliutil import priority
+
+LOG = logging.getLogger(__name__)
+
def install_centos(release, codename, version_kind, version):
    """
    Run on a CentOS node: import the ceph release key, add the repo
    for the requested version, and install the ceph packages via yum.

    Executed remotely via pushy, so imports must stay local.
    """
    import platform
    import subprocess

    # stable/testing builds are signed with the release key,
    # dev (gitbuilder) builds with the autobuild key
    if version_kind in ['stable', 'testing']:
        key = 'release'
    else:
        key = 'autobuild'

    subprocess.check_call(
        args='su -c \'rpm --import "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc"\''.format(key=key),
        shell=True,
        )

    if version_kind == 'stable':
        url = 'http://ceph.com/rpm-{version}/'.format(
            version=version,
            )
    elif version_kind == 'testing':
        url = 'http://ceph.com/rpm-testing/'
    elif version_kind == 'dev':
        url = 'http://gitbuilder.ceph.com/ceph-rpm-centos{release}-{machine}-basic/ref/{version}/'.format(
            release=release.split(".", 1)[0],
            machine=platform.machine(),
            version=version,
            )
    else:
        # BUG FIX: previously fell through with `url` unset, raising
        # NameError below; fail loudly instead (matches install_debian)
        raise RuntimeError('Unknown version kind: %r' % version_kind)

    subprocess.call(
        args=['rpm', '-Uvh', '--quiet', '{url}noarch/ceph-release-1-0.el6.noarch.rpm'.format(
            url=url
            )]
        )

    subprocess.check_call(
        args=[
            'yum',
            '-y',
            '-q',
            'install',
            'ceph',
            'ceph-common',
            'ceph-fs-common',
            ],
        )
+    
def uninstall_centos():
    """
    Run on a CentOS node: remove the ceph packages via yum.

    Executed remotely via pushy, so imports must stay local.
    """
    import subprocess

    cmd = [
        'yum',
        '-q',
        '-y',
        'remove',
        'ceph',
        'ceph-mds',
        'ceph-common',
        'ceph-fs-common',
        ]
    subprocess.check_call(args=cmd)
+
def uninstall_debian(arg_purge=False):
    """
    Run on a Debian/Ubuntu node: remove the ceph packages via
    apt-get; also purge their configuration when `arg_purge` is true.

    Executed remotely via pushy, so imports must stay local.
    """
    import subprocess

    cmd = [
        'apt-get',
        '-q',
        'remove',
        '-f',
        '-y',
        '--force-yes',
        ]
    if arg_purge:
        cmd.append('--purge')
    cmd.append('--')
    cmd.extend([
        'ceph',
        'ceph-mds',
        'ceph-common',
        'ceph-fs-common',
        ])
    subprocess.check_call(args=cmd)
+
def install_debian(release, codename, version_kind, version):
    """
    Run on a Debian/Ubuntu node: add the ceph release key and apt
    source for the requested version, then install the ceph packages.

    Executed remotely via pushy, so imports must stay local.
    """
    import platform
    import subprocess

    # stable/testing builds are signed with the release key,
    # dev (gitbuilder) builds with the autobuild key
    key = 'release' if version_kind in ['stable', 'testing'] else 'autobuild'

    subprocess.check_call(
        args='wget -q -O- \'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc\' | apt-key add -'.format(key=key),
        shell=True,
        )

    if version_kind == 'stable':
        url = 'http://ceph.com/debian-{version}/'.format(version=version)
    elif version_kind == 'testing':
        url = 'http://ceph.com/debian-testing/'
    elif version_kind == 'dev':
        url = 'http://gitbuilder.ceph.com/ceph-deb-{codename}-{machine}-basic/ref/{version}'.format(
            codename=codename,
            machine=platform.machine(),
            version=version,
            )
    else:
        raise RuntimeError('Unknown version kind: %r' % version_kind)

    with open('/etc/apt/sources.list.d/ceph.list', 'w') as f:
        f.write('deb {url} {codename} main\n'.format(
                url=url,
                codename=codename,
                ))

    subprocess.check_call(args=['apt-get', '-q', 'update'])

    # TODO this does not downgrade -- should it?
    subprocess.check_call(
        args=[
            'env',
            'DEBIAN_FRONTEND=noninteractive',
            'DEBIAN_PRIORITY=critical',
            'apt-get',
            '-q',
            '-o', 'Dpkg::Options::=--force-confnew',
            'install',
            '--no-install-recommends',
            '--assume-yes',
            '--',
            'ceph',
            'ceph-mds',
            'ceph-common',
            'ceph-fs-common',
            # ceph only recommends gdisk, make sure we actually have
            # it; only really needed for osds, but minimal collateral
            'gdisk',
            ],
        )
+
def install(args):
    """
    Install ceph on every host in args.host, choosing the installer
    that matches the detected distro.
    """
    version = getattr(args, args.version_kind)
    description = args.version_kind
    if version:
        description += ' version {version}'.format(version=version)
    LOG.debug(
        'Installing %s on cluster %s hosts %s',
        description,
        args.cluster,
        ' '.join(args.host),
        )

    # dispatch table: distro name (as reported by lsb_release) -> the
    # function shipped to the remote host
    installers = {
        'Debian': install_debian,
        'Ubuntu': install_debian,
        'CentOS': install_centos,
        }

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        # TODO username
        sudo = args.pushy('ssh+sudo:{hostname}'.format(hostname=hostname))
        lsb_release_r = sudo.compile(lsb.lsb_release)
        (distro, release, codename) = lsb_release_r()
        LOG.debug('Distro %s release %s codename %s', distro, release, codename)

        installer = installers.get(distro)
        if installer is None:
            raise exc.UnsupportedPlatform(distro=distro, codename=codename)

        LOG.debug('Installing on host %s ...', hostname)
        install_r = sudo.compile(installer)
        install_r(
            release=release,
            codename=codename,
            version_kind=args.version_kind,
            version=version,
            )
+
def uninstall(args):
    """
    Uninstall ceph from every host in args.host.
    """
    LOG.debug(
        'Uninstalling on cluster %s hosts %s',
        args.cluster,
        ' '.join(args.host),
        )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        # TODO username
        sudo = args.pushy('ssh+sudo:{hostname}'.format(hostname=hostname))
        lsb_release_r = sudo.compile(lsb.lsb_release)
        (distro, release, codename) = lsb_release_r()
        LOG.debug('Distro %s codename %s', distro, codename)

        if (distro == 'Debian' or distro == 'Ubuntu'):
            LOG.debug('Uninstalling on host %s ...', hostname)
            uninstall_r = sudo.compile(uninstall_debian)
        elif (distro == 'CentOS'):
            # BUG FIX: previously compared against 'centos', but
            # install() checks 'CentOS', so CentOS hosts always fell
            # through to UnsupportedPlatform
            LOG.debug('Uninstalling on host %s ...', hostname)
            uninstall_r = sudo.compile(uninstall_centos)
        else:
            raise exc.UnsupportedPlatform(distro=distro, codename=codename)

        uninstall_r()
+
def purge(args):
    """
    Uninstall ceph and purge its configuration from every host in
    args.host (only the Debian/Ubuntu path is implemented here).
    """
    LOG.debug(
        'Purging from cluster %s hosts %s',
        args.cluster,
        ' '.join(args.host),
        )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        # TODO username
        sudo = args.pushy('ssh+sudo:{hostname}'.format(hostname=hostname))
        lsb_release_r = sudo.compile(lsb.lsb_release)
        (distro, release, codename) = lsb_release_r()
        LOG.debug('Distro %s codename %s', distro, codename)

        if (distro == 'Debian' or distro == 'Ubuntu'):
            LOG.debug('Purging host %s ...', hostname)
            purge_r = sudo.compile(uninstall_debian)
        else:
            raise exc.UnsupportedPlatform(distro=distro, codename=codename)

        # BUG FIX: uninstall_debian's parameter is named arg_purge;
        # calling it with purge=True raised TypeError
        purge_r(arg_purge=True)
+
class StoreVersion(argparse.Action):
    """
    Like ``"store"`` but also remember which one of the exclusive
    options was set.

    There are three kinds of versions: stable, testing and dev.
    This sets ``version_kind`` to be the right one of the above.

    This kludge essentially lets us differentiate explicitly set
    values from defaults.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        # record which exclusive option was used, then store its value
        setattr(namespace, 'version_kind', self.dest)
        setattr(namespace, self.dest, values)
+
+
@priority(20)
def make(parser):
    """
    Install Ceph packages on remote hosts.
    """
    # the three version selectors are mutually exclusive; StoreVersion
    # records which one was given in args.version_kind
    version = parser.add_mutually_exclusive_group()
    version.add_argument(
        '--stable',
        nargs='?',
        action=StoreVersion,
        metavar='CODENAME',
        choices=[
            'argonaut',
            'bobtail',
            'cuttlefish',
            ],
        help='install a release known as CODENAME (done by default) (default: %(default)s)',
        )
    version.add_argument(
        '--testing',
        nargs=0,
        action=StoreVersion,
        help='install the latest development release',
        )
    version.add_argument(
        '--dev',
        nargs='?',
        action=StoreVersion,
        const='master',
        metavar='BRANCH_OR_TAG',
        help='install a bleeding edge build from Git branch or tag (default: %(default)s)',
        )
    version.set_defaults(
        func=install,
        stable='bobtail',
        dev='master',
        version_kind='stable',
        )

    parser.add_argument(
        'host',
        nargs='+',
        metavar='HOST',
        help='hosts to install on',
        )
    parser.set_defaults(func=install)
+
+
+
@priority(80)
def make_uninstall(parser):
    """
    Remove Ceph packages from remote hosts.
    """
    parser.add_argument(
        'host',
        nargs='+',
        metavar='HOST',
        help='hosts to uninstall Ceph from',
        )
    parser.set_defaults(func=uninstall)
+
@priority(80)
def make_purge(parser):
    """
    Remove Ceph packages from remote hosts and purge all data.
    """
    parser.add_argument(
        'host',
        nargs='+',
        metavar='HOST',
        help='hosts to purge Ceph from',
        )
    parser.set_defaults(func=purge)
diff --git a/ceph_deploy/lsb.py b/ceph_deploy/lsb.py
new file mode 100644 (file)
index 0000000..f7e0b21
--- /dev/null
@@ -0,0 +1,56 @@
def lsb_release():
    """
    Detect (distro, release, codename) of the local host using the
    lsb_release tool.

    Executed remotely via pushy, so imports stay local and helpers are
    nested (only this function's source is shipped to the remote end).

    Raises RuntimeError when lsb_release is missing or gives empty
    output, and subprocess.CalledProcessError when it exits non-zero.
    """
    import subprocess

    def _query(args, what):
        # run a command and return its stdout; raise on failure or
        # empty output
        p = subprocess.Popen(
            args=args,
            stdout=subprocess.PIPE,
            )
        out = p.stdout.read()
        ret = p.wait()
        if ret != 0:
            # BUG FIX: previously referenced an undefined name `out`
            # here, so every command failure became a NameError
            raise subprocess.CalledProcessError(ret, args, output=out)
        if out == '':
            raise RuntimeError(
                'lsb_release gave invalid output for %s' % what)
        return out

    # make sure the tool exists at all before querying it
    p = subprocess.Popen(
        args=['which', 'lsb_release'],
        stdout=subprocess.PIPE,
        )
    p.stdout.read()
    if p.wait() != 0:
        raise RuntimeError('lsb_release not found on host')

    distro = _query(['lsb_release', '-s', '-i'], 'distro')
    release = _query(['lsb_release', '-s', '-r'], 'release')
    codename = _query(['lsb_release', '-s', '-c'], 'codename')

    return (distro.rstrip(), release.rstrip(), codename.rstrip())
+
+
def choose_init(distro, codename):
    """
    Pick the init system ceph daemons are registered with on this
    platform: upstart on Ubuntu, sysvinit everywhere else.
    """
    return 'upstart' if distro == 'Ubuntu' else 'sysvinit'
diff --git a/ceph_deploy/mds.py b/ceph_deploy/mds.py
new file mode 100644 (file)
index 0000000..9f6ff41
--- /dev/null
@@ -0,0 +1,229 @@
+import logging
+
+from cStringIO import StringIO
+
+from . import conf
+from . import exc
+from . import lsb
+from .cliutil import priority
+
+
+LOG = logging.getLogger(__name__)
+
+
def get_bootstrap_mds_key(cluster):
    """
    Read the bootstrap-mds key for `cluster` from the current
    directory (where `gatherkeys` put it).
    """
    keyring = '{cluster}.bootstrap-mds.keyring'.format(cluster=cluster)
    try:
        with open(keyring, 'rb') as f:
            return f.read()
    except IOError:
        raise RuntimeError('bootstrap-mds keyring not found; run \'gatherkeys\'')
+
+
def create_mds_bootstrap(cluster, key):
    """
    Run on the mds node: write the bootstrap-mds keyring for
    `cluster` if it is not there yet.

    Executed remotely via pushy, so imports must stay local.  pushy
    mangles exceptions to all be of type ExceptionProxy, so callers
    treat a non-None return value as an error message.
    """
    import os

    path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
        cluster=cluster,
        )
    if os.path.exists(path):
        return

    # write to a unique temp file and rename into place so a crash
    # never leaves a partial keyring behind
    tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid())
    # file() doesn't let us control access mode from the beginning,
    # and thus would have a race where an attacker can open before we
    # chmod the file, so play games with os.open
    fd = os.open(
        tmp,
        (os.O_WRONLY | os.O_CREAT | os.O_EXCL
         | os.O_NOCTTY | os.O_NOFOLLOW),
        0o600,
        )
    f = os.fdopen(fd, 'wb')
    try:
        f.write(key)
        f.flush()
        os.fsync(f)
    finally:
        f.close()
    os.rename(tmp, path)
+
+
def create_mds(
    name,
    cluster,
    init,
    ):
    """
    Run on the mds node: create the mds data directory, fetch its key
    with the bootstrap-mds credentials, and start the daemon.

    Executed remotely via pushy, so imports must stay local.
    """
    import errno
    import os
    import subprocess

    path = '/var/lib/ceph/mds/{cluster}-{name}'.format(
        cluster=cluster,
        name=name
        )

    # idempotent mkdir: an already-existing directory is fine
    try:
        os.mkdir(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    bootstrap_keyring = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
        cluster=cluster
        )
    keypath = os.path.join(path, 'keyring')

    # create (or fetch) the mds key using the bootstrap credentials
    subprocess.check_call(
        args=[
            'ceph',
            '--cluster', cluster,
            '--name', 'client.bootstrap-mds',
            '--keyring', bootstrap_keyring,
            'auth', 'get-or-create', 'mds.{name}'.format(name=name),
            'osd', 'allow *',
            'mds', 'allow',
            'mon', 'allow rwx',
            '-o',
            keypath,
            ],
        )

    # marker files: 'done' flags a finished setup, the init-style file
    # records which init system manages this daemon
    for marker in ('done', init):
        with open(os.path.join(path, marker), 'wb'):
            pass

    if init == 'upstart':
        subprocess.check_call(
            args=[
                'initctl',
                'emit',
                'ceph-mds',
                'cluster={cluster}'.format(cluster=cluster),
                'id={name}'.format(name=name),
                ])
    elif init == 'sysvinit':
        subprocess.check_call(
            args=[
                'service',
                'ceph',
                'start',
                'mds.{name}'.format(name=name),
                ])
+
def mds_create(args):
    """
    Deploy one or more MDS daemons to the hosts in ``args.mds``
    (a list of ``(hostname, name)`` pairs).

    For each host: push the cluster conf and the bootstrap-mds key
    (once per host), then create and start the named mds.  Per-host
    failures are logged and counted; a GenericError is raised at the
    end if any host failed.
    """
    cfg = conf.load(args)
    LOG.debug(
        'Deploying mds, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.mds),
        )

    if not args.mds:
        raise exc.NeedHostError()

    # read the bootstrap-mds keyring from the local directory; raises
    # RuntimeError if 'gatherkeys' has not been run yet
    key = get_bootstrap_mds_key(cluster=args.cluster)

    # hosts already prepared in this run; conf and key are pushed only
    # once per host even when several daemons land on the same host
    bootstrapped = set()
    errors = 0
    for hostname, name in args.mds:
        try:
            # TODO username
            sudo = args.pushy('ssh+sudo:{hostname}'.format(hostname=hostname))

            lsb_release_r = sudo.compile(lsb.lsb_release)
            (distro, release, codename) = lsb_release_r()
            init = lsb.choose_init(distro, codename)
            LOG.debug('Distro %s codename %s, will use %s',
                      distro, codename, init)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('Deploying mds bootstrap to %s', hostname)

                write_conf_r = sudo.compile(conf.write_conf)
                conf_data = StringIO()
                cfg.write(conf_data)
                write_conf_r(
                    cluster=args.cluster,
                    conf=conf_data.getvalue(),
                    overwrite=args.overwrite_conf,
                    )

                create_mds_bootstrap_r = sudo.compile(create_mds_bootstrap)
                error = create_mds_bootstrap_r(
                    cluster=args.cluster,
                    key=key,
                    )
                # pushy flattens remote exceptions, so remote failures
                # come back as a return value rather than a raise
                if error is not None:
                    raise exc.GenericError(error)
                LOG.debug('Host %s is now ready for MDS use.', hostname)

            # create an mds
            LOG.debug('Deploying mds.%s to %s', name, hostname)
            create_mds_r = sudo.compile(create_mds)
            create_mds_r(
                name=name,
                cluster=args.cluster,
                init=init,
                )
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d MDSs' % errors)
+
+
def mds(args):
    """Dispatch the mds subcommand; only 'create' is implemented."""
    if args.subcommand != 'create':
        LOG.error('subcommand %s not implemented', args.subcommand)
        return
    mds_create(args)
+
+
def colon_separated(s):
    """
    argparse type for 'HOST[:NAME]': return a (host, name) pair,
    using the whole string for both parts unless exactly one ':'
    separator is present.
    """
    if s.count(':') == 1:
        host, name = s.split(':')
        return (host, name)
    return (s, s)
+
@priority(30)
def make(parser):
    """
    Deploy ceph MDS on remote hosts.
    """
    parser.add_argument(
        'subcommand',
        choices=[
            'create',
            'destroy',
            ],
        metavar='SUBCOMMAND',
        help='create or destroy',
        )
    # each entry is parsed into a (host, name) tuple
    parser.add_argument(
        'mds',
        nargs='*',
        type=colon_separated,
        metavar='HOST[:NAME]',
        help='host (and optionally the daemon name) to deploy on',
        )
    parser.set_defaults(func=mds)
diff --git a/ceph_deploy/memoize.py b/ceph_deploy/memoize.py
new file mode 100644 (file)
index 0000000..fd344a0
--- /dev/null
@@ -0,0 +1,26 @@
+import functools
+
+
class NotFound(object):
    """
    Sentinel object to say call was not memoized.

    Supposed to be faster than throwing exceptions on cache miss.
    """
    def __str__(self):
        return self.__class__.__name__

# rebind the name to a singleton instance; the class itself is never
# referenced again
NotFound = NotFound()


def memoize(f):
    """
    Decorator caching f's results, keyed by positional args and
    sorted keyword args (all must be hashable).
    """
    cache = {}

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        cached = cache.get(key, NotFound)
        if cached is not NotFound:
            return cached
        result = f(*args, **kwargs)
        cache[key] = result
        return result
    return wrapper
diff --git a/ceph_deploy/misc.py b/ceph_deploy/misc.py
new file mode 100644 (file)
index 0000000..0954800
--- /dev/null
@@ -0,0 +1,11 @@
+
def get_file(path):
    """
    Run on mon node, grab a file.

    Returns the file's bytes, or None when it cannot be read.
    """
    try:
        with open(path, 'rb') as f:
            return f.read()
    except IOError:
        # unreadable/missing file: signal with None instead of raising
        return None
+
diff --git a/ceph_deploy/mon.py b/ceph_deploy/mon.py
new file mode 100644 (file)
index 0000000..ae9f774
--- /dev/null
@@ -0,0 +1,258 @@
+import ConfigParser
+import logging
+import re
+
+from cStringIO import StringIO
+
+from . import conf
+from . import exc
+from . import lsb
+from .cliutil import priority
+
+
+LOG = logging.getLogger(__name__)
+
+
def create_mon(cluster, monitor_keyring, init):
    """
    Run on the mon node: build the monitor data directory with
    ceph-mon --mkfs (unless already done) and start the daemon.

    Executed remotely via pushy, so imports must stay local.
    """
    import os
    import socket
    import subprocess

    hostname = socket.gethostname().split('.')[0]
    # BUG FIX: these paths hard-coded the default 'ceph' cluster name,
    # ignoring the `cluster` argument; for cluster == 'ceph' the
    # resulting paths are unchanged
    path = '/var/lib/ceph/mon/{cluster}-{hostname}'.format(
        cluster=cluster,
        hostname=hostname,
        )
    done_path = os.path.join(path, 'done')
    init_path = os.path.join(path, init)

    if not os.path.exists(path):
        os.mkdir(path)

    if not os.path.exists(done_path):
        keyring = '/var/lib/ceph/tmp/{cluster}-{hostname}.mon.keyring'.format(
            cluster=cluster,
            hostname=hostname,
            )

        with open(keyring, 'w') as f:
            f.write(monitor_keyring)

        subprocess.check_call(
            args=[
                'ceph-mon',
                '--cluster', cluster,
                '--mkfs',
                '-i', hostname,
                '--keyring', keyring,
                ],
            )
        # the keyring copy was only needed for --mkfs
        os.unlink(keyring)
        # marker so a re-run skips the mkfs step
        with open(done_path, 'w'):
            pass

    if not os.path.exists(init_path):
        # records which init system manages this daemon
        with open(init_path, 'w'):
            pass

    if init == 'upstart':
        subprocess.check_call(
            args=[
                'initctl',
                'emit',
                'ceph-mon',
                'cluster={cluster}'.format(cluster=cluster),
                'id={hostname}'.format(hostname=hostname),
                ],
            )
    elif init == 'sysvinit':
        subprocess.check_call(
            args=[
                'service',
                'ceph',
                'start',
                'mon.{hostname}'.format(hostname=hostname),
                ],
            )
+
+
def mon_create(args):
    """
    Deploy monitors to the hosts in ``args.mon`` (falling back to
    ``mon_initial_members`` from the cluster conf), pushing the conf
    and the mon keyring and running create_mon on each.

    Raises NeedHostError when no hosts can be determined and
    GenericError when any host failed.
    """
    cfg = conf.load(args)
    if not args.mon:
        # no hosts on the command line: fall back to the conf file's
        # mon_initial_members (comma- or whitespace-separated)
        try:
            mon_initial_members = cfg.get('global', 'mon_initial_members')
        except (ConfigParser.NoSectionError,
                ConfigParser.NoOptionError):
            pass
        else:
            args.mon = re.split(r'[,\s]+', mon_initial_members)

    if not args.mon:
        raise exc.NeedHostError()

    # the mon keyring is written to the local directory by 'new'
    try:
        with file('{cluster}.mon.keyring'.format(cluster=args.cluster),
                  'rb') as f:
            monitor_keyring = f.read()
    except IOError:
        raise RuntimeError('mon keyring not found; run \'new\' to create a new cluster')

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
        )

    errors = 0
    for hostname in args.mon:
        try:
            LOG.debug('Deploying mon to %s', hostname)

            # TODO username
            sudo = args.pushy('ssh+sudo:{hostname}'.format(hostname=hostname))

            lsb_release_r = sudo.compile(lsb.lsb_release)
            (distro, release, codename) = lsb_release_r()
            init = lsb.choose_init(distro, codename)
            LOG.debug('Distro %s codename %s, will use %s',
                      distro, codename, init)

            # push the cluster conf before creating the monitor
            write_conf_r = sudo.compile(conf.write_conf)
            conf_data = StringIO()
            cfg.write(conf_data)
            write_conf_r(
                cluster=args.cluster,
                conf=conf_data.getvalue(),
                overwrite=args.overwrite_conf,
                )

            create_mon_r = sudo.compile(create_mon)
            create_mon_r(
                cluster=args.cluster,
                monitor_keyring=monitor_keyring,
                init=init,
                )

            # TODO add_bootstrap_peer_hint

        except RuntimeError as e:
            # keep going; failures are summarized below
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
+
+
def destroy_mon(cluster):
    """
    Run on the mon node: deregister this host's monitor from the
    cluster, stop the daemon and delete its data directory.

    Executed remotely via pushy, so imports must stay local.
    """
    import os
    import subprocess
    import socket

    hostname = socket.gethostname().split('.')[0]
    # BUG FIX: the data path hard-coded the default 'ceph' cluster
    # name, ignoring the `cluster` argument; for cluster == 'ceph' the
    # resulting path is unchanged
    path = '/var/lib/ceph/mon/{cluster}-{hostname}'.format(
        cluster=cluster,
        hostname=hostname,
        )

    if os.path.exists(path):
        # remove from cluster
        subprocess.check_call(
            args=[
                'sudo',
                'ceph',
                '--cluster={cluster}'.format(cluster=cluster),
                '-n', 'mon.',
                '-k', '{path}/keyring'.format(path=path),
                'mon',
                'remove',
                hostname,
                ],
            )

        # stop the daemon via whichever init system created it (the
        # marker file is written by create_mon)
        if os.path.exists(os.path.join(path, 'upstart')):
            subprocess.call(   # ignore initctl error when job not running
                args=[
                    'initctl',
                    'stop',
                    'ceph-mon',
                    'cluster={cluster}'.format(cluster=cluster),
                    'id={hostname}'.format(hostname=hostname),
                ],
            )
        elif os.path.exists(os.path.join(path, 'sysvinit')):
            subprocess.check_call(
                args=[
                    'service',
                    'ceph',
                    'stop',
                    'mon.{hostname}'.format(hostname=hostname),
                ],
            )

        # delete monitor directory
        subprocess.check_call(
            args=[
                'rm',
                '-rf',
                path,
                ],
            )
+
+
def mon_destroy(args):
    """
    Remove the monitor from every host in ``args.mon``.

    Raises GenericError when any host failed.
    """
    errors = 0
    for hostname in args.mon:
        try:
            LOG.debug('Removing mon from %s', hostname)

            # TODO username
            sudo = args.pushy('ssh+sudo:{hostname}'.format(hostname=hostname))

            destroy_mon_r = sudo.compile(destroy_mon)
            destroy_mon_r(
                cluster=args.cluster,
                )

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        # BUG FIX: the message said 'create' (copy-pasted from
        # mon_create) even though this is the destroy path
        raise exc.GenericError('Failed to destroy %d monitors' % errors)
+
+
def mon(args):
    """Dispatch the mon subcommand to its implementation."""
    handlers = {
        'create': mon_create,
        'destroy': mon_destroy,
        }
    handler = handlers.get(args.subcommand)
    if handler is None:
        LOG.error('subcommand %s not implemented', args.subcommand)
        return
    handler(args)
+
@priority(30)
def make(parser):
    """
    Deploy ceph monitor on remote hosts.
    """
    parser.add_argument(
        'subcommand',
        choices=[
            'create',
            'destroy',
            ],
        metavar='SUBCOMMAND',
        help='create or destroy',
        )
    parser.add_argument(
        'mon',
        nargs='*',
        metavar='HOST',
        help='host to deploy on',
        )
    parser.set_defaults(func=mon)
diff --git a/ceph_deploy/new.py b/ceph_deploy/new.py
new file mode 100644 (file)
index 0000000..5f5c269
--- /dev/null
@@ -0,0 +1,136 @@
+import ConfigParser
+import errno
+import logging
+import os
+import uuid
+import struct
+import time
+import base64
+import socket
+
+from . import exc
+from .cliutil import priority
+
+
+LOG = logging.getLogger(__name__)
+
+
def generate_auth_key():
    """
    Generate a fresh cephx secret: a little-endian binary header
    (type, creation time, key length) followed by 16 random bytes,
    base64-encoded.
    """
    secret = os.urandom(16)
    header = struct.pack(
        '<hiih',
        1,                  # le16 type: CEPH_CRYPTO_AES
        int(time.time()),   # le32 created: seconds
        0,                  # le32 created: nanoseconds,
        len(secret),        # le16: len(key)
        )
    return base64.b64encode(header + secret)
+
+"""
+Search result of getaddrinfo() for a non-localhost-net address
+"""
+def get_nonlocal_ip(host):
+    ailist = socket.getaddrinfo(host, None)
+    for ai in ailist:
+        # an ai is a 5-tuple; the last element is (ip, port)
+        ip = ai[4][0];
+        if not ip.startswith('127.'):
+            return ip
+    raise exc.UnableToResolveError(host)
+
def new(args):
    """
    Create the initial cluster conf ({cluster}.conf) and mon keyring
    ({cluster}.mon.keyring) in the current directory.
    """
    LOG.debug('Creating new cluster named %s', args.cluster)
    cfg = ConfigParser.RawConfigParser()
    cfg.add_section('global')

    fsid = uuid.uuid4()
    cfg.set('global', 'fsid', str(fsid))

    mon_initial_members = []
    mon_host = []

    for m in args.mon:
        if m.count(':'):
            # explicit NAME:FQDN pair
            (name, host) = m.split(':')
        else:
            # bare hostname/fqdn: mon name is the short hostname
            name = m
            host = m
            if name.count('.') > 0:
                name = name.split('.')[0]
        LOG.debug('Resolving host %s', host)
        ip = get_nonlocal_ip(host)
        LOG.debug('Monitor %s at %s', name, ip)
        mon_initial_members.append(name)
        mon_host.append(ip)

    LOG.debug('Monitor initial members are %s', mon_initial_members)
    LOG.debug('Monitor addrs are %s', mon_host)

    cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    cfg.set('global', 'mon host', ','.join(mon_host))

    # override undesirable defaults, needed until bobtail

    # http://tracker.newdream.net/issues/3136
    cfg.set('global', 'auth supported', 'cephx')

    # http://tracker.newdream.net/issues/3137
    cfg.set('global', 'osd journal size', '1024')

    # http://tracker.newdream.net/issues/3138
    cfg.set('global', 'filestore xattr use omap', 'true')

    path = '{name}.conf'.format(
        name=args.cluster,
        )

    LOG.debug('Creating a random mon key...')
    mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key()

    keypath = '{name}.mon.keyring'.format(
        name=args.cluster,
        )

    # NOTE(review): on POSIX os.rename silently replaces an existing
    # target, so the EEXIST branches below likely never fire --
    # confirm whether overwrite protection is intended
    LOG.debug('Writing initial config to %s...', path)
    if not args.dry_run:
        tmp = '%s.tmp' % path
        with file(tmp, 'w') as f:
            cfg.write(f)
        try:
            os.rename(tmp, path)
        except OSError as e:
            if e.errno == errno.EEXIST:
                raise exc.ClusterExistsError(path)
            else:
                raise

    # BUG FIX: this previously logged `path` (the conf file) instead
    # of the keyring path
    LOG.debug('Writing monitor keyring to %s...', keypath)
    if not args.dry_run:
        tmp = '%s.tmp' % keypath
        with file(tmp, 'w') as f:
            f.write(mon_keyring)
        try:
            os.rename(tmp, keypath)
        except OSError as e:
            if e.errno == errno.EEXIST:
                raise exc.ClusterExistsError(keypath)
            else:
                raise
+
+
@priority(10)
def make(parser):
    """
    Start deploying a new cluster, and write a CLUSTER.conf and keyring for it.
    """
    parser.add_argument(
        'mon',
        nargs='+',
        metavar='MON',
        help='initial monitor hostname, fqdn, or hostname:fqdn pair',
        )
    parser.set_defaults(func=new)
diff --git a/ceph_deploy/osd.py b/ceph_deploy/osd.py
new file mode 100644 (file)
index 0000000..e2b3887
--- /dev/null
@@ -0,0 +1,289 @@
+import argparse
+import logging
+import os.path
+import sys
+
+from cStringIO import StringIO
+
+from . import conf
+from . import exc
+from . import lsb
+from .cliutil import priority
+
+
+LOG = logging.getLogger(__name__)
+
+
def get_bootstrap_osd_key(cluster):
    """
    Read the bootstrap-osd key for `cluster`.

    Looks for `{cluster}.bootstrap-osd.keyring` in the current working
    directory (the error message suggests it is produced by the
    'gatherkeys' subcommand) and returns its raw contents.

    :raises RuntimeError: if the keyring file cannot be opened or read.
    """
    path = '{cluster}.bootstrap-osd.keyring'.format(cluster=cluster)
    try:
        # binary mode so the key bytes pass through unmodified
        # (Python 2 `file` builtin)
        with file(path, 'rb') as f:
            return f.read()
    except IOError:
        raise RuntimeError('bootstrap-osd keyring not found; run \'gatherkeys\'')
+
def create_osd(cluster, key):
    """
    Run on osd node, writes the bootstrap key if not there yet.

    Returns None on success, error message on error exceptions. pushy
    mangles exceptions to all be of type ExceptionProxy, so we can't
    tell between bug and correctly handled failure, so avoid using
    exceptions for non-exceptional runs.
    """
    # imports are function-local because this function is shipped to
    # the remote host via pushy's compile() and executed there
    import os
    import subprocess

    path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring'.format(
        cluster=cluster,
        )
    if not os.path.exists(path):
        # write to a pid-suffixed temp file and rename() into place so
        # readers never observe a partially written keyring
        tmp = '{path}.{pid}.tmp'.format(
            path=path,
            pid=os.getpid(),
            )
        # file() doesn't let us control access mode from the
        # beginning, and thus would have a race where attacker can
        # open before we chmod the file, so play games with os.open
        fd = os.open(
            tmp,
            (os.O_WRONLY|os.O_CREAT|os.O_EXCL
             |os.O_NOCTTY|os.O_NOFOLLOW),
            0600,
            )
        with os.fdopen(fd, 'wb') as f:
            f.write(key)
            f.flush()
            # make sure the key hits stable storage before the rename
            os.fsync(f)
        os.rename(tmp, path)

    # in case disks have been prepared before we do this, activate
    # them now
    subprocess.check_call(
        args=[
            'udevadm',
            'trigger',
            '--subsystem-match=block',
            '--action=add',
            ],
        )
+
+
def prepare_disk(cluster, disk, journal, activate_prepared_disk, zap, dmcrypt, dmcrypt_dir):
    """
    Run on osd node, prepares a data disk for use.

    Builds and runs a ceph-disk-prepare command line for `disk`
    (optionally zapping it first and/or encrypting it with dm-crypt),
    then pokes udev when the caller asked for immediate activation.
    """
    import subprocess

    cmd = ['ceph-disk-prepare']
    if zap:
        cmd.append('--zap-disk')
    if dmcrypt:
        cmd.append('--dmcrypt')
        if dmcrypt_dir is not None:
            cmd.extend(['--dmcrypt-key-dir', dmcrypt_dir])
    # '--' terminates option parsing so device names are never
    # mistaken for flags
    cmd.append('--')
    cmd.append(disk)
    if journal is not None:
        cmd.append(journal)
    subprocess.check_call(args=cmd)

    if activate_prepared_disk:
        # replay block-device add events so the freshly prepared disk
        # is picked up and activated right away
        subprocess.check_call(
            args=['udevadm', 'trigger',
                  '--subsystem-match=block', '--action=add'],
            )
+
+
def activate_disk(cluster, disk, init):
    """
    Run on the osd node, activates a disk.

    Mounts `disk` and marks it to be started by the `init` system the
    caller detected on this host.
    """
    import subprocess

    cmd = [
        'ceph-disk-activate',
        '--mark-init', init,
        '--mount', disk,
        ]
    subprocess.check_call(args=cmd)
+
+
def prepare(args, cfg, activate_prepared_disk):
    """
    Prepare OSD data disks on the remote hosts listed in args.disk.

    Each host is bootstrapped once (cluster conf and bootstrap-osd key
    written via pushy), then ceph-disk-prepare is run for every
    host:disk[:journal] tuple.  When `activate_prepared_disk` is true
    the prepared disks are activated immediately on the remote side.

    :raises exc.GenericError: if any disk failed to prepare.
    """
    LOG.debug(
        'Preparing cluster %s disks %s',
        args.cluster,
        # journal may be None; substitute '' so the join cannot raise
        ' '.join(':'.join(x or '' for x in t) for t in args.disk),
        )

    key = get_bootstrap_osd_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, disk, journal in args.disk:
        try:
            # TODO username
            sudo = args.pushy('ssh+sudo:{hostname}'.format(
                    hostname=hostname,
                    ))

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('Deploying osd to %s', hostname)

                write_conf_r = sudo.compile(conf.write_conf)
                conf_data = StringIO()
                cfg.write(conf_data)
                write_conf_r(
                    cluster=args.cluster,
                    conf=conf_data.getvalue(),
                    overwrite=args.overwrite_conf,
                    )

                create_osd_r = sudo.compile(create_osd)
                error = create_osd_r(
                    cluster=args.cluster,
                    key=key,
                    )
                if error is not None:
                    raise exc.GenericError(error)
                LOG.debug('Host %s is now ready for osd use.', hostname)

            # BUGFIX: log the boolean flag, not the module-level
            # activate() function that shadowed it before the rename
            LOG.debug('Preparing host %s disk %s journal %s activate %s',
                      hostname, disk, journal, activate_prepared_disk)

            prepare_disk_r = sudo.compile(prepare_disk)
            # BUGFIX: keyword must match prepare_disk's renamed
            # parameter (was `activate=`, which no longer exists)
            prepare_disk_r(
                cluster=args.cluster,
                disk=disk,
                journal=journal,
                activate_prepared_disk=activate_prepared_disk,
                zap=args.zap_disk,
                dmcrypt=args.dmcrypt,
                dmcrypt_dir=args.dmcrypt_key_dir,
                )

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d OSDs' % errors)
+
def activate(args, cfg):
    """
    Activate previously prepared OSD disks on the hosts in args.disk.

    For each host, runs lsb_release remotely to pick the right init
    system, then runs ceph-disk-activate on the disk.
    """
    LOG.debug(
        'Activating cluster %s disks %s',
        args.cluster,
        # BUGFIX: journal is None for HOST:DISK entries, and
        # ':'.join() raises TypeError on None — substitute '' the same
        # way prepare() does
        ' '.join(':'.join(x or '' for x in t) for t in args.disk),
        )

    for hostname, disk, journal in args.disk:

        # TODO username
        sudo = args.pushy('ssh+sudo:{hostname}'.format(
                hostname=hostname,
                ))

        LOG.debug('Activating host %s disk %s', hostname, disk)

        lsb_release_r = sudo.compile(lsb.lsb_release)
        (distro, release, codename) = lsb_release_r()
        init = lsb.choose_init(distro, codename)
        LOG.debug('Distro %s codename %s, will use %s',
                  distro, codename, init)

        activate_disk_r = sudo.compile(activate_disk)
        activate_disk_r(
            cluster=args.cluster,
            disk=disk,
            init=init,
            )
+
+
def osd(args):
    """
    Dispatch the 'osd' subcommand (create / prepare / activate).

    'destroy' is accepted by the parser but not implemented here, so
    it falls through to the error branch.
    """
    cfg = conf.load(args)

    if args.subcommand == 'prepare':
        prepare(args, cfg, activate_prepared_disk=False)
    # BUGFIX: this must be elif — with a bare `if`, the 'prepare' case
    # above fell through to the final else and exited with an error
    # after having already prepared the disks
    elif args.subcommand == 'create':
        prepare(args, cfg, activate_prepared_disk=True)
    elif args.subcommand == 'activate':
        activate(args, cfg)
    else:
        LOG.error('subcommand %s not implemented', args.subcommand)
        sys.exit(1)
+
+
def colon_separated(s):
    """
    argparse type callable for HOST:DISK[:JOURNAL] specs.

    Returns a (host, disk, journal) tuple; journal is None when the
    two-field form is used.  Bare device names are resolved under
    /dev (absolute paths pass through unchanged, because joining an
    absolute path discards the /dev prefix).
    """
    fields = s.split(':')
    if len(fields) == 3:
        (host, disk, journal) = fields
    elif len(fields) == 2:
        (host, disk) = fields
        journal = None
    else:
        raise argparse.ArgumentTypeError('must be in form HOST:DISK[:JOURNAL]')

    # allow just "sdb" to mean /dev/sdb
    disk = os.path.join('/dev', disk)
    if journal is not None:
        journal = os.path.join('/dev', journal)

    return (host, disk, journal)
+
+
@priority(50)
def make(parser):
    """
    Prepare a data disk on remote host.
    """
    # NOTE(review): 'destroy' is offered as a choice but osd() has no
    # branch for it, so it currently ends in "not implemented".
    parser.add_argument(
        'subcommand',
        metavar='SUBCOMMAND',
        choices=[
            'create',
            'prepare',
            'activate',
            'destroy',
            ],
        help='create (prepare+activate), prepare, activate, or destroy',
        )
    # each value is parsed into a (host, disk, journal) tuple
    parser.add_argument(
        'disk',
        nargs='+',
        metavar='HOST:DISK[:JOURNAL]',
        type=colon_separated,
        help='host and disk to prepare',
        )
    parser.add_argument(
        '--zap-disk',
        action='store_true', default=None,
        help='destroy existing partition table and content for DISK',
        )
    parser.add_argument(
        '--dmcrypt',
        action='store_true', default=None,
        help='use dm-crypt on DISK',
        )
    parser.add_argument(
        '--dmcrypt-key-dir',
        metavar='KEYDIR',
        default='/etc/ceph/dmcrypt-keys',
        help='directory where dm-crypt keys are stored',
        )
    parser.set_defaults(
        func=osd,
        )
diff --git a/ceph_deploy/sudo_pushy.py b/ceph_deploy/sudo_pushy.py
new file mode 100644 (file)
index 0000000..892f7ee
--- /dev/null
@@ -0,0 +1,16 @@
+import pushy.transport.ssh
+
+
class SshSudoTransport(object):
    """
    pushy transport that runs every remote command under sudo, by
    delegating to the stock ssh transport with a 'sudo' prefix.
    """

    @staticmethod
    def Popen(command, *a, **kw):
        # prepend sudo, then hand off unchanged
        return pushy.transport.ssh.Popen(['sudo'] + command, *a, **kw)
+
+
def patch():
    """
    Monkey patches pushy so it supports running via (passphraseless)
    sudo on the remote host.
    """
    # register under its own URL scheme; callers then connect with
    # pushy URLs of the form 'ssh+sudo:HOST'
    pushy.transports['ssh+sudo'] = SshSudoTransport
diff --git a/ceph_deploy/test/__init__.py b/ceph_deploy/test/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/ceph_deploy/test/conftest.py b/ceph_deploy/test/conftest.py
new file mode 100644 (file)
index 0000000..819fc34
--- /dev/null
@@ -0,0 +1,98 @@
+import logging
+import os
+import subprocess
+import sys
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _prepend_path(env):
+    """
+    Make sure the PATH contains the location where the Python binary
+    lives. This makes sure cli tools installed in a virtualenv work.
+    """
+    if env is None:
+        env = os.environ
+    env = dict(env)
+    new = os.path.dirname(sys.executable)
+    path = env.get('PATH')
+    if path is not None:
+        new = new + ':' + path
+    env['PATH'] = new
+    return env
+
+
class CLIFailed(Exception):
    """CLI tool failed"""
    # NOTE: __str__ interpolates self.__doc__, so the docstring above
    # is part of the user-visible error message — keep it short.

    def __init__(self, args, status):
        # args: argv of the CLI invocation (assigning self.args also
        # populates BaseException.args)
        self.args = args
        # status: the process's non-zero exit code
        self.status = status

    def __str__(self):
        return '{doc}: {args}: exited with status {status}'.format(
            doc=self.__doc__,
            args=self.args,
            status=self.status,
            )
+
+
class CLIProcess(object):
    """
    Context manager around subprocess.Popen for exercising CLI tools.

    Entering spawns the process (an un-launchable executable becomes
    an AssertionError, i.e. a test failure); exiting waits for it and
    raises CLIFailed on a non-zero exit code — unless another
    exception is already propagating, in which case it only logs.
    """
    def __init__(self, **kw):
        # kw is passed through verbatim to subprocess.Popen
        self.kw = kw

    def __enter__(self):
        try:
            self.p = subprocess.Popen(**self.kw)
        except OSError as e:
            # surface a missing/broken executable as a plain test
            # failure instead of an obscure OSError traceback
            raise AssertionError(
                'CLI tool {args!r} does not work: {err}'.format(
                    args=self.kw['args'],
                    err=e,
                    ),
                )
        else:
            return self.p

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.p.wait()
        if self.p.returncode != 0:
            err = CLIFailed(
                args=self.kw['args'],
                status=self.p.returncode,
                )
            if exc_type is None:
                # nothing else raised, so we should complain; if
                # something else failed, we'll just log
                raise err
            else:
                LOG.error(str(err))
+
+
class CLITester(object):
    """
    Factory for CLIProcess contexts rooted at a test's tmpdir, with a
    normalized environment (virtualenv-aware PATH, fixed COLUMNS).
    """

    # provide easy way for caller to access the exception class
    # without importing us
    Failed = CLIFailed

    def __init__(self, tmpdir):
        self.tmpdir = tmpdir

    def __call__(self, **kw):
        # default the working directory to the per-test tmpdir
        kw.setdefault('cwd', str(self.tmpdir))
        env = _prepend_path(kw.get('env'))
        # pin terminal width so argparse help output is reproducible
        env['COLUMNS'] = '80'
        kw['env'] = env
        return CLIProcess(**kw)
+
+
def pytest_funcarg__cli(request):
    """
    Test command line behavior.
    """
    # the tmpdir here will be the same value as the test function
    # sees; we rely on that to let caller prepare and introspect
    # any files the cli tool will read or create
    return CLITester(tmpdir=request.getfuncargvalue('tmpdir'))
diff --git a/ceph_deploy/test/directory.py b/ceph_deploy/test/directory.py
new file mode 100644 (file)
index 0000000..81d3e19
--- /dev/null
@@ -0,0 +1,13 @@
+import contextlib
+import os
+
+
@contextlib.contextmanager
def directory(path):
    """
    Context manager: chdir into *path* for the duration of the body,
    always restoring the previous working directory afterwards.
    """
    # hold an fd on the current directory so fchdir() can restore it
    # even if its pathname changes meanwhile
    saved_fd = os.open('.', os.O_RDONLY | os.O_DIRECTORY)
    try:
        os.chdir(path)
        yield
    finally:
        os.fchdir(saved_fd)
        os.close(saved_fd)
diff --git a/ceph_deploy/test/test_cli.py b/ceph_deploy/test/test_cli.py
new file mode 100644 (file)
index 0000000..c801ed7
--- /dev/null
@@ -0,0 +1,60 @@
+import pytest
+import subprocess
+
+
def test_help(tmpdir, cli):
    """`ceph-deploy --help` exits 0 and prints the full usage text."""
    # NOTE(review): the expected 'new' description below lacks the
    # "and keyring" wording present in new.py's make() docstring —
    # one of the two looks stale; confirm.
    with cli(
        args=['ceph-deploy', '--help'],
        stdout=subprocess.PIPE,
        ) as p:
        got = p.stdout.read()
        assert got == """\
usage: ceph-deploy [-h] [-v] [--cluster NAME] COMMAND ...

Deploy Ceph

optional arguments:
  -h, --help      show this help message and exit
  -v, --verbose   be more verbose
  --cluster NAME  name of the cluster

commands:
  COMMAND         description
    new           Start deploying a new cluster, and write a CLUSTER.conf for
                  it.
    install       Install Ceph packages on remote hosts.
    mon           Deploy ceph monitor on remote hosts.
    osd           Prepare a data disk on remote host.
"""


def test_bad_command(tmpdir, cli):
    """An unknown subcommand exits with status 2 and creates no files."""
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'bork'],
            stderr=subprocess.PIPE,
            ) as p:
            got = p.stderr.read()
            assert got == """\
usage: ceph-deploy [-h] [-v] [--cluster NAME] COMMAND ...
ceph-deploy: error: argument COMMAND: invalid choice: 'bork' (choose from 'new', 'install', 'mon', 'osd')
"""

    assert err.value.status == 2
    assert {p.basename for p in tmpdir.listdir()} == set()


def test_bad_cluster(tmpdir, cli):
    """A non-alphanumeric --cluster name exits 2 and creates no files."""
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', '--cluster=/evil-this-should-not-be-created', 'new'],
            stderr=subprocess.PIPE,
            ) as p:
            got = p.stderr.read()
            assert got == """\
usage: ceph-deploy [-h] [-v] [--cluster NAME] COMMAND ...
ceph-deploy: error: argument --cluster: argument must start with a letter and contain only letters and numbers
"""

    assert err.value.status == 2
    assert {p.basename for p in tmpdir.listdir()} == set()
diff --git a/ceph_deploy/test/test_cli_install.py b/ceph_deploy/test/test_cli_install.py
new file mode 100644 (file)
index 0000000..ec47dc6
--- /dev/null
@@ -0,0 +1,89 @@
+import argparse
+import collections
+import mock
+import pytest
+import subprocess
+
+from ..cli import main
+from .. import install
+
+from .directory import directory
+
+
def test_help(tmpdir, cli):
    """`ceph-deploy install --help` prints the subcommand usage."""
    with cli(
        args=['ceph-deploy', 'install', '--help'],
        stdout=subprocess.PIPE,
        ) as p:
        got = p.stdout.read()
        assert got == """\
usage: ceph-deploy install [-h] [--stable [CODENAME] | --testing | --dev
                           [BRANCH_OR_TAG]]
                           HOST [HOST ...]

Install Ceph packages on remote hosts.

positional arguments:
  HOST                  hosts to install on

optional arguments:
  -h, --help            show this help message and exit
  --stable [CODENAME]   install a release known as CODENAME (done by default)
                        (default: argonaut)
  --testing             install the latest development release
  --dev [BRANCH_OR_TAG]
                        install a bleeding edge build from Git branch or tag
                        (default: master)
"""


def test_bad_no_host(tmpdir, cli):
    """Omitting the HOST argument exits with argparse status 2."""
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'install'],
            stderr=subprocess.PIPE,
            ) as p:
            got = p.stderr.read()
            assert got == """\
usage: ceph-deploy install [-h] [--stable [CODENAME] | --testing | --dev
                           [BRANCH_OR_TAG]]
                           HOST [HOST ...]
ceph-deploy install: error: too few arguments
"""

    assert err.value.status == 2


def test_simple(tmpdir):
    """In-process install run with pushy mocked out: expects one
    ssh+sudo connection, one lsb_release call, one Ubuntu install."""
    ns = argparse.Namespace()
    ns.pushy = mock.Mock()
    conn = mock.NonCallableMock(name='PushyClient')
    ns.pushy.return_value = conn

    # every remote-compiled function gets its own Mock, keyed by the
    # original function object
    mock_compiled = collections.defaultdict(mock.Mock)
    conn.compile.side_effect = mock_compiled.__getitem__

    mock_compiled[install.lsb_release].return_value = ('Ubuntu', 'precise')

    try:
        with directory(str(tmpdir)):
            main(
                args=['-v', 'install', 'storehost1'],
                namespace=ns,
                )
    except SystemExit as e:
        raise AssertionError('Unexpected exit: %s', e)

    ns.pushy.assert_has_calls([
            mock.call('ssh+sudo:storehost1'),
        ])

    mock_compiled.pop(install.lsb_release).assert_called_once_with()

    mock_compiled.pop(install.install_ubuntu).assert_called_once_with(
        version_kind='stable',
        codename='precise',
        version='argonaut',
        )

    # no other remote function may have been compiled
    assert mock_compiled == {}
diff --git a/ceph_deploy/test/test_cli_mon.py b/ceph_deploy/test/test_cli_mon.py
new file mode 100644 (file)
index 0000000..6e90aaa
--- /dev/null
@@ -0,0 +1,112 @@
+import argparse
+import collections
+import mock
+import pytest
+import subprocess
+
+from ..cli import main
+from .. import mon
+
+from .directory import directory
+
+
def test_help(tmpdir, cli):
    """`ceph-deploy mon --help` prints the subcommand usage."""
    with cli(
        args=['ceph-deploy', 'mon', '--help'],
        stdout=subprocess.PIPE,
        ) as p:
        got = p.stdout.read()
        assert got == """\
usage: ceph-deploy mon [-h] [HOST [HOST ...]]

Deploy ceph monitor on remote hosts.

positional arguments:
  HOST        host to deploy on

optional arguments:
  -h, --help  show this help message and exit
"""


def test_bad_no_conf(tmpdir, cli):
    """Running mon without a ceph.conf in cwd exits with status 1."""
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'mon'],
            stderr=subprocess.PIPE,
            ) as p:
            got = p.stderr.read()
            assert got == """\
ceph-deploy: Cannot load config: [Errno 2] No such file or directory: 'ceph.conf'
"""

    assert err.value.status == 1


def test_bad_no_mon(tmpdir, cli):
    """An empty ceph.conf (no mon members, no hosts) exits status 1."""
    with tmpdir.join('ceph.conf').open('w'):
        pass
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'mon'],
            stderr=subprocess.PIPE,
            ) as p:
            got = p.stderr.read()
            assert got == """\
ceph-deploy: No hosts specified to deploy to.
"""

    assert err.value.status == 1


def test_simple(tmpdir):
    """In-process mon deploy with pushy mocked: the host from
    'mon initial members' gets the conf written and a mon created."""
    with tmpdir.join('ceph.conf').open('w') as f:
        f.write("""\
[global]
fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0
mon initial members = host1
""")

    ns = argparse.Namespace()
    ns.pushy = mock.Mock()
    conn = mock.NonCallableMock(name='PushyClient')
    ns.pushy.return_value = conn

    mock_compiled = collections.defaultdict(mock.Mock)
    conn.compile.side_effect = mock_compiled.__getitem__

    MON_SECRET = 'AQBWDj5QAP6LHhAAskVBnUkYHJ7eYREmKo5qKA=='

    def _create_mon(cluster, get_monitor_secret):
        # the callback handed to the remote side must yield the secret
        secret = get_monitor_secret()
        assert secret == MON_SECRET

    mock_compiled[mon.create_mon].side_effect = _create_mon

    try:
        with directory(str(tmpdir)):
            main(
                args=['-v', 'mon'],
                namespace=ns,
                )
    except SystemExit as e:
        raise AssertionError('Unexpected exit: %s', e)

    ns.pushy.assert_called_once_with('ssh+sudo:host1')

    mock_compiled.pop(mon.write_conf).assert_called_once_with(
        cluster='ceph',
        conf="""\
[global]
fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0
mon_initial_members = host1

""",
        )

    mock_compiled.pop(mon.create_mon).assert_called_once_with(
        cluster='ceph',
        get_monitor_secret=mock.ANY,
        )

    # no other remote function may have been compiled
    assert mock_compiled == {}
diff --git a/ceph_deploy/test/test_cli_new.py b/ceph_deploy/test/test_cli_new.py
new file mode 100644 (file)
index 0000000..cb2aba6
--- /dev/null
@@ -0,0 +1,110 @@
+import pytest
+import re
+import subprocess
+import uuid
+
+from .. import conf
+
+
def test_help(tmpdir, cli):
    """`ceph-deploy new --help` prints the subcommand usage."""
    with cli(
        args=['ceph-deploy', 'new', '--help'],
        stdout=subprocess.PIPE,
        ) as p:
        got = p.stdout.read()
        assert got == """\
usage: ceph-deploy new [-h] [MON [MON ...]]

Start deploying a new cluster, and write a CLUSTER.conf for it.

positional arguments:
  MON         initial monitor hosts

optional arguments:
  -h, --help  show this help message and exit
"""


def test_simple(tmpdir, cli):
    """A bare `new` writes ceph.conf with a single [global] section."""
    with cli(
        args=['ceph-deploy', 'new'],
        ):
        pass
    assert {p.basename for p in tmpdir.listdir()} == {'ceph.conf'}
    with tmpdir.join('ceph.conf').open() as f:
        cfg = conf.parse(f)
    assert cfg.sections() == ['global']


def test_named(tmpdir, cli):
    """--cluster=foo names the written config foo.conf."""
    with cli(
        args=['ceph-deploy', '--cluster=foo', 'new'],
        ):
        pass
    assert {p.basename for p in tmpdir.listdir()} == {'foo.conf'}


def test_exists(tmpdir, cli):
    """A second `new` fails with status 1 and leaves no temp files."""
    with cli(
        args=['ceph-deploy', 'new'],
        ):
        pass
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'new'],
            stderr=subprocess.PIPE,
            ) as p:
            got = p.stderr.read()
            assert got == """\
ceph-deploy: Cluster config exists already: ceph.conf
"""

    assert err.value.status == 1
    # no temp files left around
    assert {p.basename for p in tmpdir.listdir()} == {'ceph.conf'}


def pytest_funcarg__newcfg(request):
    """Fixture: run `ceph-deploy new [MON...]` and return the parsed
    resulting ceph.conf."""
    tmpdir = request.getfuncargvalue('tmpdir')
    cli = request.getfuncargvalue('cli')

    def new(*args):
        with cli(
            args=['ceph-deploy', 'new'] + list(args),
            ):
            pass
        with tmpdir.join('ceph.conf').open() as f:
            cfg = conf.parse(f)
        return cfg
    return new


def test_uuid(newcfg):
    """fsid must be a valid, canonically formatted, version-4 UUID."""
    cfg = newcfg()
    fsid = cfg.get('global', 'fsid')
    # make sure it's a valid uuid
    uuid.UUID(hex=fsid)
    # make sure it looks pretty, too
    UUID_RE = re.compile(
        r'^[0-9a-f]{8}-'
        + r'[0-9a-f]{4}-'
        # constant 4 here, we want to enforce randomness and not leak
        # MACs or time
        + r'4[0-9a-f]{3}-'
        + r'[0-9a-f]{4}-'
        + r'[0-9a-f]{12}$',
        )
    assert UUID_RE.match(fsid)


def test_mons(newcfg):
    """All MON arguments end up in mon_initial_members, in order."""
    cfg = newcfg('node01', 'node07', 'node34')
    mon_initial_members = cfg.get('global', 'mon_initial_members')
    assert mon_initial_members == 'node01, node07, node34'


def test_defaults(newcfg):
    """The pre-bobtail workaround defaults are written to [global]."""
    cfg = newcfg()
    assert cfg.get('global', 'auth_supported') == 'cephx'
    assert cfg.get('global', 'osd_journal_size') == '1024'
    assert cfg.get('global', 'filestore_xattr_use_omap') == 'true'
diff --git a/ceph_deploy/test/test_cli_osd.py b/ceph_deploy/test/test_cli_osd.py
new file mode 100644 (file)
index 0000000..f39981a
--- /dev/null
@@ -0,0 +1,137 @@
+import argparse
+import collections
+import mock
+import pytest
+import subprocess
+
+from ..cli import main
+from .. import osd
+
+from .directory import directory
+
+
def test_help(tmpdir, cli):
    """`ceph-deploy osd --help` prints the subcommand usage."""
    # NOTE(review): this expected text (and the tests below) use a
    # subcommand-less 'osd HOST:DISK' cli, while osd.py's make() in
    # this commit requires a SUBCOMMAND argument — tests look stale.
    with cli(
        args=['ceph-deploy', 'osd', '--help'],
        stdout=subprocess.PIPE,
        ) as p:
        got = p.stdout.read()
        assert got == """\
usage: ceph-deploy osd [-h] HOST:DISK [HOST:DISK ...]

Prepare a data disk on remote host.

positional arguments:
  HOST:DISK   host and disk to prepare

optional arguments:
  -h, --help  show this help message and exit
"""


def test_bad_no_conf(tmpdir, cli):
    """Running osd without a ceph.conf in cwd exits with status 1."""
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'osd', 'fakehost:/does-not-exist'],
            stderr=subprocess.PIPE,
            ) as p:
            got = p.stderr.read()
            assert got == """\
ceph-deploy: Cannot load config: [Errno 2] No such file or directory: 'ceph.conf'
"""

    assert err.value.status == 1


def test_bad_no_disk(tmpdir, cli):
    """Omitting the HOST:DISK argument exits with argparse status 2."""
    with tmpdir.join('ceph.conf').open('w'):
        pass
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'osd'],
            stderr=subprocess.PIPE,
            ) as p:
            got = p.stderr.read()
            assert got == """\
usage: ceph-deploy osd [-h] HOST:DISK [HOST:DISK ...]
ceph-deploy osd: error: too few arguments
"""

    assert err.value.status == 2


def test_simple(tmpdir):
    """In-process osd run with pushy mocked: mon host supplies the
    bootstrap key, osd host gets conf + osd + prepared disk."""
    # NOTE(review): this references osd.write_conf and a find_key
    # kwarg for create_osd; osd.py in this commit uses conf.write_conf
    # and create_osd(cluster, key) — likely stale, verify before use.
    with tmpdir.join('ceph.conf').open('w') as f:
        f.write("""\
[global]
fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0
mon host = host1
""")

    ns = argparse.Namespace()

    conn_osd = mock.NonCallableMock(name='PushyClient')
    mock_compiled_osd = collections.defaultdict(mock.Mock)
    conn_osd.compile.side_effect = mock_compiled_osd.__getitem__

    conn_mon = mock.NonCallableMock(name='PushyClient')
    mock_compiled_mon = collections.defaultdict(mock.Mock)
    conn_mon.compile.side_effect = mock_compiled_mon.__getitem__

    ns.pushy = mock.Mock()

    def _conn(url):
        # route connections to the right mock by target host
        if url == 'ssh+sudo:host1':
            return conn_mon
        elif url == 'ssh+sudo:storehost1':
            return conn_osd
        else:
            raise AssertionError('Unexpected connection url: %r', url)
    ns.pushy.side_effect = _conn

    BOOTSTRAP_KEY = 'fakekeyring'

    mock_compiled_mon[osd.get_bootstrap_osd_key].return_value = BOOTSTRAP_KEY

    def _create_osd(cluster, find_key):
        key = find_key()
        assert key == BOOTSTRAP_KEY

    mock_compiled_osd[osd.create_osd].side_effect = _create_osd

    try:
        with directory(str(tmpdir)):
            main(
                args=['-v', 'osd', 'storehost1:sdc'],
                namespace=ns,
                )
    except SystemExit as e:
        raise AssertionError('Unexpected exit: %s', e)

    mock_compiled_mon.pop(osd.get_bootstrap_osd_key).assert_called_once_with(
        cluster='ceph',
        )

    assert mock_compiled_mon == {}

    mock_compiled_osd.pop(osd.write_conf).assert_called_once_with(
        cluster='ceph',
        conf="""\
[global]
fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0
mon_host = host1

""",
        )

    mock_compiled_osd.pop(osd.create_osd).assert_called_once_with(
        cluster='ceph',
        find_key=mock.ANY,
        )

    mock_compiled_osd.pop(osd.prepare_disk).assert_called_once_with(
        cluster='ceph',
        disk='/dev/sdc',
        )

    assert mock_compiled_osd == {}
diff --git a/ceph_deploy/test/test_conf.py b/ceph_deploy/test/test_conf.py
new file mode 100644 (file)
index 0000000..faa3688
--- /dev/null
@@ -0,0 +1,59 @@
+from cStringIO import StringIO
+from .. import conf
+
+
def test_simple():
    """Plain key = value pairs parse."""
    f = StringIO("""\
[foo]
bar = baz
""")
    cfg = conf.parse(f)
    assert cfg.get('foo', 'bar') == 'baz'


def test_indent_space():
    """Space-indented options parse like unindented ones."""
    f = StringIO("""\
[foo]
        bar = baz
""")
    cfg = conf.parse(f)
    assert cfg.get('foo', 'bar') == 'baz'


def test_indent_tab():
    """Tab-indented options parse like unindented ones."""
    f = StringIO("""\
[foo]
\tbar = baz
""")
    cfg = conf.parse(f)
    assert cfg.get('foo', 'bar') == 'baz'


def test_words_underscore():
    """Underscore-joined keys are readable with spaces too."""
    f = StringIO("""\
[foo]
bar_thud = baz
""")
    cfg = conf.parse(f)
    assert cfg.get('foo', 'bar_thud') == 'baz'
    assert cfg.get('foo', 'bar thud') == 'baz'


def test_words_space():
    """Space-separated keys are readable with underscores too."""
    f = StringIO("""\
[foo]
bar thud = baz
""")
    cfg = conf.parse(f)
    assert cfg.get('foo', 'bar_thud') == 'baz'
    assert cfg.get('foo', 'bar thud') == 'baz'


def test_words_many():
    """Mixed runs of underscores/spaces normalize to single separators."""
    f = StringIO("""\
[foo]
bar__ thud   quux = baz
""")
    cfg = conf.parse(f)
    assert cfg.get('foo', 'bar_thud_quux') == 'baz'
    assert cfg.get('foo', 'bar thud quux') == 'baz'
diff --git a/ceph_deploy/validate.py b/ceph_deploy/validate.py
new file mode 100644 (file)
index 0000000..8ef5e73
--- /dev/null
@@ -0,0 +1,16 @@
+import argparse
+import re
+
+
# one leading letter, then any mix of letters and digits
ALPHANUMERIC_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*$')


def alphanumeric(s):
    """
    Enforces string to be alphanumeric with leading alpha.

    argparse type callable: returns *s* unchanged when it matches,
    raises ArgumentTypeError otherwise (also for the empty string).
    """
    if ALPHANUMERIC_RE.match(s):
        return s
    raise argparse.ArgumentTypeError(
        'argument must start with a letter and contain only letters and numbers',
        )
diff --git a/ceph_deploy/zapdisk.py b/ceph_deploy/zapdisk.py
new file mode 100644 (file)
index 0000000..ebd0b77
--- /dev/null
@@ -0,0 +1,75 @@
+import argparse
+import logging
+import os
+
+from . import conf
+from .cliutil import priority
+
+LOG = logging.getLogger(__name__)
+
+
# NOTE: this mirrors ceph-disk-prepare --zap-disk DEV
def zap(dev):
    """
    Run on the remote host: destroy partition metadata on *dev*.

    Zeroes the last 33 * 4096 bytes of the device (presumably to wipe
    the backup GPT kept at the end of the disk — confirm), then runs
    sgdisk to clear the table and convert to GPT.

    :raises RuntimeError: wrapping any sgdisk failure.
    """
    import subprocess

    try:
        # this kills the crab
        #
        # NOTE(review): 'wb' on a regular file would truncate it; this
        # assumes dev is a block device, where open-for-write is safe
        lba_size = 4096
        size = 33 * lba_size
        with file(dev, 'wb') as f:
            f.seek(-size, os.SEEK_END)
            f.write(size*'\0')

        subprocess.check_call(
            args=[
                'sgdisk',
                '--zap-all',
                '--clear',
                '--mbrtogpt',
                '--',
                dev,
                ],
            )
    except subprocess.CalledProcessError as e:
        # re-wrap so the caller gets a single predictable error type
        raise RuntimeError(e)
+
def zapdisk(args):
    """
    Zap every HOST:DISK pair given on the command line.

    Loads the cluster config first (so a missing config fails early),
    then runs zap() on each remote host via pushy.
    """
    cfg = conf.load(args)

    for hostname, disk in args.disk:
        LOG.debug('zapping %s on %s', disk, hostname)

        # TODO username
        sudo = args.pushy('ssh+sudo:{hostname}'.format(
                hostname=hostname,
                ))
        sudo.compile(zap)(disk)
+
def colon_separated(s):
    """
    argparse type callable for HOST:DISK specs.

    Returns a (host, disk) tuple with the disk resolved under /dev;
    absolute device paths pass through unchanged.
    """
    fields = s.split(':')
    if len(fields) != 2:
        raise argparse.ArgumentTypeError('must be in form HOST:DISK')
    (host, disk) = fields

    # allow just "sdb" to mean /dev/sdb
    disk = os.path.join('/dev', disk)

    return (host, disk)
+
+
@priority(50)
def make(parser):
    """
    Zap a data disk on a remote host.
    """
    # each value is parsed into a (host, disk) tuple
    parser.add_argument(
        'disk',
        nargs='+',
        metavar='HOST:DISK',
        type=colon_separated,
        help='host and disk to zap',
        )
    parser.set_defaults(
        func=zapdisk,
        )
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644 (file)
index 0000000..dbc0d19
--- /dev/null
@@ -0,0 +1,3 @@
+pytest >=2.1.3
+tox >=1.2
+mock >=1.0b1
diff --git a/requirements.txt b/requirements.txt
new file mode 100644 (file)
index 0000000..41b5dc8
--- /dev/null
@@ -0,0 +1 @@
+pushy >=0.5.1
diff --git a/setup.cfg b/setup.cfg
new file mode 100644 (file)
index 0000000..d9ec107
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,2 @@
+[pytest]
+norecursedirs = .* _* virtualenv
diff --git a/setup.py b/setup.py
new file mode 100644 (file)
index 0000000..14d1f02
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,63 @@
+#!/usr/bin/python
+from setuptools import setup, find_packages
+import os
+import sys
+
+
def read(fname):
    """
    Return the contents of *fname*, resolved relative to this file.

    Used to embed README.rst as the package's long description.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    # BUGFIX: the handle was previously leaked; close it deterministically
    with open(path) as f:
        return f.read()
+
# argparse only joined the stdlib in 2.7 / 3.2; pull it from PyPI on
# older interpreters
pyversion = sys.version_info[:2]
needs_argparse = pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1)
install_requires = ['argparse'] if needs_argparse else []
+
# Package metadata; each 'ceph_deploy.cli' entry point registers one
# subcommand's make() with the ceph-deploy command-line driver.
setup(
    name='ceph-deploy',
    version='0.0.1',
    packages=find_packages(),

    author='Tommi Virtanen',
    author_email='tommi.virtanen@inktank.com',
    description='Deploy Ceph with minimal infrastructure',
    long_description=read('README.rst'),
    license='MIT',
    keywords='ceph deploy',
    url="https://github.com/ceph/ceph-deploy",

    install_requires=[
        'setuptools',
        'pushy >=0.5.1',
        ] + install_requires,

    tests_require=[
        'pytest >=2.1.3',
        'mock >=1.0b1',
        ],

    entry_points={

        'console_scripts': [
            'ceph-deploy = ceph_deploy.cli:main',
            ],

        'ceph_deploy.cli': [
            'new = ceph_deploy.new:make',
            'discover = ceph_deploy.discover:make',
            'install = ceph_deploy.install:make',
            'uninstall = ceph_deploy.install:make_uninstall',
            'purge = ceph_deploy.install:make_purge',
            'mon = ceph_deploy.mon:make',
            'gatherkeys = ceph_deploy.gatherkeys:make',
            'osd = ceph_deploy.osd:make',
            'mds = ceph_deploy.mds:make',
            'forgetkeys = ceph_deploy.forgetkeys:make',
            'config = ceph_deploy.config:make',
            'admin = ceph_deploy.admin:make',
            'zapdisk = ceph_deploy.zapdisk:make',
            ],

        },
    )
diff --git a/tox.ini b/tox.ini
new file mode 100644 (file)
index 0000000..c65b7af
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,8 @@
+[tox]
+envlist = py27
+
+[testenv]
+deps=
+  pytest
+  mock
+commands=py.test {posargs:ceph_deploy}