--- /dev/null
+*~
+.#*
+## the next line needs to start with a backslash to avoid looking like
+## a comment
+\#*#
+.*.swp
+
+*.pyc
+*.pyo
+*.egg-info
+/build
+/dist
+
+/virtualenv
+/.tox
+
+/ceph-deploy
+/*.conf
--- /dev/null
+
+1.2.1
+-----
+* Print the help when no arguments are passed
+* Add a ``--version`` flag
+* Show the version in the help menu
+* Catch ``DeployError`` exceptions nicely with the logger
+* Fix blocked command when calling ``mon create``
+* default to ``dumpling`` for installs
+* halt execution on remote exceptions
+
+
+1.2
+---
+* Better logging output
+* Remote logging for individual actions for ``install`` and ``mon create``
+* Install ``ca-certificates`` on all Debian-based distros
+* Honor the usage of ``--cluster``
+* Do not ``rm -rf`` monitor logs when destroying
+* Error out when ``ceph-deploy new [IP]`` is used
+* Log the ceph version when installing
--- /dev/null
+Copyright (c) 2012 Inktank Storage, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
--- /dev/null
+include *.rst
+include LICENSE
+include scripts/ceph-deploy
+prune ceph_deploy/test
--- /dev/null
+========================================================
+ ceph-deploy -- Deploy Ceph with minimal infrastructure
+========================================================
+
+``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to
+the servers, ``sudo``, and some Python. It runs fully on your
+workstation, requiring no servers, databases, or anything like that.
+
+If you set up and tear down Ceph clusters a lot, and want minimal
+extra bureaucracy, this is for you.
+
+It is not a generic deployment system, it is only for Ceph, and is designed
+for users who want to quickly get Ceph running with sensible initial settings
+without the overhead of installing Chef, Puppet or Juju.
+
+It does not handle client configuration beyond pushing the Ceph config file
+and users who want fine-control over security settings, partitions or directory
+locations should use a tool such as Chef or Puppet.
+
+Installation
+============
+Depending on what type of usage you are going to have with ``ceph-deploy`` you
+might want to look into the different ways to install it. For automation, you
+might want to ``bootstrap`` directly. Regular users of ``ceph-deploy`` would
+probably install from the OS packages or from the Python Package Index.
+
+Python Package Index
+--------------------
+If you are familiar with Python install tools (like ``pip`` and
+``easy_install``) you can easily install ``ceph-deploy`` like::
+
+ pip install ceph-deploy
+
+or::
+
+ easy_install ceph-deploy
+
+
+It should grab all the dependencies for you and install into the current user's
+environment.
+
+We highly recommend using ``virtualenv`` and installing dependencies in
+a contained way.
+
+
+DEB
+---
+The DEB repo can be found at http://ceph.com/packages/ceph-extras/debian/
+
+But they can also be found for ``ceph`` releases in the ``ceph`` repos like::
+
+ ceph.com/debian-{release}
+ ceph.com/debian-testing
+
+RPM
+---
+The RPM repos can be found at http://ceph.com/packages/ceph-extras/rpm/
+
+Make sure you add the proper one for your distribution.
+
+But they can also be found for ``ceph`` releases in the ``ceph`` repos like::
+
+ ceph.com/rpm-{release}
+ ceph.com/rpm-testing
+
+
bootstrapping
-------------
+To get the source tree ready for use, run this once::
+
+ ./bootstrap
+
You can symlink the ``ceph-deploy`` script from this directory to somewhere
+convenient (like ``~/bin``), or add the current directory to ``PATH``,
+or just always type the full path to ``ceph-deploy``.
+
+ceph-deploy at a minimum requires that the machine from which the script is
+being run can ssh as root without password into each Ceph node.
+
+To enable this generate a new ssh keypair for the root user with no passphrase
+and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in::
+
+ /root/.ssh/authorized_keys
+
+and ensure that the following lines are in the sshd config::
+
+ PermitRootLogin yes
+ PermitEmptyPasswords yes
+
+The machine running ceph-deploy does not need to have the Ceph packages installed
+unless it needs to admin the cluster directly using the ``ceph`` command line tool.
+
+Managing an existing cluster
+============================
+
+You can use ceph-deploy to provision nodes for an existing cluster.
+To grab a copy of the cluster configuration file (normally
+``ceph.conf``)::
+
+ ceph-deploy config pull HOST
+
+You will usually also want to gather the encryption keys used for that
+cluster::
+
+ ceph-deploy gatherkeys MONHOST
+
+At this point you can skip the steps below that create a new cluster
(you already have one) and optionally skip installation and/or monitor
+creation, depending on what you are trying to accomplish.
+
+
+Creating a new cluster
+======================
+
+Creating a new configuration
+----------------------------
+
+To create a new configuration file and secret key, decide what hosts
+will run ``ceph-mon``, and run::
+
+ ceph-deploy new MON [MON..]
+
+listing the hostnames of the monitors. Each ``MON`` can be
+
+ * a simple hostname. It must be DNS resolvable without the fully
+ qualified domain name.
+ * a fully qualified domain name. The hostname is assumed to be the
+ leading component up to the first ``.``.
+ * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified
+ domain name or IP address. For example, ``foo``,
+ ``foo.example.com``, ``foo:something.example.com``, and
+ ``foo:1.2.3.4`` are all valid. Note, however, that the hostname
+ should match that configured on the host ``foo``.
+
+The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your
+current directory.
+
+
+Edit initial cluster configuration
+----------------------------------
+
+You want to review the generated ``ceph.conf`` file and make sure that
+the ``mon_host`` setting contains the IP addresses you would like the
+monitors to bind to. These are the IPs that clients will initially
+contact to authenticate to the cluster, and they need to be reachable
+both by external client-facing hosts and internal cluster daemons.
+
+Installing packages
+===================
+
+To install the Ceph software on the servers, run::
+
+ ceph-deploy install HOST [HOST..]
+
+This installs the current default *stable* release. You can choose a
+different release track with command line options, for example to use
+a release candidate::
+
+ ceph-deploy install --testing HOST
+
+Or to test a development branch::
+
+ ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..]
+
+
+Proxy or Firewall Installs
+--------------------------
+If attempting to install behind a firewall or through a proxy you will need to
+set the `wget` env variables to point to the right hosts, for example::
+
+ http_proxy=http://host:port
+ ftp_proxy=http://host:port
+ https_proxy=http://host:port
+
+
+Deploying monitors
+==================
+
+To actually deploy ``ceph-mon`` to the hosts you chose, run::
+
+ ceph-deploy mon create HOST [HOST..]
+
+Without explicit hosts listed, hosts in ``mon_initial_members`` in the
+config file are deployed. That is, the hosts you passed to
+``ceph-deploy new`` are the default value here.
+
+Gather keys
+===========
+
To gather authentication keys (for administering the cluster and
+bootstrapping new nodes) to the local directory, run::
+
+ ceph-deploy gatherkeys HOST [HOST...]
+
+where ``HOST`` is one of the monitor hosts.
+
+Once these keys are in the local directory, you can provision new OSDs etc.
+
+
+Deploying OSDs
+==============
+
+To prepare a node for running OSDs, run::
+
+ ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...]
+
+After that, the hosts will be running OSDs for the given data disks.
+If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be
+created and GPT labels will be used to mark and automatically activate
+OSD volumes. If an existing partition is specified, the partition
+table will not be modified. If you want to destroy the existing
+partition table on DISK first, you can include the ``--zap-disk``
+option.
+
+If there is already a prepared disk or directory that is ready to become an
+OSD, you can also do::
+
+ ceph-deploy osd activate HOST:DIR[:JOURNAL] [...]
+
+This is useful when you are managing the mounting of volumes yourself.
+
+
+Admin hosts
+===========
+
+To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring``
+keyring so that it can administer the cluster, run::
+
+ ceph-deploy admin HOST [HOST ...]
+
+Forget keys
+===========
+
+The ``new`` and ``gatherkeys`` put some Ceph authentication keys in keyrings in
+the local directory. If you are worried about them being there for security
+reasons, run::
+
+ ceph-deploy forgetkeys
+
+and they will be removed. If you need them again later to deploy additional
+nodes, simply re-run::
+
+ ceph-deploy gatherkeys HOST [HOST...]
+
+and they will be retrieved from an existing monitor node.
+
+Multiple clusters
+=================
+
+All of the above commands take a ``--cluster=NAME`` option, allowing
+you to manage multiple clusters conveniently from one workstation.
+For example::
+
+ ceph-deploy --cluster=us-west new
+ vi us-west.conf
+ ceph-deploy --cluster=us-west mon
--- /dev/null
#!/bin/sh
# Prepare a development checkout: verify that python-virtualenv is
# available for the detected distro, then build a local virtualenv with
# ceph-deploy installed in develop mode and symlink the entry point here.
set -e

if command -v lsb_release >/dev/null 2>&1; then
    case "$(lsb_release --id --short)" in
    Ubuntu|Debian)
        for package in python-virtualenv; do
            if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then
                # add a space after old values
                missing="${missing:+$missing }$package"
            fi
        done
        if [ -n "$missing" ]; then
            echo "$0: missing required packages, please install them:" 1>&2
            echo " sudo apt-get install $missing"
            exit 1
        fi
        ;;
    esac

    case "$(lsb_release --id --short | awk '{print $1}')" in
    openSUSE|SUSE)
        for package in python-virtualenv; do
            # `==` is a bashism: POSIX test(1) only defines `=`, and this
            # script runs under /bin/sh (which may be dash), where `==`
            # errors out and the missing-package check silently fails.
            if [ "$(rpm -qa $package 2>/dev/null)" = "" ]; then
                missing="${missing:+$missing }$package"
            fi
        done
        if [ -n "$missing" ]; then
            echo "$0: missing required packages, please install them:" 1>&2
            echo " sudo zypper install $missing"
            exit 1
        fi
        ;;
    esac

else
    if [ -f /etc/redhat-release ]; then
        case "$(cat /etc/redhat-release | awk '{print $1}')" in
        CentOS)
            for package in python-virtualenv; do
                # `=` instead of `==` for POSIX sh portability (see above)
                if [ "$(rpm -qa $package 2>/dev/null)" = "" ]; then
                    missing="${missing:+$missing }$package"
                fi
            done
            if [ -n "$missing" ]; then
                echo "$0: missing required packages, please install them:" 1>&2
                echo " sudo yum install $missing"
                exit 1
            fi
            ;;
        esac
    fi
fi

test -d virtualenv || virtualenv virtualenv
./virtualenv/bin/python setup.py develop
./virtualenv/bin/pip install -r requirements.txt -r requirements-dev.txt
test -e ceph-deploy || ln -s virtualenv/bin/ceph-deploy .
--- /dev/null
#
# spec file for package ceph-deploy
#

%if ! (0%{?fedora} > 12 || 0%{?rhel} > 5)
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
%endif

#################################################################################
# common
#################################################################################
Name:           ceph-deploy
Version:        1.2.1
Release:        0
Summary:        Admin and deploy tool for Ceph
License:        MIT
Group:          System/Filesystems
URL:            http://ceph.com/
Source0:        %{name}-%{version}.tar.bz2
BuildRoot:      %{_tmppath}/%{name}-%{version}-build
BuildRequires:  python-devel
BuildRequires:  python-distribute
BuildRequires:  python-setuptools
BuildRequires:  python-virtualenv
BuildRequires:  python-mock
BuildRequires:  python-tox
%if 0%{?suse_version}
BuildRequires:  python-pytest
%else
BuildRequires:  pytest
%endif
Requires:       python-argparse
#Requires:       python-pushy
Requires:       python-distribute
#Requires:       lsb-release
Requires:       ceph
%if 0%{?suse_version} && 0%{?suse_version} <= 1110
%{!?python_sitelib: %global python_sitelib %(python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
%else
BuildArch:      noarch
%endif

#################################################################################
# specific
#################################################################################
%if 0%{defined suse_version}
%py_requires
%if 0%{?suse_version} > 1210
Requires:       gptfdisk
%else
Requires:       scsirastools
%endif
%else
Requires:       gdisk
%endif

%if 0%{?rhel}
BuildRequires:  python >= %{pyver}
Requires:       python >= %{pyver}
%endif

%description
An easy to use admin tool for deploy ceph storage clusters.

%prep
#%setup -q -n %{name}
%setup -q

%build
#python setup.py build

%install
python setup.py install --prefix=%{_prefix} --root=%{buildroot}
# install(1) -D needs the full destination *file* name; the previous form
# wrote the script to a file literally named /usr/bin, which clashes with
# the bindir directory and with the %%files entry for %%{_bindir}/ceph-deploy.
install -m 0755 -D scripts/ceph-deploy $RPM_BUILD_ROOT%{_bindir}/ceph-deploy

%clean
[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT"

%files
%defattr(-,root,root)
%doc LICENSE README.rst
%{_bindir}/ceph-deploy
%{python_sitelib}/*

%changelog
--- /dev/null

# Single source of the package version: surfaced to users through the CLI
# --version flag (see ceph_deploy.cli) and mirrored by the RPM spec's
# Version: field.
__version__ = '1.2.1'

--- /dev/null
+import logging
+
+from cStringIO import StringIO
+
+from . import exc
+from . import conf
+from .cliutil import priority
+from .sudo_pushy import get_transport
+
+LOG = logging.getLogger(__name__)
+
def write_file(path, content):
    """
    Write ``content`` to ``path`` (executed on the remote host via pushy).

    Returns ``None`` on success, or the error message as a string on
    failure so the caller's ``error is not None`` check can actually
    fire; the previous bare ``except: pass`` swallowed every failure and
    made that check dead code.
    """
    try:
        with open(path, 'w') as f:
            f.write(content)
    except Exception as e:
        return str(e)
+
def admin(args):
    """
    Push the cluster conf and the client.admin keyring to every host in
    ``args.client`` so those hosts can administer the cluster.

    :raises RuntimeError: when the local client.admin keyring is missing
    :raises exc.GenericError: when one or more hosts failed
    """
    cfg = conf.load(args)
    conf_data = StringIO()
    cfg.write(conf_data)

    try:
        with open('%s.client.admin.keyring' % args.cluster, 'rb') as f:
            keyring = f.read()
    except IOError:
        # only a missing/unreadable keyring maps to this message; the
        # previous bare except hid unrelated errors behind it
        raise RuntimeError('%s.client.admin.keyring not found' %
                           args.cluster)

    errors = 0
    for hostname in args.client:
        LOG.debug('Pushing admin keys and conf to %s', hostname)
        try:
            sudo = args.pushy(get_transport(hostname))
            write_conf_r = sudo.compile(conf.write_conf)
            write_conf_r(
                cluster=args.cluster,
                conf=conf_data.getvalue(),
                overwrite=args.overwrite_conf,
                )
            # close this connection before opening the next one; the
            # previous code rebound ``sudo`` without closing, leaking it
            sudo.close()

            sudo = args.pushy(get_transport(hostname))
            write_file_r = sudo.compile(write_file)
            error = write_file_r(
                '/etc/ceph/%s.client.admin.keyring' % args.cluster,
                keyring
                )
            if error is not None:
                raise exc.GenericError(error)
            sudo.close()

        # GenericError is not a RuntimeError, so it must be listed
        # explicitly for per-host failures to be counted instead of
        # aborting the whole loop
        except (RuntimeError, exc.GenericError) as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to configure %d admin hosts' % errors)
+
+
@priority(70)
def make(parser):
    """
    Push configuration and client.admin key to a remote host.
    """
    # zero or more target hosts; with none given this is a no-op loop
    parser.add_argument(
        'client', nargs='*', metavar='HOST',
        help='host to configure for ceph administration',
    )
    parser.set_defaults(func=admin)
--- /dev/null
+import pkg_resources
+import argparse
+import logging
+import pushy
+import textwrap
+import sys
+
+import ceph_deploy
+from . import exc
+from . import validate
+from . import sudo_pushy
+from .util import log
+from .util.decorators import catches
+
+LOG = logging.getLogger(__name__)
+
+
+__header__ = textwrap.dedent("""
+ -^-
+ / \\
+ |O o| ceph-deploy v%s
+ ).-.(
+ '/|||\`
+ | '|` |
+ '|`
+""" % ceph_deploy.__version__)
+
+
def get_parser():
    """
    Build the top-level argument parser: global options plus one
    subcommand per installed ``ceph_deploy.cli`` setuptools entry point.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Easy Ceph deployment\n\n%s' % __header__,
    )
    verbosity = parser.add_mutually_exclusive_group(required=False)
    verbosity.add_argument(
        '-v', '--verbose',
        action='store_true', dest='verbose', default=False,
        help='be more verbose',
    )
    verbosity.add_argument(
        '-q', '--quiet',
        action='store_true', dest='quiet',
        help='be less verbose',
    )
    parser.add_argument(
        '-n', '--dry-run',
        action='store_true', dest='dry_run',
        help='do not perform any action, but report what would be done',
    )
    parser.add_argument(
        '--version',
        action='version',
        version='%s' % ceph_deploy.__version__,
        help='the current installed version of ceph-deploy',
    )
    parser.add_argument(
        '--overwrite-conf',
        action='store_true',
        help='overwrite an existing conf file on remote host (if present)',
    )
    parser.add_argument(
        '--cluster',
        metavar='NAME',
        help='name of the cluster',
        type=validate.alphanumeric,
    )
    sub = parser.add_subparsers(
        title='commands',
        metavar='COMMAND',
        help='description',
    )
    # subcommands are discovered dynamically from installed entry points
    entry_points = [
        (ep.name, ep.load())
        for ep in pkg_resources.iter_entry_points('ceph_deploy.cli')
    ]
    # order subcommands by their `priority` attribute (see cliutil.priority);
    # NOTE: tuple parameter unpacking in this lambda is Python 2 only syntax
    entry_points.sort(
        key=lambda (name, fn): getattr(fn, 'priority', 100),
    )
    for (name, fn) in entry_points:
        p = sub.add_parser(
            name,
            description=fn.__doc__,
            help=fn.__doc__,
        )
        # ugly kludge but i really want to have a nice way to access
        # the program name, with subcommand, later
        p.set_defaults(prog=p.prog)
        fn(p)
    parser.set_defaults(
        # we want to hold on to this, for later
        prog=parser.prog,

        # unit tests can override this to mock pushy; no user-visible
        # option sets this
        pushy=pushy.connect,

        cluster='ceph',
    )
    return parser
+
+
@catches((KeyboardInterrupt, RuntimeError, exc.DeployError))
def main(args=None, namespace=None):
    """
    CLI entry point: parse arguments, configure console and file logging,
    patch pushy for sudo transports, then dispatch to the selected
    subcommand's ``func``.
    """
    parser = get_parser()

    # NOTE(review): this inspects sys.argv even when ``args`` is supplied
    # programmatically, so main(args=[...]) under a bare argv prints help
    # instead of parsing -- confirm that is intended.
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit()
    else:
        args = parser.parse_args(args=args, namespace=namespace)

    console_loglevel = logging.DEBUG # start at DEBUG for now
    if args.quiet:
        console_loglevel = logging.WARNING
    if args.verbose:
        console_loglevel = logging.DEBUG

    # Console Logger
    sh = logging.StreamHandler()
    sh.setFormatter(log.color_format())
    sh.setLevel(console_loglevel)

    # File Logger
    fh = logging.FileHandler('{cluster}.log'.format(cluster=args.cluster))
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter(log.BASE_FORMAT))

    # because we're in a module already, __name__ is not the ancestor of
    # the rest of the package; use the root as the logger for everyone
    root_logger = logging.getLogger()

    # allow all levels at root_logger, handlers control individual levels
    root_logger.setLevel(logging.DEBUG)

    root_logger.addHandler(sh)
    root_logger.addHandler(fh)

    # route remote connections through sudo-capable pushy transports
    sudo_pushy.patch()

    # NOTE(review): the -n/--dry-run flag is parsed but never consulted
    # here; subcommands only see it via ``args`` -- confirm it is honored.
    return args.func(args)
--- /dev/null
def priority(num):
    """
    Decorator factory: tag the wrapped function with a ``priority``
    attribute so subcommands can be ordered in the CLI help output.
    """
    def _tag(fn):
        setattr(fn, 'priority', num)
        return fn
    return _tag
--- /dev/null
+import ConfigParser
+import contextlib
+
+from . import exc
+
+
+class _TrimIndentFile(object):
+ def __init__(self, fp):
+ self.fp = fp
+
+ def readline(self):
+ line = self.fp.readline()
+ return line.lstrip(' \t')
+
+
+def _optionxform(s):
+ s = s.replace('_', ' ')
+ s = '_'.join(s.split())
+ return s
+
+
def parse(fp):
    """
    Parse an open ceph.conf-style file into a RawConfigParser.

    Leading indentation is stripped (via _TrimIndentFile) so indented
    conf files parse, and option names are normalized by _optionxform.
    """
    cfg = ConfigParser.RawConfigParser()
    cfg.optionxform = _optionxform
    ifp = _TrimIndentFile(fp)
    cfg.readfp(ifp)
    return cfg
+
+
def load(args):
    """
    Load ``{cluster}.conf`` from the current directory and parse it.

    :raises exc.ConfigError: when the file cannot be opened
    """
    path = '{cluster}.conf'.format(cluster=args.cluster)
    try:
        # open() rather than the deprecated file() builtin
        f = open(path)
    except IOError as e:
        raise exc.ConfigError(e)
    else:
        with contextlib.closing(f):
            return parse(f)
+
+
def write_conf(cluster, conf, overwrite):
    """
    Write cluster configuration to /etc/ceph/{cluster}.conf.

    This function is shipped to the remote host via pushy, hence the
    function-local ``import os``.  The data is written to a pid-suffixed
    temp file and renamed into place so readers never observe a partial
    config.

    :raises RuntimeError: when the file already exists with different
                          content and ``overwrite`` is False
    """
    import os

    path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster)
    tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid())

    if os.path.exists(path):
        with open(path, 'rb') as f:
            old = f.read()
        if old != conf and not overwrite:
            raise RuntimeError('config file %s exists with different content; use --overwrite-conf to overwrite' % path)
    with open(tmp, 'w') as f:
        f.write(conf)
        f.flush()
        # fsync takes a file descriptor; be explicit instead of relying
        # on fileno() coercion of the file object
        os.fsync(f.fileno())
    os.rename(tmp, path)
--- /dev/null
+import logging
+
+from cStringIO import StringIO
+
+from . import exc
+from . import conf
+from . import misc
+from .cliutil import priority
+from .sudo_pushy import get_transport
+
+LOG = logging.getLogger(__name__)
+
def config_push(args):
    """
    Push the local {cluster}.conf to every host in ``args.client``.

    :raises exc.GenericError: when one or more hosts failed
    """
    cfg = conf.load(args)
    conf_data = StringIO()
    cfg.write(conf_data)

    errors = 0
    for hostname in args.client:
        LOG.debug('Pushing config to %s', hostname)
        try:
            # write_conf runs remotely; it honors --overwrite-conf
            sudo = args.pushy(get_transport(hostname))
            write_conf_r = sudo.compile(conf.write_conf)
            write_conf_r(
                cluster=args.cluster,
                conf=conf_data.getvalue(),
                overwrite=args.overwrite_conf,
                )
            sudo.close()

        except RuntimeError as e:
            # count the failure and keep going with the remaining hosts
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to config %d hosts' % errors)
+
+
def config_pull(args):
    """
    Fetch /etc/ceph/{cluster}.conf from the hosts in ``args.client``,
    storing it locally as {cluster}.conf and stopping at the first host
    that provides it.

    :raises exc.GenericError: when no host yielded the config file
    """
    import os.path

    topath = '{cluster}.conf'.format(cluster=args.cluster)
    frompath = '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster)

    errors = 0
    for hostname in args.client:
        try:
            LOG.debug('Checking %s for %s', hostname, frompath)
            sudo = args.pushy(get_transport(hostname))
            get_file_r = sudo.compile(misc.get_file)
            conf_file = get_file_r(path=frompath)
            # close before any early return; the previous code returned
            # on success without ever closing the connection
            sudo.close()
            if conf_file is not None:
                LOG.debug('Got %s from %s', frompath, hostname)
                if os.path.exists(topath):
                    with open(topath, 'rb') as f:
                        existing = f.read()
                    if existing != conf_file and not args.overwrite_conf:
                        # the previous code used a bare `raise` here with
                        # no active exception (itself an error); treat the
                        # content conflict as a per-host failure instead
                        LOG.error('local config file %s exists with different content; use --overwrite-conf to overwrite' % topath)
                        errors += 1
                        continue
                with open(topath, 'w') as f:
                    f.write(conf_file)
                return
            LOG.debug('Empty or missing %s on %s', frompath, hostname)
        except Exception:
            LOG.error('Unable to pull %s from %s', frompath, hostname)
        errors += 1

    raise exc.GenericError('Failed to fetch config from %d hosts' % errors)
+
+
def config(args):
    """
    Entry point for the ``config`` subcommand: dispatch to push or pull.
    """
    dispatch = {
        'push': config_push,
        'pull': config_pull,
    }
    handler = dispatch.get(args.subcommand)
    if handler is None:
        LOG.error('subcommand %s not implemented', args.subcommand)
    else:
        handler(args)
+
@priority(70)
def make(parser):
    """
    Push configuration file to a remote host.
    """
    # first positional selects the direction of the transfer
    parser.add_argument(
        'subcommand',
        metavar='SUBCOMMAND',
        choices=[
            'push',
            'pull',
            ],
        help='push or pull',
        )
    # zero or more hosts to exchange the config with
    parser.add_argument(
        'client',
        metavar='HOST',
        nargs='*',
        help='host to push/pull the config to/from',
        )
    parser.set_defaults(
        func=config,
        )
--- /dev/null
class DeployError(Exception):
    """
    Unknown deploy error
    """

    def __str__(self):
        # The class docstring doubles as the human-readable error name;
        # any constructor arguments are appended, colon-separated.
        parts = [self.__doc__.strip()]
        parts.extend(str(a) for a in self.args)
        return ': '.join(parts)
+
+
# NOTE: for the simple subclasses below, the class docstring is the
# user-visible message prefix produced by DeployError.__str__ -- the text
# is program output, not just documentation.
class UnableToResolveError(DeployError):
    """
    Unable to resolve host
    """
class ClusterExistsError(DeployError):
    """
    Cluster config exists already
    """


class ConfigError(DeployError):
    """
    Cannot load config
    """


class NeedHostError(DeployError):
    """
    No hosts specified to deploy to.
    """


class NeedMonError(DeployError):
    """
    Cannot find nodes with ceph-mon.
    """

class NeedDiskError(DeployError):
    """
    Must supply disk/path argument
    """

class UnsupportedPlatform(DeployError):
    """
    Platform is not supported
    """
    def __init__(self, distro, codename):
        # remembered so __str__ can report exactly what was detected
        self.distro = distro
        self.codename = codename

    def __str__(self):
        return '{doc}: {distro} {codename}'.format(
            doc=self.__doc__.strip(),
            distro=self.distro,
            codename=self.codename,
        )

class MissingPackageError(DeployError):
    """
    A required package or command is missing
    """
    def __init__(self, message):
        self.message = message

    # overrides DeployError.__str__: the full message is supplied by the
    # caller, so the docstring is not used here
    def __str__(self):
        return self.message


# catch-all error carrying a caller-supplied message; __str__ is
# overridden so the (absent) docstring is never needed
class GenericError(DeployError):
    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message
--- /dev/null
+import logging
+import errno
+
+from .cliutil import priority
+
+
+LOG = logging.getLogger(__name__)
+
+
def forgetkeys(args):
    """
    Delete the cluster's keyrings (mon, client.admin, bootstrap-osd,
    bootstrap-mds) from the current directory, ignoring files that are
    already absent.
    """
    import os
    for what in [
            'mon',
            'client.admin',
            'bootstrap-osd',
            'bootstrap-mds',
            ]:
        try:
            os.unlink('{cluster}.{what}.keyring'.format(
                    cluster=args.cluster,
                    what=what,
                    ))
        except OSError as e:
            # a missing keyring is fine; re-raise anything unexpected
            if e.errno != errno.ENOENT:
                raise
+
@priority(100)
def make(parser):
    """
    Remove authentication keys from the local directory.
    """
    # no arguments: operates on the current cluster's keyrings in CWD
    parser.set_defaults(
        func=forgetkeys,
        )
--- /dev/null
+import os.path
+import logging
+
+from .cliutil import priority
+from . import misc
+from .sudo_pushy import get_transport
+
+LOG = logging.getLogger(__name__)
+
def fetch_file(args, frompath, topath, hosts):
    """
    Fetch ``frompath`` from the first host in ``hosts`` that has it and
    store it locally at ``topath``.

    Returns True when the file is (or already was) available locally,
    False otherwise.
    """
    if os.path.exists(topath):
        LOG.debug('Have %s', topath)
        return True
    for hostname in hosts:
        LOG.debug('Checking %s for %s', hostname, frompath)
        sudo = args.pushy(get_transport(hostname))
        get_file_r = sudo.compile(misc.get_file)
        key = get_file_r(path=frompath.format(hostname=hostname))
        # close before any early return; the previous code returned with
        # the connection still open, making sudo.close() unreachable
        sudo.close()
        if key is not None:
            LOG.debug('Got %s key from %s.', topath, hostname)
            with open(topath, 'w') as f:
                f.write(key)
            return True
    LOG.warning('Unable to find %s on %s', frompath, hosts)
    return False
+
def gatherkeys(args):
    """
    Fetch the client.admin, mon, and bootstrap keyrings from the monitor
    hosts into the local directory.

    Returns 0 when every keyring was fetched (or already present),
    1 when any of them is missing.
    """
    ret = 0

    # client.admin
    r = fetch_file(
        args=args,
        frompath='/etc/ceph/{cluster}.client.admin.keyring'.format(
            cluster=args.cluster),
        topath='{cluster}.client.admin.keyring'.format(
            cluster=args.cluster),
        hosts=args.mon,
        )
    if not r:
        ret = 1

    # mon. -- the result must be assigned; previously the return value
    # was dropped and the stale client.admin result was re-checked
    r = fetch_file(
        args=args,
        frompath='/var/lib/ceph/mon/%s-{hostname}/keyring' % args.cluster,
        topath='{cluster}.mon.keyring'.format(
            cluster=args.cluster),
        hosts=args.mon,
        )
    if not r:
        ret = 1

    # bootstrap
    for what in ['osd', 'mds']:
        r = fetch_file(
            args=args,
            frompath='/var/lib/ceph/bootstrap-{what}/{cluster}.keyring'.format(
                cluster=args.cluster,
                what=what),
            topath='{cluster}.bootstrap-{what}.keyring'.format(
                cluster=args.cluster,
                what=what),
            hosts=args.mon,
            )
        if not r:
            ret = 1

    return ret
+
@priority(40)
def make(parser):
    """
    Gather authentication keys for provisioning new nodes.
    """
    # at least one monitor host is required as the key source
    parser.add_argument(
        'mon',
        metavar='HOST',
        nargs='+',
        help='monitor host to pull keys from',
        )
    parser.set_defaults(
        func=gatherkeys,
        )
--- /dev/null
+"""
+We deal (mostly) with remote hosts. To avoid special casing each different
+commands (e.g. using `yum` as opposed to `apt`) we can make a one time call to
+that remote host and set all the special cases for running commands depending
+on the type of distribution/version we are dealing with.
+"""
+
+from ceph_deploy import lsb, exc
+from ceph_deploy.sudo_pushy import get_transport
+from ceph_deploy.hosts import debian, centos, fedora, suse
+
+# Import sudo_pushy and patch it
+import pushy
+from ceph_deploy import sudo_pushy
+sudo_pushy.patch()
+
+
def get(hostname, fallback=None):
    """
    Retrieve the module that matches the distribution of a ``hostname``. This
    function will connect to that host and retrieve the distribution
    information, then return the appropriate module and slap a few attributes
    to that module defining the information it found from the hostname.

    For example, if host ``node1.example.com`` is an Ubuntu server, the
    ``debian`` module would be returned and the following would be set::

        module.name = 'ubuntu'
        module.release = '12.04'
        module.codename = 'precise'

    :param hostname: A hostname that is reachable/resolvable over the network
    :param fallback: Optional fallback to use if no supported distro is found
    """
    sudo_conn = pushy.connect(get_transport(hostname))
    (distro, release, codename) = lsb.get_lsb_release(sudo_conn)

    # forward ``fallback``: the parameter was previously accepted but
    # never passed on, so it was silently ignored
    module = _get_distro(distro, fallback)
    module.name = distro
    module.release = release
    module.codename = codename
    module.sudo_conn = sudo_conn
    module.init = lsb.choose_init(distro, codename)
    return module
+
+
def _get_distro(distro, fallback=None):
    """
    Map a (normalized) distro name to its host module, retrying once
    with ``fallback`` before giving up.

    :raises exc.UnsupportedPlatform: when no module matches
    """
    distro = _normalized_distro_name(distro)
    distributions = {
        'debian': debian,
        'ubuntu': debian,
        'centos': centos,
        'scientific': centos,
        'redhat': centos,
        'fedora': fedora,
        'suse': suse,
    }
    module = distributions.get(distro)
    if module is not None:
        return module
    if fallback:
        return _get_distro(fallback)
    raise exc.UnsupportedPlatform(distro=distro, codename='')
+
+
+def _normalized_distro_name(distro):
+ distro = distro.lower()
+ if distro.startswith('redhat'):
+ return 'redhat'
+ elif distro.startswith('suse'):
+ return 'suse'
+ return distro
--- /dev/null
+import mon
+from install import install
+
# Allow to set some information about this distro
#

# Placeholders populated externally -- ceph_deploy.hosts.get() attaches
# distro metadata onto the selected host module.  NOTE(review): get()
# assigns ``module.name`` (plus release/codename/sudo_conn/init) rather
# than ``distro``; confirm these placeholder names are the intended ones.
distro = None
release = None
codename = None
--- /dev/null
+from ceph_deploy.util.wrappers import check_call
+from ceph_deploy.hosts import common
+
+
def install(distro, logger, version_kind, version):
    """
    Install ceph on a remote CentOS-family host: import the matching
    signing key, install the ceph-release repo package for the requested
    release track, yum-install ceph, then log the installed version.

    :param version_kind: 'stable', 'testing' or 'dev'
    :param version: release name (stable) or gitbuilder ref (dev)
    """
    release = distro.release
    machine = distro.sudo_conn.modules.platform.machine()

    # stable/testing packages are signed with the release key, everything
    # else with the autobuild key
    if version_kind in ['stable', 'testing']:
        key = 'release'
    else:
        key = 'autobuild'

    check_call(
        distro.sudo_conn,
        logger,
        ['su -c \'rpm --import "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc"\''.format(key=key),],
        shell=True)

    if version_kind == 'stable':
        url = 'http://ceph.com/rpm-{version}/el6/'.format(
            version=version,
            )
    elif version_kind == 'testing':
        url = 'http://ceph.com/rpm-testing/'
    elif version_kind == 'dev':
        url = 'http://gitbuilder.ceph.com/ceph-rpm-centos{release}-{machine}-basic/ref/{version}/'.format(
            release=release.split(".",1)[0],
            machine=machine,
            version=version,
            )
    # NOTE(review): any other version_kind leaves ``url`` unbound and the
    # rpm call below raises NameError -- confirm callers only ever pass
    # the three values handled above.

    check_call(
        distro.sudo_conn,
        logger,
        [
            'rpm',
            '-Uvh',
            '--replacepkgs',
            '--force',
            '--quiet',
            '{url}noarch/ceph-release-1-0.el6.noarch.rpm'.format(url=url),
        ],
    )

    check_call(
        distro.sudo_conn,
        logger,
        [
            'yum',
            '-y',
            '-q',
            'install',
            'ceph',
        ],
    )

    # Check the ceph version
    common.ceph_version(distro.sudo_conn, logger)
--- /dev/null
+from create import create
--- /dev/null
+from ceph_deploy.util.wrappers import check_call
+from ceph_deploy.hosts import common
+
+
def create(distro, logger, args, monitor_keyring):
    """
    Create a ceph-mon on the remote host (via common.mon_create) and
    start it through the init system's ``service`` wrapper.
    """
    # short hostname (up to the first dot), as reported by the remote host
    hostname = distro.sudo_conn.modules.socket.gethostname().split('.')[0]
    common.mon_create(distro, logger, args, monitor_keyring, hostname)
    # NOTE(review): which_service can return None when no `service`
    # executable is found, which would corrupt the command below -- confirm
    service = common.which_service(distro.sudo_conn, logger)
    check_call(
        distro.sudo_conn,
        logger,
        [
            service,
            'ceph',
            'start',
            'mon.{hostname}'.format(hostname=hostname)
        ],
        patch=False,
    )
--- /dev/null
+from ceph_deploy.util import paths
+from ceph_deploy.util.wrappers import check_call
+from ceph_deploy.util.context import remote
+from ceph_deploy import conf
+from StringIO import StringIO
+
+
def ceph_version(conn, logger):
    """
    Log the remote ceph-version by calling `ceph --version`

    Returns the result of ``check_call`` so callers may inspect it.
    """
    return check_call(conn, logger, ['ceph', '--version'])
+
+
def which_service(conn, logger):
    """
    Locate the ``service`` executable on the remote host.

    It may live outside the remote user's PATH, so the usual locations
    are probed directly; returns the first match, or None (after logging
    an error) when neither exists.
    """
    logger.info('locating `service` executable...')
    for candidate in ('/sbin/service', '/usr/sbin/service'):
        if conn.modules.os.path.exists(candidate):
            logger.info('found `service` executable: %s' % candidate)
            return candidate
    logger.error('could not find `service` executable')
+
+
def mon_create(distro, logger, args, monitor_keyring, hostname):
    """
    Create and initialize a monitor on the remote host; shared by all
    platform backends.

    Pushes the cluster conf, seeds the monitor keyring, runs
    ``ceph-mon --mkfs``, and drops the ``done`` and init marker files
    so a second run does not redo the deployment.

    :param distro: platform object holding the remote ``sudo_conn`` and init
    :param logger: remote logger for this host
    :param args: parsed CLI args; ``cluster`` and ``overwrite_conf`` are used
    :param monitor_keyring: keyring contents to seed the monitor with
    :param hostname: short hostname of the remote machine
    """
    logger.debug('remote hostname: %s' % hostname)
    path = paths.mon.path(args.cluster, hostname)
    done_path = paths.mon.done(args.cluster, hostname)
    init_path = paths.mon.init(args.cluster, hostname, distro.init)

    # serialize the local cluster configuration so it can be written remotely
    configuration = conf.load(args)
    conf_data = StringIO()
    configuration.write(conf_data)

    with remote(distro.sudo_conn, logger, conf.write_conf) as remote_func:
        remote_func(args.cluster, conf_data.getvalue(), overwrite=args.overwrite_conf)

    if not distro.sudo_conn.modules.os.path.exists(path):
        logger.info('creating path: %s' % path)
        distro.sudo_conn.modules.os.makedirs(path)

    # the done marker means the mon was already fully deployed here
    logger.debug('checking for done path: %s' % done_path)
    if not distro.sudo_conn.modules.os.path.exists(done_path):
        logger.debug('done path does not exist: %s' % done_path)
        if not distro.sudo_conn.modules.os.path.exists(paths.mon.constants.tmp_path):
            logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path)
            distro.sudo_conn.modules.os.makedirs(paths.mon.constants.tmp_path)
        keyring = paths.mon.keyring(args.cluster, hostname)

        def write_monitor_keyring(keyring, monitor_keyring):
            """create the monitor keyring file"""
            with file(keyring, 'w') as f:
                f.write(monitor_keyring)

        logger.info('creating keyring file: %s' % keyring)
        with remote(distro.sudo_conn, logger, write_monitor_keyring) as remote_func:
            remote_func(keyring, monitor_keyring)

        check_call(
            distro.sudo_conn,
            logger,
            [
                'ceph-mon',
                '--cluster', args.cluster,
                '--mkfs',
                '-i', hostname,
                '--keyring', keyring,
            ],
        )

        # the keyring was only needed for --mkfs; do not leave it behind
        logger.info('unlinking keyring file %s' % keyring)
        distro.sudo_conn.modules.os.unlink(keyring)

    def create_done_path(done_path):
        """create a done file to avoid re-doing the mon deployment"""
        with file(done_path, 'w'):
            pass

    with remote(distro.sudo_conn, logger, create_done_path) as remote_func:
        remote_func(done_path)

    def create_init_path(init_path):
        """create the init path if it does not exist"""
        import os
        if not os.path.exists(init_path):
            with file(init_path, 'w'):
                pass

    with remote(distro.sudo_conn, logger, create_init_path) as remote_func:
        remote_func(init_path)
--- /dev/null
+import mon
+from install import install
+
# Allow to set some information about this distro
#

# Placeholders filled in by ceph_deploy.hosts when this platform
# module is selected for a detected remote host.
distro = None
release = None
codename = None
--- /dev/null
+from ceph_deploy.util.wrappers import check_call
+from ceph_deploy.util.context import remote
+from ceph_deploy.hosts import common
+
+
def install(distro, logger, version_kind, version):
    """
    Install ceph on a Debian/Ubuntu remote host.

    Installs ca-certificates, adds the ceph apt key and repository for
    the requested version, refreshes the package index, installs the
    ceph packages, and finally logs the installed ceph version.

    :param distro: platform object holding the remote ``sudo_conn`` and codename
    :param logger: remote logger for this host
    :param version_kind: one of 'stable', 'testing' or 'dev'
    :param version: release name (stable) or branch/tag (dev)
    :raises RuntimeError: on an unknown ``version_kind``
    """
    codename = distro.codename
    machine = distro.sudo_conn.modules.platform.machine()

    # stable/testing packages are signed with the release key,
    # gitbuilder (dev) packages with the autobuild key
    if version_kind in ['stable', 'testing']:
        key = 'release'
    else:
        key = 'autobuild'

    # Make sure ca-certificates is installed
    check_call(
        distro.sudo_conn,
        logger,
        [
            'env',
            'DEBIAN_FRONTEND=noninteractive',
            'apt-get',
            '-q',
            'install',
            '--assume-yes',
            'ca-certificates',
        ]
    )

    # fetch and register the signing key for the repo added below
    check_call(
        distro.sudo_conn,
        logger,
        ['wget -q -O- \'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc\' | apt-key add -'.format(key=key)],
        shell=True,
    )

    if version_kind == 'stable':
        url = 'http://ceph.com/debian-{version}/'.format(
            version=version,
        )
    elif version_kind == 'testing':
        url = 'http://ceph.com/debian-testing/'
    elif version_kind == 'dev':
        url = 'http://gitbuilder.ceph.com/ceph-deb-{codename}-{machine}-basic/ref/{version}'.format(
            codename=codename,
            machine=machine,
            version=version,
        )
    else:
        raise RuntimeError('Unknown version kind: %r' % version_kind)

    def write_sources_list(url, codename):
        """add ceph deb repo to sources.list"""
        with file('/etc/apt/sources.list.d/ceph.list', 'w') as f:
            f.write('deb {url} {codename} main\n'.format(
                url=url,
                codename=codename,
            ))

    with remote(distro.sudo_conn, logger, write_sources_list) as remote_func:
        remote_func(url, codename)

    check_call(
        distro.sudo_conn,
        logger,
        ['apt-get', '-q', 'update'],
    )

    # TODO this does not downgrade -- should it?
    check_call(
        distro.sudo_conn,
        logger,
        [
            'env',
            'DEBIAN_FRONTEND=noninteractive',
            'DEBIAN_PRIORITY=critical',
            'apt-get',
            '-q',
            '-o', 'Dpkg::Options::=--force-confnew',
            'install',
            '--no-install-recommends',
            '--assume-yes',
            '--',
            'ceph',
            'ceph-mds',
            'ceph-common',
            'ceph-fs-common',
            # ceph only recommends gdisk, make sure we actually have
            # it; only really needed for osds, but minimal collateral
            'gdisk',
        ],
    )

    # Check the ceph version
    common.ceph_version(distro.sudo_conn, logger)
--- /dev/null
+from create import create
--- /dev/null
+from ceph_deploy.util.wrappers import check_call
+from ceph_deploy.hosts import common
+
+
def create(distro, logger, args, monitor_keyring):
    """
    Deploy a monitor on this host and start it with the init system
    detected for the platform (upstart on Ubuntu, sysvinit on Debian).
    """
    hostname = distro.sudo_conn.modules.socket.gethostname().split('.')[0]
    common.mon_create(distro, logger, args, monitor_keyring, hostname)

    if distro.init == 'upstart':  # Ubuntu uses upstart
        start_cmd = [
            'initctl',
            'emit',
            'ceph-mon',
            'cluster={cluster}'.format(cluster=args.cluster),
            'id={hostname}'.format(hostname=hostname),
        ]
    elif distro.init == 'sysvinit':  # Debian uses sysvinit
        service = common.which_service(distro.sudo_conn, logger)
        start_cmd = [
            service,
            'ceph',
            'start',
            'mon.{hostname}'.format(hostname=hostname)
        ]
    else:
        raise RuntimeError('create cannot use init %s' % distro.init)

    check_call(distro.sudo_conn, logger, start_cmd, patch=False)
--- /dev/null
+import mon
+from install import install
+
# Allow to set some information about this distro
#

# Placeholders filled in by ceph_deploy.hosts when this platform
# module is selected for a detected remote host.
distro = None
release = None
codename = None
--- /dev/null
+from ceph_deploy.util.wrappers import check_call
+from ceph_deploy.hosts import common
+
+
def install(distro, logger, version_kind, version):
    """
    Install ceph on a Fedora remote host.

    Imports the ceph signing key, installs the ceph-release repo rpm
    for the requested version, installs ceph with yum, and logs the
    installed ceph version.

    :param distro: platform object holding the remote ``sudo_conn`` and release
    :param logger: remote logger for this host
    :param version_kind: one of 'stable', 'testing' or 'dev'
    :param version: release name (stable) or branch/tag (dev)
    :raises RuntimeError: on an unknown ``version_kind``
    """
    release = distro.release
    machine = distro.sudo_conn.modules.platform.machine()

    # stable/testing packages are signed with the release key,
    # gitbuilder (dev) packages with the autobuild key
    if version_kind in ['stable', 'testing']:
        key = 'release'
    else:
        key = 'autobuild'

    check_call(
        distro.sudo_conn,
        logger,
        args='su -c \'rpm --import "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc"\''.format(key=key),
        shell=True,
    )

    if version_kind == 'stable':
        url = 'http://ceph.com/rpm-{version}/fc{release}/'.format(
            version=version,
            release=release,
        )
    elif version_kind == 'testing':
        url = 'http://ceph.com/rpm-testing/fc{release}'.format(
            release=release,
        )
    elif version_kind == 'dev':
        url = 'http://gitbuilder.ceph.com/ceph-rpm-fc{release}-{machine}-basic/ref/{version}/'.format(
            # gitbuilder paths use only the major release number
            release=release.split(".", 1)[0],
            machine=machine,
            version=version,
        )
    else:
        # fail loudly instead of leaving ``url`` unbound (consistent
        # with the debian backend's handling of unknown kinds)
        raise RuntimeError('Unknown version kind: %r' % version_kind)

    check_call(
        distro.sudo_conn,
        logger,
        args=[
            'rpm',
            '-Uvh',
            '--replacepkgs',
            '--force',
            '--quiet',
            '{url}noarch/ceph-release-1-0.fc{release}.noarch.rpm'.format(
                url=url,
                release=release,
            ),
        ]
    )

    check_call(
        distro.sudo_conn,
        logger,
        args=[
            'yum',
            '-y',
            '-q',
            'install',
            'ceph',
        ],
    )

    # Check the ceph version
    common.ceph_version(distro.sudo_conn, logger)
--- /dev/null
+from create import create
--- /dev/null
+from ceph_deploy.util.wrappers import check_call
+from ceph_deploy.hosts import common
+
+
def create(distro, logger, args, monitor_keyring):
    """
    Deploy a monitor on this host and start it through sysvinit.
    """
    # the short hostname (domain stripped) names the monitor daemon
    hostname = distro.sudo_conn.modules.socket.gethostname().split('.')[0]
    common.mon_create(distro, logger, args, monitor_keyring, hostname)
    service = common.which_service(distro.sudo_conn, logger)

    start_cmd = [
        service,
        'ceph',
        'start',
        'mon.{hostname}'.format(hostname=hostname)
    ]
    check_call(distro.sudo_conn, logger, start_cmd, patch=False)
--- /dev/null
+import mon
+from install import install
+
# Allow to set some information about this distro
#

# Placeholders filled in by ceph_deploy.hosts when this platform
# module is selected for a detected remote host.
distro = None
release = None
codename = None
--- /dev/null
+from ceph_deploy.util.wrappers import check_call
+from ceph_deploy.hosts import common
+
+
def install(distro, logger, version_kind, version):
    """
    Install ceph on a SUSE remote host (openSUSE or SLES).

    Imports the ceph signing key, installs the ceph-release repo rpm
    for the requested version, installs ceph with zypper, and logs the
    installed ceph version.

    :param distro: platform object holding the remote ``sudo_conn``,
                   release and codename
    :param logger: remote logger for this host
    :param version_kind: one of 'stable', 'testing' or 'dev'
    :param version: release name (stable) or branch/tag (dev)
    :raises RuntimeError: on an unknown ``version_kind``
    """
    release = distro.release
    machine = distro.sudo_conn.modules.platform.machine()

    # stable/testing packages are signed with the release key,
    # gitbuilder (dev) packages with the autobuild key
    if version_kind in ['stable', 'testing']:
        key = 'release'
    else:
        key = 'autobuild'

    # BUG FIX: the original rebound the ``distro`` parameter itself to
    # this string, so every later ``distro.sudo_conn`` access would
    # raise AttributeError; use a separate name for the repo label.
    if distro.codename == 'Mantis':
        distro_name = 'opensuse12'
    else:
        distro_name = 'sles-11sp2'

    check_call(
        distro.sudo_conn,
        logger,
        ['su -c \'rpm --import "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc"\''.format(key=key)],
        shell=True,
    )

    if version_kind == 'stable':
        url = 'http://ceph.com/rpm-{version}/{distro}/'.format(
            version=version,
            distro=distro_name,
        )
    elif version_kind == 'testing':
        url = 'http://ceph.com/rpm-testing/{distro}'.format(distro=distro_name)
    elif version_kind == 'dev':
        url = 'http://gitbuilder.ceph.com/ceph-rpm-{distro}{release}-{machine}-basic/ref/{version}/'.format(
            distro=distro_name,
            # gitbuilder paths use only the major release number
            release=release.split(".", 1)[0],
            machine=machine,
            version=version,
        )
    else:
        # fail loudly instead of leaving ``url`` unbound (consistent
        # with the debian backend's handling of unknown kinds)
        raise RuntimeError('Unknown version kind: %r' % version_kind)

    check_call(
        distro.sudo_conn,
        logger,
        [
            'rpm',
            '-Uvh',
            '--replacepkgs',
            '--force',
            '--quiet',
            '{url}noarch/ceph-release-1-0.noarch.rpm'.format(
                url=url,
            ),
        ]
    )

    check_call(
        distro.sudo_conn,
        logger,
        [
            'zypper',
            '--non-interactive',
            '--quiet',
            'install',
            'ceph',
        ],
    )

    # Check the ceph version
    common.ceph_version(distro.sudo_conn, logger)
--- /dev/null
+from create import create
--- /dev/null
+from ceph_deploy.util.wrappers import check_call
+from ceph_deploy.hosts import common
+
+
def create(distro, logger, args, monitor_keyring):
    """
    Deploy a monitor on this host and start it through sysvinit.
    """
    hostname = distro.sudo_conn.modules.socket.gethostname().split('.')[0]
    common.mon_create(distro, logger, args, monitor_keyring, hostname)
    service = common.which_service(distro.sudo_conn, logger)

    check_call(
        distro.sudo_conn,
        logger,
        [service, 'ceph', 'start', 'mon.{hostname}'.format(hostname=hostname)],
        patch=False,
    )
--- /dev/null
+import argparse
+import logging
+from distutils.util import strtobool
+
+from . import exc
+from . import lsb, hosts
+from .cliutil import priority
+from .sudo_pushy import get_transport
+from .util.decorators import remote_compile
+
+LOG = logging.getLogger(__name__)
+
def check_ceph_installed():
    """
    Check if the ceph packages are installed by looking for the
    presence of the ceph command.

    Runs on the remote host.  Returns the exit status of
    ``which ceph``: 0 when the executable is present, non-zero
    otherwise.
    """
    import subprocess

    args = ['which', 'ceph']
    process = subprocess.Popen(
        args=args,
        # capture the output; without this, communicate() returns None
        # and the located path leaks onto our stdout
        stdout=subprocess.PIPE,
    )
    ceph_path, _ = process.communicate()
    return process.wait()
+
+
def uninstall_suse(arg_purge=False):
    """
    Remove the ceph packages via zypper.  Runs on the remote host;
    ``arg_purge`` is accepted for interface parity but unused here.
    """
    import subprocess

    cmd = [
        'zypper',
        '--non-interactive',
        '--quiet',
        'remove',
        'ceph',
        'libcephfs1',
        'librados2',
        'librbd1',
    ]
    subprocess.check_call(args=cmd)
+
def uninstall_debian(arg_purge=False):
    """
    Remove (and optionally purge the config of) the ceph packages via
    apt-get.  Runs on the remote host.
    """
    import subprocess

    cmd = [
        'apt-get',
        '-q',
        'remove',
        '-f',
        '-y',
        '--force-yes',
    ]
    if arg_purge:
        cmd.append('--purge')
    cmd.append('--')
    cmd.extend([
        'ceph',
        'ceph-mds',
        'ceph-common',
        'ceph-fs-common',
    ])
    subprocess.check_call(args=cmd)
+
+
def uninstall_fedora(arg_purge=False):
    """
    Remove the ceph package via yum.  Runs on the remote host;
    ``arg_purge`` is accepted for interface parity but unused here.
    """
    import subprocess

    cmd = [
        'yum',
        '-q',
        '-y',
        'remove',
        'ceph',
    ]
    subprocess.check_call(args=cmd)
+
+
def uninstall_centos(arg_purge=False):
    """
    Remove the ceph package via yum.  Runs on the remote host;
    ``arg_purge`` is accepted for interface parity but unused here.
    """
    import subprocess

    cmd = [
        'yum',
        '-q',
        '-y',
        'remove',
        'ceph',
    ]
    subprocess.check_call(args=cmd)
+
# NOTE(review): this is a byte-for-byte duplicate of the
# uninstall_debian defined earlier in this module; this second
# definition silently shadows the first and should be removed.
def uninstall_debian(arg_purge=False):
    """
    Remove (and optionally purge the config of) the ceph packages via
    apt-get.  Runs on the remote host.
    """
    import subprocess

    packages = [
        'ceph',
        'ceph-mds',
        'ceph-common',
        'ceph-fs-common',
    ]
    args = [
        'apt-get',
        '-q',
        'remove',
        '-f',
        '-y',
        '--force-yes',
    ]
    if arg_purge:
        args.append('--purge')
    args.append('--')
    args.extend(packages)
    subprocess.check_call(args=args)
+
+
def purge_data_any():
    """
    Remove all ceph data and configuration from this host.

    Runs on the remote host.  If /var/lib/ceph survives the first
    removal (typically because OSD volumes are mounted beneath it),
    unmount anything one or two levels deep and remove it again, then
    remove /etc/ceph.
    """
    import subprocess
    import os.path

    # best-effort first pass; may fail while filesystems are mounted below
    subprocess.call(args=[
        'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
    ])
    if os.path.exists('/var/lib/ceph'):
        subprocess.check_call(args=[
            'find', '/var/lib/ceph',
            '-mindepth', '1',
            '-maxdepth', '2',
            '-type', 'd',
            '-exec', 'umount', '{}', ';',
        ])
        subprocess.check_call(args=[
            'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
        ])
    subprocess.check_call(args=[
        'rm', '-rf', '--one-file-system', '--', '/etc/ceph',
    ])
+
+
def install(args):
    """
    Install ceph on every host given on the command line, using the
    platform-specific backend detected for each host.
    """
    # the version value lives in the attribute named by version_kind,
    # e.g. args.stable == 'dumpling' when version_kind == 'stable'
    version = getattr(args, args.version_kind)
    version_str = args.version_kind
    if version:
        version_str += ' version {version}'.format(version=version)
    LOG.debug(
        'Installing %s on cluster %s hosts %s',
        version_str,
        args.cluster,
        ' '.join(args.host),
    )
    for hostname in args.host:
        # TODO username
        LOG.debug('Detecting platform for host %s ...', hostname)
        distro = hosts.get(hostname)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
        # per-host logger so remote output is attributed to the host
        rlogger = logging.getLogger(hostname)
        rlogger.info('installing ceph on %s' % hostname)
        distro.install(distro, rlogger, args.version_kind, version)
        distro.sudo_conn.close()
+
+
def uninstall(args):
    """
    Uninstall ceph from every host given on the command line, choosing
    the remover matching each host's detected distribution.
    """
    LOG.debug(
        'Uninstalling on cluster %s hosts %s',
        args.cluster,
        ' '.join(args.host),
    )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        # TODO username
        sudo = args.pushy(get_transport(hostname))
        (distro, release, codename) = lsb.get_lsb_release(sudo)
        LOG.debug('Distro %s codename %s', distro, codename)

        # compile the matching uninstall function for remote execution
        if (distro == 'Debian' or distro == 'Ubuntu'):
            uninstall_r = sudo.compile(uninstall_debian)
        elif distro == 'CentOS' or distro == 'Scientific' or distro.startswith('RedHat'):
            uninstall_r = sudo.compile(uninstall_centos)
        elif distro == 'Fedora':
            uninstall_r = sudo.compile(uninstall_fedora)
        elif (distro == 'SUSE LINUX'):
            uninstall_r = sudo.compile(uninstall_suse)
        else:
            raise exc.UnsupportedPlatform(distro=distro, codename=codename)

        LOG.debug('Uninstalling on host %s ...', hostname)
        uninstall_r()
        sudo.close()
+
def purge(args):
    """
    Uninstall ceph from every host given on the command line, also
    purging package configuration (``arg_purge=True``).
    """
    LOG.debug(
        'Purging from cluster %s hosts %s',
        args.cluster,
        ' '.join(args.host),
    )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        # TODO username
        sudo = args.pushy(get_transport(hostname))
        (distro, release, codename) = lsb.get_lsb_release(sudo)
        LOG.debug('Distro %s codename %s', distro, codename)

        # compile the matching uninstall function for remote execution
        if distro == 'Debian' or distro == 'Ubuntu':
            purge_r = sudo.compile(uninstall_debian)
        elif distro == 'CentOS' or distro == 'Scientific' or distro.startswith('RedHat'):
            purge_r = sudo.compile(uninstall_centos)
        elif distro == 'Fedora':
            purge_r = sudo.compile(uninstall_fedora)
        elif (distro == 'SUSE LINUX'):
            purge_r = sudo.compile(uninstall_suse)
        else:
            raise exc.UnsupportedPlatform(distro=distro, codename=codename)

        LOG.debug('Purging host %s ...', hostname)
        # NOTE(review): only the debian remover actually honors
        # arg_purge; the yum/zypper variants accept and ignore it
        purge_r(arg_purge=True)
        sudo.close()
+
def purge_data(args):
    """
    Delete all ceph data (/var/lib/ceph, /etc/ceph) from the given
    hosts, prompting for confirmation when ceph is still installed on
    any of them.
    """
    LOG.debug(
        'Purging data from cluster %s hosts %s',
        args.cluster,
        ' '.join(args.host),
    )

    # first pass: find hosts where the ceph executable is still present
    installed_hosts=[]
    for hostname in args.host:
        sudo = args.pushy(get_transport(hostname))
        check_ceph_installed_r = sudo.compile(check_ceph_installed)
        status = check_ceph_installed_r()
        if status == 0:
            installed_hosts.append(hostname)
        sudo.close()

    if installed_hosts:
        print "ceph is still installed on: ", installed_hosts
        # NOTE(review): strtobool raises ValueError on input that is
        # neither a yes- nor a no-word -- confirm that is acceptable
        answer=raw_input("Continue (y/n)")
        if not strtobool(answer):
            return

    # second pass: actually remove the data on every host
    for hostname in args.host:
        # TODO username
        sudo = args.pushy(get_transport(hostname))

        LOG.debug('Purging data from host %s ...', hostname)
        purge_data_any_r = sudo.compile(purge_data_any)
        purge_data_any_r()
        sudo.close()
+
class StoreVersion(argparse.Action):
    """
    Argparse action that behaves like ``"store"`` but also records
    which of the mutually exclusive version options was used.

    There are three kinds of versions: stable, testing and dev.  The
    option that triggered this action names the kind, so its ``dest``
    is copied into ``namespace.version_kind``.  This kludge lets us
    tell explicitly set values apart from defaults.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        namespace.version_kind = self.dest
        setattr(namespace, self.dest, values)
+
+
@priority(20)
def make(parser):
    """
    Install Ceph packages on remote hosts.
    """

    # --stable, --testing and --dev are mutually exclusive; StoreVersion
    # records which one was passed in args.version_kind
    version = parser.add_mutually_exclusive_group()

    version.add_argument(
        '--stable',
        nargs='?',
        action=StoreVersion,
        choices=[
            'bobtail',
            'cuttlefish',
            'dumpling',
            'emperor',
        ],
        metavar='CODENAME',
        help='install a release known as CODENAME (done by default) (default: %(default)s)',
    )

    version.add_argument(
        '--testing',
        # NOTE(review): nargs=0 makes argparse store an empty list as
        # the value; only version_kind matters for this flag
        nargs=0,
        action=StoreVersion,
        help='install the latest development release',
    )

    version.add_argument(
        '--dev',
        nargs='?',
        action=StoreVersion,
        const='master',
        metavar='BRANCH_OR_TAG',
        help='install a bleeding edge build from Git branch or tag (default: %(default)s)',
    )

    version.set_defaults(
        func=install,
        stable='dumpling',
        dev='master',
        version_kind='stable',
    )

    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to install on',
    )
    # NOTE(review): func=install is also set on the group defaults
    # above; this second set_defaults is redundant but harmless
    parser.set_defaults(
        func=install,
    )
+
+
+
@priority(80)
def make_uninstall(parser):
    """
    Remove Ceph packages from remote hosts.
    """
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to uninstall Ceph from',
    )
    # dispatch to uninstall() when this subcommand is selected
    parser.set_defaults(
        func=uninstall,
    )
+
@priority(80)
def make_purge(parser):
    """
    Remove Ceph packages from remote hosts and purge all data.
    """
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to purge Ceph from',
    )
    # dispatch to purge() when this subcommand is selected
    parser.set_defaults(
        func=purge,
    )
+
+
@priority(80)
def make_purge_data(parser):
    """
    Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph
    """
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to purge Ceph data from',
    )
    # dispatch to purge_data() when this subcommand is selected
    parser.set_defaults(
        func=purge_data,
    )
--- /dev/null
+from . import exc
+
def check_lsb_release():
    """
    Verify that the lsb_release command is available.

    Runs on the remote host; raises RuntimeError when the executable
    cannot be located.
    """
    import subprocess

    process = subprocess.Popen(
        args=['which', 'lsb_release'],
        stdout=subprocess.PIPE,
    )
    process.communicate()
    if process.wait() != 0:
        raise RuntimeError('The lsb_release command was not found on remote host. Please install the lsb-release package.')
+
def lsb_release():
    """
    Get LSB release information from lsb_release.

    Returns tuple with distro, release and codename.  Otherwise
    the function raises an error (subprocess.CalledProcessError or
    RuntimeError).
    """
    import subprocess

    def _field(flag, name):
        """Run ``lsb_release -s <flag>`` and return its stripped output."""
        args = ['lsb_release', '-s', flag]
        process = subprocess.Popen(
            args=args,
            stdout=subprocess.PIPE,
        )
        out, _ = process.communicate()
        ret = process.wait()
        if ret != 0:
            raise subprocess.CalledProcessError(ret, args, output=out)
        if out == '':
            raise RuntimeError('lsb_release gave invalid output for %s' % name)
        return str(out).rstrip()

    # the original issued three copy-pasted subprocess stanzas; the
    # helper keeps the exact commands and error behavior with one body
    return (
        _field('-i', 'distro'),
        _field('-r', 'release'),
        _field('-c', 'codename'),
    )
+
+
def get_lsb_release(sudo):
    """
    Get LSB release information from lsb_release.

    Check if lsb_release is installed on the remote host and issue
    a message if not.

    Returns tuple with distro, release and codename. Otherwise
    the function raises an error (subprocess.CalledProcessError or
    RuntimeError).
    """
    try:
        check_lsb_release_r = sudo.compile(check_lsb_release)
        # NOTE(review): the return value is unused; check_lsb_release
        # signals a missing binary by raising, not by its status
        status = check_lsb_release_r()
    except RuntimeError as e:
        raise exc.MissingPackageError(e.message)

    # run lsb_release remotely and hand its parsed tuple back
    lsb_release_r = sudo.compile(lsb_release)
    return lsb_release_r()
+
+
def choose_init(distro, codename):
    """
    Select an init system for a given distribution.

    Returns the name of an init system (upstart, sysvinit ...).
    Only Ubuntu maps to upstart; everything else gets sysvinit.
    """
    return 'upstart' if distro == 'Ubuntu' else 'sysvinit'
--- /dev/null
+import logging
+
+from cStringIO import StringIO
+
+from . import conf
+from . import exc
+from . import lsb
+from .cliutil import priority
+from .sudo_pushy import get_transport
+
+
+LOG = logging.getLogger(__name__)
+
+
def get_bootstrap_mds_key(cluster):
    """
    Read the bootstrap-mds key for `cluster`.

    The keyring is looked up relative to the current working
    directory.  Raises RuntimeError when the file is missing.
    """
    path = '{cluster}.bootstrap-mds.keyring'.format(cluster=cluster)
    try:
        # open() instead of the Python-2-only file() builtin
        with open(path, 'rb') as f:
            return f.read()
    except IOError:
        raise RuntimeError('bootstrap-mds keyring not found; run \'gatherkeys\'')
+
+
def create_mds_bootstrap(cluster, key):
    """
    Run on mds node, writes the bootstrap key if not there yet.

    Returns None on success, error message on error exceptions. pushy
    mangles exceptions to all be of type ExceptionProxy, so we can't
    tell between bug and correctly handled failure, so avoid using
    exceptions for non-exceptional runs.
    """
    import os

    path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
        cluster=cluster,
    )
    if not os.path.exists(path):
        # write to a pid-suffixed temp file, then rename atomically
        tmp = '{path}.{pid}.tmp'.format(
            path=path,
            pid=os.getpid(),
        )
        # file() doesn't let us control access mode from the
        # beginning, and thus would have a race where attacker can
        # open before we chmod the file, so play games with os.open
        fd = os.open(
            tmp,
            (os.O_WRONLY|os.O_CREAT|os.O_EXCL
             |os.O_NOCTTY|os.O_NOFOLLOW),
            0600,
            )
        with os.fdopen(fd, 'wb') as f:
            f.write(key)
            f.flush()
            os.fsync(f)
        os.rename(tmp, path)
+
+
def create_mds(
    name,
    cluster,
    init,
    ):
    """
    Create an mds data directory, keyring and start the daemon.

    Runs on the remote host.  Asks the monitors for a new mds key via
    the bootstrap-mds credentials; when that is refused (EACCES,
    typically an older cluster with different cap requirements) retry
    with broader caps.  Finally drops marker files and starts the
    daemon through the given init system.

    :param name: mds daemon name
    :param cluster: cluster name
    :param init: 'upstart' or 'sysvinit'
    """
    import os
    import subprocess
    import errno

    path = '/var/lib/ceph/mds/{cluster}-{name}'.format(
        cluster=cluster,
        name=name
    )

    # idempotent mkdir: an existing directory is fine
    try:
        os.mkdir(path)
    except OSError, e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise

    bootstrap_keyring = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
        cluster=cluster
    )

    keypath = os.path.join(path, 'keyring')

    try:
        subprocess.check_call(
            args = [
                'ceph',
                '--cluster', cluster,
                '--name', 'client.bootstrap-mds',
                '--keyring', bootstrap_keyring,
                'auth', 'get-or-create', 'mds.{name}'.format(name=name),
                'osd', 'allow rwx',
                'mds', 'allow',
                'mon', 'allow profile mds',
                '-o',
                os.path.join(keypath),
            ])
    except subprocess.CalledProcessError as err:
        if err.returncode != errno.EACCES:
            raise
        # retry with wider caps for clusters that reject the
        # 'allow profile mds' form
        subprocess.check_call(
            args = [
                'ceph',
                '--cluster', cluster,
                '--name', 'client.bootstrap-mds',
                '--keyring', bootstrap_keyring,
                'auth', 'get-or-create', 'mds.{name}'.format(name=name),
                'osd', 'allow *',
                'mds', 'allow',
                'mon', 'allow rwx',
                '-o',
                os.path.join(keypath),
            ])

    # marker: deployment completed
    with file(os.path.join(path, 'done'), 'wb') as f:
        pass

    # marker: which init system manages this daemon
    with file(os.path.join(path, init), 'wb') as f:
        pass

    if init == 'upstart':
        subprocess.check_call(
            args=[
                'initctl',
                'emit',
                'ceph-mds',
                'cluster={cluster}'.format(cluster=cluster),
                'id={name}'.format(name=name),
            ])
    elif init == 'sysvinit':
        subprocess.check_call(
            args=[
                'service',
                'ceph',
                'start',
                'mds.{name}'.format(name=name),
            ])
+
def mds_create(args):
    """
    Deploy an mds to every HOST[:NAME] given on the command line.

    For each host: detect the platform, push the cluster conf and the
    bootstrap-mds key (once per host), then create and start the mds
    daemon.  Per-host failures are logged and counted; a GenericError
    summarizing the count is raised at the end.
    """
    cfg = conf.load(args)
    LOG.debug(
        'Deploying mds, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.mds),
    )

    if not args.mds:
        raise exc.NeedHostError()

    # read the local bootstrap-mds keyring gathered by 'gatherkeys'
    key = get_bootstrap_mds_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, name in args.mds:
        try:
            # TODO username
            sudo = args.pushy(get_transport(hostname))

            (distro, release, codename) = lsb.get_lsb_release(sudo)
            init = lsb.choose_init(distro, codename)
            LOG.debug('Distro %s codename %s, will use %s',
                      distro, codename, init)

            # conf and bootstrap key only need to be pushed once per host
            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('Deploying mds bootstrap to %s', hostname)

                write_conf_r = sudo.compile(conf.write_conf)
                conf_data = StringIO()
                cfg.write(conf_data)
                write_conf_r(
                    cluster=args.cluster,
                    conf=conf_data.getvalue(),
                    overwrite=args.overwrite_conf,
                )

                create_mds_bootstrap_r = sudo.compile(create_mds_bootstrap)
                error = create_mds_bootstrap_r(
                    cluster=args.cluster,
                    key=key,
                )
                if error is not None:
                    raise exc.GenericError(error)
                LOG.debug('Host %s is now ready for MDS use.', hostname)

            # create an mds
            LOG.debug('Deploying mds.%s to %s', name, hostname)
            create_mds_r = sudo.compile(create_mds)
            create_mds_r(
                name=name,
                cluster=args.cluster,
                init=init,
            )
            sudo.close()
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d MDSs' % errors)
+
+
def mds(args):
    """Dispatch the ``mds`` subcommand; only 'create' is implemented."""
    if args.subcommand != 'create':
        LOG.error('subcommand %s not implemented', args.subcommand)
        return
    mds_create(args)
+
+
def colon_separated(s):
    """
    Parse a ``HOST[:NAME]`` argument into a (host, name) tuple.

    When the string does not contain exactly one colon, both host and
    name default to the whole string.
    """
    if s.count(':') == 1:
        host, name = s.split(':')
    else:
        host = name = s
    return (host, name)
+
@priority(30)
def make(parser):
    """
    Deploy ceph MDS on remote hosts.
    """
    parser.add_argument(
        'subcommand',
        metavar='SUBCOMMAND',
        choices=[
            'create',
            'destroy',
        ],
        help='create or destroy',
    )
    parser.add_argument(
        'mds',
        metavar='HOST[:NAME]',
        nargs='*',
        # parsed into (host, name) tuples up front
        type=colon_separated,
        help='host (and optionally the daemon name) to deploy on',
    )
    # dispatch to mds() when this subcommand is selected
    parser.set_defaults(
        func=mds,
    )
--- /dev/null
+import functools
+
+
class NotFound(object):
    """
    Sentinel object to say call was not memoized.

    Supposed to be faster than throwing exceptions on cache miss.
    """
    def __str__(self):
        return self.__class__.__name__

# Replace the class with its single instance; only identity checks
# (``val is NotFound``) are ever performed against it.
NotFound = NotFound()
+
+
def memoize(f):
    """
    Decorator caching ``f``'s results, keyed by its positional and
    keyword arguments (all of which must be hashable).

    Uses a local sentinel so a cache miss costs a dict lookup rather
    than an exception, and ``kwargs.items()`` instead of the
    Python-2-only ``iteritems()`` so the decorator also works on
    Python 3.
    """
    cache = {}
    missing = object()  # identity marks a cache miss

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # keyword args are sorted so call-site ordering doesn't matter
        key = (args, tuple(sorted(kwargs.items())))
        val = cache.get(key, missing)
        if val is missing:
            val = cache[key] = f(*args, **kwargs)
        return val
    return wrapper
--- /dev/null
+
def get_file(path):
    """
    Run on mon node, grab a file.

    Returns the file's contents as bytes, or None when the file cannot
    be read.
    """
    try:
        # open() instead of the Python-2-only file() builtin
        with open(path, 'rb') as f:
            return f.read()
    except IOError:
        pass
--- /dev/null
+import ConfigParser
+import logging
+import re
+import subprocess
+
+from cStringIO import StringIO
+
+from . import conf
+from . import exc
+from . import lsb
+from .cliutil import priority
+from .sudo_pushy import get_transport
+from .util import paths
+from . import hosts
+
+
+LOG = logging.getLogger(__name__)
+
+
def mon_create(args):
    """
    Deploy a monitor to every host given (or to mon_initial_members
    from the cluster conf when no hosts were passed), using the
    platform backend detected per host.

    Per-host failures are logged and counted; a GenericError
    summarizing the count is raised at the end.
    """
    cfg = conf.load(args)
    if not args.mon:
        # fall back to the conf's mon_initial_members, split on
        # commas and/or whitespace
        try:
            mon_initial_members = cfg.get('global', 'mon_initial_members')
        except (ConfigParser.NoSectionError,
                ConfigParser.NoOptionError):
            pass
        else:
            args.mon = re.split(r'[,\s]+', mon_initial_members)

    if not args.mon:
        raise exc.NeedHostError()

    # the mon keyring is produced locally by 'new'
    try:
        with file('{cluster}.mon.keyring'.format(cluster=args.cluster),
                  'rb') as f:
            monitor_keyring = f.read()
    except IOError:
        raise RuntimeError('mon keyring not found; run \'new\' to create a new cluster')

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
    )

    errors = 0
    for hostname in args.mon:
        try:
            # TODO username
            # TODO add_bootstrap_peer_hint
            LOG.debug('detecting platform for host %s ...', hostname)
            distro = hosts.get(hostname)
            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
            # per-host logger so remote output is attributed to the host
            rlogger = logging.getLogger(hostname)
            rlogger.debug('deploying mon to %s', hostname)
            distro.mon.create(distro, rlogger, args, monitor_keyring)
            distro.sudo_conn.close()
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
+
+
def destroy_mon(cluster, paths, is_running):
    """
    Stop and remove the monitor on this host; runs remotely.

    Removes the mon from the cluster map, waits (up to 5 x 5s) for the
    daemon to stop, then archives its data directory under
    /var/lib/ceph/mon-removed instead of deleting it.

    :param cluster: cluster name
    :param paths: the ceph_deploy.util.paths module, passed in because
                  this function is compiled and shipped to the remote
    :param is_running: callable that runs a status argv and reports
                       whether the daemon is still up
    """
    import datetime
    import errno
    import os
    import subprocess  # noqa
    import socket
    import time
    retries = 5

    hostname = socket.gethostname().split('.')[0]
    path = paths.mon.path(cluster, hostname)

    if os.path.exists(path):
        # remove from cluster
        proc = subprocess.Popen(
            args=[
                'sudo',
                'ceph',
                '--cluster={cluster}'.format(cluster=cluster),
                '-n', 'mon.',
                '-k', '{path}/keyring'.format(path=path),
                'mon',
                'remove',
                hostname,
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        out, err = proc.communicate()
        return_status = proc.wait()
        if return_status > 0:
            raise RuntimeError(err.strip())

        # stop: build the status command matching the init marker file
        # left by mon_create
        if os.path.exists(os.path.join(path, 'upstart')):
            status_args = [
                'initctl',
                'status',
                'ceph-mon',
                'cluster={cluster}'.format(cluster=cluster),
                'id={hostname}'.format(hostname=hostname),
            ]

        elif os.path.exists(os.path.join(path, 'sysvinit')):
            status_args = [
                'service',
                'ceph',
                'status',
                'mon.{hostname}'.format(hostname=hostname),
            ]
        # NOTE(review): if neither marker file exists, status_args is
        # unbound and the loop below raises NameError -- confirm the
        # markers are always created by mon_create

        # poll until the daemon reports not-running, or give up
        while retries:
            if is_running(status_args):
                time.sleep(5)
                retries -= 1
                if retries <= 0:
                    raise RuntimeError('ceph-mon deamon did not stop')
            else:
                break

        # archive old monitor directory
        fn = '{cluster}-{hostname}-{stamp}'.format(
            hostname=hostname,
            cluster=cluster,
            stamp=datetime.datetime.utcnow().strftime("%Y-%m-%dZ%H:%M:%S"),
        )
        subprocess.check_call(
            args=[
                'mkdir',
                '-p',
                '/var/lib/ceph/mon-removed',
            ],
        )
        # NOTE(review): redundant with the mkdir -p just above, but
        # harmless (EEXIST is swallowed)
        try:
            os.makedirs('/var/lib/ceph/mon-removed')
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise
        os.rename(path, os.path.join('/var/lib/ceph/mon-removed/', fn))
+
def mon_destroy(args):
    """
    Remove the monitor from every host given on the command line by
    compiling and running destroy_mon remotely.

    Per-host failures are logged and counted; a GenericError
    summarizing the count is raised at the end.
    """
    errors = 0
    for hostname in args.mon:
        try:
            LOG.debug('Removing mon from %s', hostname)

            # TODO username
            sudo = args.pushy(get_transport(hostname))

            # paths and is_running are shipped along because the
            # compiled function has no access to this module remotely
            destroy_mon_r = sudo.compile(destroy_mon)
            destroy_mon_r(
                cluster=args.cluster,
                paths=paths,
                is_running=is_running,
            )
            sudo.close()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to destroy %d monitors' % errors)
+
+
def mon(args):
    """Dispatch the ``mon`` subcommand to create or destroy."""
    handlers = {
        'create': mon_create,
        'destroy': mon_destroy,
    }
    handler = handlers.get(args.subcommand)
    if handler is None:
        LOG.error('subcommand %s not implemented', args.subcommand)
        return
    handler(args)
+
+
@priority(30)
def make(parser):
    """
    Deploy ceph monitor on remote hosts.
    """
    parser.add_argument(
        'subcommand',
        metavar='SUBCOMMAND',
        choices=[
            'create',
            'destroy',
        ],
        help='create or destroy',
    )
    parser.add_argument(
        'mon',
        metavar='HOST',
        nargs='*',
        help='host to deploy on',
    )
    # dispatch to mon() when this subcommand is selected
    parser.set_defaults(
        func=mon,
    )
+
+#
+# Helpers
+#
+
+
def is_running(args):
    """
    Run a command to check the status of a mon, return a boolean.

    We heavily depend on the format of the output, if that ever changes
    we need to modify this.
    output of the status should be similar to::

        mon.mira094: running {"version":"0.61.5"}

    or when it fails::

        mon.mira094: dead {"version":"0.61.5"}
        mon.mira094: not running {"version":"0.61.5"}
    """
    proc = subprocess.Popen(
        args=args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = proc.communicate()
    # communicate() hands back bytes on Python 3; normalize both
    # streams to text before doing substring checks
    parts = [
        p.decode('utf-8', 'replace') if isinstance(p, bytes) else p
        for p in (out, err)
    ]
    result_string = ' '.join(parts)
    return any(
        marker in result_string
        for marker in (': running', ' start/running')
    )
--- /dev/null
+import ConfigParser
+import errno
+import logging
+import os
+import uuid
+import struct
+import time
+import base64
+import socket
+
+from . import exc
+from .cliutil import priority
+from .util import arg_validators
+
+
+LOG = logging.getLogger(__name__)
+
+
def generate_auth_key():
    """
    Return a fresh base64-encoded cephx secret: a 12-byte header
    followed by 16 random key bytes.
    """
    key = os.urandom(16)
    # binary header layout:
    #   le16 type: 1 = CEPH_CRYPTO_AES
    #   le32 created: seconds
    #   le32 created: nanoseconds
    #   le16 key length
    header = struct.pack(
        '<hiih',
        1,
        int(time.time()),
        0,
        len(key),
    )
    return base64.b64encode(header + key)
+
def get_nonlocal_ip(host):
    """
    Search result of getaddrinfo() for a non-localhost-net address
    """
    for info in socket.getaddrinfo(host, None):
        # each entry is a 5-tuple; its last element is (ip, port, ...)
        addr = info[4][0]
        if not addr.startswith('127.'):
            return addr
    raise exc.UnableToResolveError(host)
+
+
def new(args):
    """
    Create a new cluster definition: write CLUSTER.conf and an initial
    monitor keyring into the current directory.

    Honors ``args.dry_run`` by skipping all file writes.
    """
    LOG.debug('Creating new cluster named %s', args.cluster)
    cfg = ConfigParser.RawConfigParser()
    cfg.add_section('global')

    # uuid4 is random, so no MAC address or timestamp leaks into the fsid
    fsid = uuid.uuid4()
    cfg.set('global', 'fsid', str(fsid))

    mon_initial_members = []
    mon_host = []

    for m in args.mon:
        # each monitor may be given as NAME:HOST or just HOST
        if m.count(':'):
            (name, host) = m.split(':')
        else:
            name = m
            host = m
            # use the short hostname when an fqdn was given
            if name.count('.') > 0:
                name = name.split('.')[0]
        LOG.debug('Resolving host %s', host)
        ip = get_nonlocal_ip(host)
        LOG.debug('Monitor %s at %s', name, ip)
        mon_initial_members.append(name)
        mon_host.append(ip)

    LOG.debug('Monitor initial members are %s', mon_initial_members)
    LOG.debug('Monitor addrs are %s', mon_host)

    cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    cfg.set('global', 'mon host', ','.join(mon_host))

    # override undesirable defaults, needed until bobtail

    # http://tracker.newdream.net/issues/3136
    cfg.set('global', 'auth supported', 'cephx')

    # http://tracker.newdream.net/issues/3137
    cfg.set('global', 'osd journal size', '1024')

    # http://tracker.newdream.net/issues/3138
    cfg.set('global', 'filestore xattr use omap', 'true')

    path = '{name}.conf'.format(
        name=args.cluster,
    )

    LOG.debug('Creating a random mon key...')
    mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key()

    keypath = '{name}.mon.keyring'.format(
        name=args.cluster,
    )

    LOG.debug('Writing initial config to %s...', path)
    if not args.dry_run:
        # write to a temp file then rename, so readers never see a
        # half-written config
        tmp = '%s.tmp' % path
        with open(tmp, 'w') as f:
            cfg.write(f)
        try:
            os.rename(tmp, path)
        except OSError as e:
            # NOTE(review): on POSIX os.rename silently replaces an
            # existing target, so this EEXIST guard likely only fires
            # on other platforms -- confirm intended semantics
            if e.errno == errno.EEXIST:
                raise exc.ClusterExistsError(path)
            else:
                raise

    LOG.debug('Writing monitor keyring to %s...', keypath)
    if not args.dry_run:
        tmp = '%s.tmp' % keypath
        with open(tmp, 'w') as f:
            f.write(mon_keyring)
        try:
            os.rename(tmp, keypath)
        except OSError as e:
            if e.errno == errno.EEXIST:
                raise exc.ClusterExistsError(keypath)
            else:
                raise
+
+
@priority(10)
def make(parser):
    """
    Start deploying a new cluster, and write a CLUSTER.conf and keyring for it.
    """
    # At least one initial monitor is required (nargs='+'); the
    # validator rejects malformed hostnames early.
    parser.add_argument(
        'mon', metavar='MON',
        nargs='+',
        type=arg_validators.Hostname(),
        help='initial monitor hostname, fqdn, or hostname:fqdn pair',
    )
    parser.set_defaults(func=new)
--- /dev/null
+import argparse
+import logging
+import os
+import sys
+
+from cStringIO import StringIO
+
+from . import conf
+from . import exc
+from . import lsb
+from .cliutil import priority
+from .sudo_pushy import get_transport
+
+
+LOG = logging.getLogger(__name__)
+
+
def get_bootstrap_osd_key(cluster):
    """
    Read the bootstrap-osd key for `cluster` from the current directory.

    Raises RuntimeError when the keyring file is missing, pointing the
    user at the ``gatherkeys`` command that fetches it.
    """
    path = '{cluster}.bootstrap-osd.keyring'.format(cluster=cluster)
    try:
        # binary mode: the keyring is forwarded to the remote host verbatim
        with open(path, 'rb') as f:
            return f.read()
    except IOError:
        raise RuntimeError('bootstrap-osd keyring not found; run \'gatherkeys\'')
+
def create_osd(cluster, key):
    """
    Run on osd node, writes the bootstrap key if not there yet.

    Returns None on success, error message on error exceptions. pushy
    mangles exceptions to all be of type ExceptionProxy, so we can't
    tell between bug and correctly handled failure, so avoid using
    exceptions for non-exceptional runs.
    """
    path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring'.format(
        cluster=cluster,
    )
    if not os.path.exists(path):
        tmp = '{path}.{pid}.tmp'.format(
            path=path,
            pid=os.getpid(),
        )
        # open() doesn't let us control access mode from the
        # beginning, and thus would have a race where attacker can
        # open before we chmod the file, so play games with os.open
        fd = os.open(
            tmp,
            (os.O_WRONLY | os.O_CREAT | os.O_EXCL
             | os.O_NOCTTY | os.O_NOFOLLOW),
            0o600,
        )
        with os.fdopen(fd, 'wb') as f:
            f.write(key)
            f.flush()
            os.fsync(f)
        os.rename(tmp, path)

    def subproc_call(*args, **kwargs):
        """
        call subproc that might fail, collect returncode and stderr/stdout
        to be used in pushy.compile()d functions. Returns 4-tuple of
        (process exit code, command, stdout contents, stderr contents)
        """
        # local imports: this function's source is shipped to the remote
        # host by pushy.compile() and must be self-contained
        import subprocess
        import tempfile

        otmp = tempfile.TemporaryFile()
        etmp = tempfile.TemporaryFile()
        cmd = ' '.join(kwargs['args'])
        ret = 0
        errtxt = ''
        kwargs.update(dict(stdout=otmp, stderr=etmp))
        try:
            subprocess.check_call(*args, **kwargs)
        except subprocess.CalledProcessError as e:
            # command ran but exited nonzero
            ret = e.returncode
        except Exception as e:
            ret = -1
            # OSError has errno
            if hasattr(e, 'errno'):
                ret = e.errno
            errtxt = str(e)
        otmp.seek(0)
        etmp.seek(0)
        return (ret, cmd, otmp.read(), errtxt + etmp.read())

    # in case disks have been prepared before we do this, activate
    # them now.
    return subproc_call(
        args=[
            'udevadm',
            'trigger',
            '--subsystem-match=block',
            '--action=add',
        ],
    )
+
def prepare_disk(cluster, disk, journal, activate_prepared_disk, zap, dmcrypt, dmcrypt_dir):
    """
    Run on osd node, prepares a data disk for use.

    Builds a ceph-disk-prepare command line from the flags, runs it,
    and optionally triggers udev so the prepared disk gets activated.
    Returns a 4-tuple of (exit code, command, stdout, stderr); the
    exit code is 0 on success.
    """
    args = [
        'ceph-disk-prepare',
    ]
    if zap:
        args.append('--zap-disk')
    if dmcrypt:
        args.append('--dmcrypt')
        if dmcrypt_dir is not None:
            args.append('--dmcrypt-key-dir')
            args.append(dmcrypt_dir)
    # '--' stops option parsing so device paths are never read as flags
    args.extend([
        '--',
        disk,
    ])
    if journal is not None:
        args.append(journal)

    def subproc_call(*args, **kwargs):
        """
        call subproc that might fail, collect returncode and stderr/stdout
        to be used in pushy.compile()d functions. Returns 4-tuple of
        (process exit code, command, stdout contents, stderr contents)
        """
        # local imports: this function's source is shipped to the remote
        # host by pushy.compile() and must be self-contained
        import subprocess
        import tempfile

        otmp = tempfile.TemporaryFile()
        etmp = tempfile.TemporaryFile()
        cmd = ' '.join(kwargs['args'])
        ret = 0
        errtxt = ''
        kwargs.update(dict(stdout=otmp, stderr=etmp))
        try:
            subprocess.check_call(*args, **kwargs)
        except subprocess.CalledProcessError as e:
            # command ran but exited nonzero
            ret = e.returncode
        except Exception as e:
            ret = -1
            # OSError has errno
            if hasattr(e, 'errno'):
                ret = e.errno
            errtxt = str(e)
        otmp.seek(0)
        etmp.seek(0)
        return (ret, cmd, otmp.read(), errtxt + etmp.read())

    ret = subproc_call(args=args)
    if ret[0]:
        # ceph-disk-prepare failed; propagate its status and output
        return ret
    if activate_prepared_disk:
        # ask udev to re-scan block devices so the freshly prepared
        # OSD is noticed and activated
        ret = subproc_call(
            args=[
                'udevadm',
                'trigger',
                '--subsystem-match=block',
                '--action=add',
            ],
        )
        if ret[0]:
            return ret
    return (0, '', '', '')
+
+
def activate_disk(cluster, disk, init):
    """
    Run on the osd node, activates a disk.

    Mounts `disk` and marks it to be managed by the `init` system.
    Returns a 4-tuple of (exit code, command, stdout, stderr).
    """
    def subproc_call(*args, **kwargs):
        """
        call subproc that might fail, collect returncode and stderr/stdout
        to be used in pushy.compile()d functions. Returns 4-tuple of
        (process exit code, command, stdout contents, stderr contents)
        """
        # local imports: this function's source is shipped to the remote
        # host by pushy.compile() and must be self-contained
        import subprocess
        import tempfile

        otmp = tempfile.TemporaryFile()
        etmp = tempfile.TemporaryFile()
        cmd = ' '.join(kwargs['args'])
        ret = 0
        errtxt = ''
        kwargs.update(dict(stdout=otmp, stderr=etmp))
        try:
            subprocess.check_call(*args, **kwargs)
        except subprocess.CalledProcessError as e:
            # command ran but exited nonzero
            ret = e.returncode
        except Exception as e:
            ret = -1
            # OSError has errno
            if hasattr(e, 'errno'):
                ret = e.errno
            errtxt = str(e)
        otmp.seek(0)
        etmp.seek(0)
        return (ret, cmd, otmp.read(), errtxt + etmp.read())

    return subproc_call(
        args=[
            'ceph-disk-activate',
            '--mark-init',
            init,
            '--mount',
            disk,
        ])
+
def prepare(args, cfg, activate_prepared_disk):
    """
    Prepare OSD disks on the remote hosts listed in ``args.disk``.

    Each host is bootstrapped at most once per run (config pushed,
    bootstrap-osd key installed), then every disk is prepared remotely.
    Per-disk failures are logged and counted; a single GenericError
    summarizing the failure count is raised at the end.
    """
    LOG.debug(
        'Preparing cluster %s disks %s',
        args.cluster,
        # render the (host, disk, journal) tuples as host:disk:journal,
        # with None elements shown as empty strings
        ' '.join(':'.join(x or '' for x in t) for t in args.disk),
    )

    key = get_bootstrap_osd_key(cluster=args.cluster)

    # hosts that already received the conf and bootstrap key in this run
    bootstrapped = set()
    errors = 0
    for hostname, disk, journal in args.disk:
        try:
            if disk is None:
                raise exc.NeedDiskError(hostname)
            # TODO username
            sudo = args.pushy(get_transport(hostname))

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('Deploying osd to %s', hostname)

                # push the cluster config file to the remote host
                write_conf_r = sudo.compile(conf.write_conf)
                conf_data = StringIO()
                cfg.write(conf_data)
                write_conf_r(
                    cluster=args.cluster,
                    conf=conf_data.getvalue(),
                    overwrite=args.overwrite_conf,
                )

                create_osd_r = sudo.compile(create_osd)
                ret, cmd, out, err = create_osd_r(
                    cluster=args.cluster,
                    key=key,
                )
                if ret:
                    s = '{cmd} returned {ret}\n{out}\n{err}'.format(
                        cmd=cmd, ret=ret, out=out, err=err)
                    LOG.debug('Failed preparing host %s: %s', hostname, s)
                    raise RuntimeError(s)
                else:
                    LOG.debug('Host %s is now ready for osd use.', hostname)

            LOG.debug('Preparing host %s disk %s journal %s activate %s',
                      hostname, disk, journal, activate_prepared_disk)

            prepare_disk_r = sudo.compile(prepare_disk)
            ret, cmd, out, err = prepare_disk_r(
                cluster=args.cluster,
                disk=disk,
                journal=journal,
                activate_prepared_disk=activate_prepared_disk,
                zap=args.zap_disk,
                dmcrypt=args.dmcrypt,
                dmcrypt_dir=args.dmcrypt_key_dir,
            )
            sudo.close()
            if ret:
                s = '{cmd} returned {ret}\n{out}\n{err}'.format(
                    cmd=cmd, ret=ret, out=out, err=err)
                raise RuntimeError(s)
        except RuntimeError as e:
            # keep going over the remaining disks; summarize at the end
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d OSDs' % errors)
+
def activate(args, cfg):
    """
    Activate prepared disks on the remote hosts in ``args.disk``.

    For every (hostname, disk, journal) tuple, figure out which init
    system the remote distro uses and run ceph-disk-activate there.

    Raises RuntimeError when the remote activation command fails.
    """
    LOG.debug(
        'Activating cluster %s disks %s',
        args.cluster,
        # join elements of t with ':', t's with ' '
        # allow None in elements of t; print as empty
        ' '.join(':'.join((s or '') for s in t) for t in args.disk),
    )

    for hostname, disk, journal in args.disk:

        # TODO username
        sudo = args.pushy(get_transport(hostname))

        LOG.debug('Activating host %s disk %s', hostname, disk)

        (distro, release, codename) = lsb.get_lsb_release(sudo)
        init = lsb.choose_init(distro, codename)
        LOG.debug('Distro %s codename %s, will use %s',
                  distro, codename, init)

        activate_disk_r = sudo.compile(activate_disk)
        # name the results the same way prepare() does; the previous
        # code unpacked into (err, cmd, stdout, stderr) but then
        # formatted the undefined names `ret` and `out`, so a remote
        # failure raised NameError instead of the intended message
        ret, cmd, out, err = activate_disk_r(
            cluster=args.cluster,
            disk=disk,
            init=init,
        )
        sudo.close()
        if ret:
            s = '{cmd} returned {ret}\n{out}\n{err}'.format(
                cmd=cmd, ret=ret, out=out, err=err)
            raise RuntimeError(s)
+
# NOTE: this mirrors ceph-disk-prepare --zap-disk DEV
def zap(dev):
    """
    Run on the osd node: destroy the partition table on `dev`.

    Wipes the GPT backup copies by zeroing the tail of the device,
    then lets sgdisk rebuild a clean (GPT) label.
    """
    # local import: this function's source is shipped to the remote
    # host by pushy.compile() and must be self-contained
    import subprocess

    try:
        # this kills the crab
        #
        # sgdisk will wipe out the main copy of the GPT partition
        # table (sorry), but it doesn't remove the backup copies, and
        # subsequent commands will continue to complain and fail when
        # they see those. zeroing the last few blocks of the device
        # appears to do the trick.
        lba_size = 4096
        size = 33 * lba_size
        # NOTE(review): mode 'wb' would truncate a regular file; this
        # presumably relies on dev being a block device -- confirm
        with file(dev, 'wb') as f:
            f.seek(-size, os.SEEK_END)
            f.write(size*'\0')

        subprocess.check_call(
            args=[
                'sgdisk',
                '--zap-all',
                '--clear',
                '--mbrtogpt',
                '--',
                dev,
            ],
        )
    except subprocess.CalledProcessError as e:
        # surface the failed sgdisk invocation through pushy
        raise RuntimeError(e)
+
def disk_zap(args):
    """
    Zap the partition table of every requested remote disk.
    """
    cfg = conf.load(args)

    for hostname, disk, journal in args.disk:
        LOG.debug('zapping %s on %s', disk, hostname)

        # TODO username
        conn = args.pushy(get_transport(hostname))
        remote_zap = conn.compile(zap)
        remote_zap(disk)
        conn.close()
+
+
def list_disk():
    """
    Run on the osd node: execute `ceph-disk list` and return a 4-tuple
    of (exit code, command, stdout, stderr).
    """

    def subproc_call(*args, **kwargs):
        """
        call subproc that might fail, collect returncode and stderr/stdout
        to be used in pushy.compile()d functions. Returns 4-tuple of
        (process exit code, command, stdout contents, stderr contents)
        """
        # local imports: this function's source is shipped to the remote
        # host by pushy.compile() and must be self-contained
        import subprocess
        import tempfile

        otmp = tempfile.TemporaryFile()
        etmp = tempfile.TemporaryFile()
        cmd = ' '.join(kwargs['args'])
        errtxt = ''
        ret = 0
        kwargs.update(dict(stdout=otmp, stderr=etmp))
        try:
            subprocess.check_call(*args, **kwargs)
        except subprocess.CalledProcessError as e:
            # command ran but exited nonzero
            ret = e.returncode
        except Exception as e:
            ret = -1
            # OSError has errno
            if hasattr(e, 'errno'):
                ret = e.errno
            errtxt = str(e)
        otmp.seek(0)
        etmp.seek(0)
        return (ret, cmd, otmp.read(), errtxt + etmp.read())

    ret, cmd, out, err = subproc_call(
        args=[
            'ceph-disk',
            'list',
        ],
    )

    return ret, cmd, out, err
+
def disk_list(args, cfg):
    """
    Print `ceph-disk list` output gathered from every host in ``args.disk``.
    """
    for hostname, disk, journal in args.disk:

        # TODO username
        sudo = args.pushy(get_transport(hostname))

        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))

        list_disk_r = sudo.compile(list_disk)
        ret, cmd, out, err = list_disk_r()
        if ret:
            LOG.error("disk list failed: %s", err)
        else:
            # trailing comma: the remote output already ends in a newline
            print out,

        sudo.close()
+
def osd_list(args, cfg):
    """
    Listing deployed OSDs is not implemented yet; log a pointer to the
    upstream ticket and exit nonzero.
    """
    LOG.error('Not yet implemented; see http://tracker.ceph.com/issues/5071')
    sys.exit(1)
+
def osd(args):
    """
    Dispatch `osd` subcommands: list, prepare, create, activate.
    """
    cfg = conf.load(args)

    # `create` is shorthand for prepare followed by activation
    dispatch = {
        'list': lambda: osd_list(args, cfg),
        'prepare': lambda: prepare(args, cfg, activate_prepared_disk=False),
        'create': lambda: prepare(args, cfg, activate_prepared_disk=True),
        'activate': lambda: activate(args, cfg),
    }
    action = dispatch.get(args.subcommand)
    if action is None:
        LOG.error('subcommand %s not implemented', args.subcommand)
        sys.exit(1)
    action()
+
+
+
def disk(args):
    """
    Dispatch `disk` subcommands: list, prepare, activate, zap.
    """
    cfg = conf.load(args)

    dispatch = {
        'list': lambda: disk_list(args, cfg),
        'prepare': lambda: prepare(args, cfg, activate_prepared_disk=False),
        'activate': lambda: activate(args, cfg),
        'zap': lambda: disk_zap(args),
    }
    action = dispatch.get(args.subcommand)
    if action is None:
        LOG.error('subcommand %s not implemented', args.subcommand)
        sys.exit(1)
    action()
+
+
def colon_separated(s):
    """
    argparse type: parse 'HOST[:DISK[:JOURNAL]]' into a 3-tuple
    (host, disk, journal); parts not given come back as None.  Bare
    device names are resolved under /dev.
    """
    pieces = s.split(':')
    if len(pieces) == 1:
        host, disk, journal = pieces[0], None, None
    elif len(pieces) == 2:
        host, disk = pieces
        journal = None
    elif len(pieces) == 3:
        host, disk, journal = pieces
    else:
        raise argparse.ArgumentTypeError('must be in form HOST:DISK[:JOURNAL]')

    if disk:
        # allow just "sdb" to mean /dev/sdb; os.path.join leaves an
        # already-absolute path untouched
        disk = os.path.join('/dev', disk)
    if journal is not None:
        journal = os.path.join('/dev', journal)

    return (host, disk, journal)
+
+
@priority(50)
def make(parser):
    """
    Prepare a data disk on remote host.
    """
    parser.add_argument(
        'subcommand', metavar='SUBCOMMAND',
        choices=['list', 'create', 'prepare', 'activate', 'destroy'],
        help='list, create (prepare+activate), prepare, activate, or destroy',
    )
    parser.add_argument(
        'disk', nargs='+',
        metavar='HOST:DISK[:JOURNAL]',
        type=colon_separated,
        help='host and disk to prepare',
    )
    parser.add_argument(
        '--zap-disk', action='store_true', default=None,
        help='destroy existing partition table and content for DISK',
    )
    parser.add_argument(
        '--dmcrypt', action='store_true', default=None,
        help='use dm-crypt on DISK',
    )
    parser.add_argument(
        '--dmcrypt-key-dir', metavar='KEYDIR',
        default='/etc/ceph/dmcrypt-keys',
        help='directory where dm-crypt keys are stored',
    )
    # osd() switches on the parsed `subcommand`
    parser.set_defaults(func=osd)
+
+
@priority(50)
def make_disk(parser):
    """
    Manage disks on a remote host.
    """
    parser.add_argument(
        'subcommand', metavar='SUBCOMMAND',
        choices=['list', 'prepare', 'activate', 'zap'],
        help='list, prepare, activate, zap',
    )
    parser.add_argument(
        'disk', nargs='+',
        metavar='HOST[:DISK]',
        type=colon_separated,
        help='host (and optionally disk)',
    )
    parser.add_argument(
        '--zap-disk', action='store_true', default=None,
        help='destroy existing partition table and content for DISK',
    )
    parser.add_argument(
        '--dmcrypt', action='store_true', default=None,
        help='use dm-crypt on DISK',
    )
    parser.add_argument(
        '--dmcrypt-key-dir', metavar='KEYDIR',
        default='/etc/ceph/dmcrypt-keys',
        help='directory where dm-crypt keys are stored',
    )
    # disk() switches on the parsed `subcommand`
    parser.set_defaults(func=disk)
--- /dev/null
+import pushy.transport.ssh
+import pushy.transport.local
+import subprocess
+
+
class Local_Popen(pushy.transport.local.Popen):
    """
    Local pushy transport that bypasses the parent constructor so the
    (sudo-prefixed) command line is run verbatim.
    """

    def __init__(self, command, address, **kwargs):
        # deliberately skip pushy.transport.local.Popen.__init__ and
        # call the grandparent so we control the subprocess ourselves
        pushy.transport.BaseTransport.__init__(self, address)

        self.__proc = subprocess.Popen(command, stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       bufsize=65535)

        # expose the pipes under the attribute names pushy expects
        self.stdout = self.__proc.stdout
        self.stderr = self.__proc.stderr
        self.stdin = self.__proc.stdin

    def close(self):
        # closing stdin signals EOF to the child, then reap it
        self.stdin.close()
        self.__proc.wait()
+
class SshSudoTransport(object):
    """
    pushy transport: run the remote side over ssh, wrapped in sudo.
    """

    @staticmethod
    def Popen(command, *a, **kw):
        # prepend sudo so the remote interpreter runs with root privileges
        command = ['sudo'] + command
        return pushy.transport.ssh.Popen(command, *a, **kw)
+
class LocalSudoTransport(object):
    """
    pushy transport: run locally, wrapped in sudo.
    """

    @staticmethod
    def Popen(command, *a, **kw):
        # prepend sudo so the local interpreter runs with root privileges
        command = ['sudo'] + command
        return Local_Popen(command, *a, **kw)
+
def get_transport(hostname):
    """
    Choose a pushy transport URL for `hostname`: the sudo'ed local
    transport when it names this very machine, ssh+sudo otherwise.
    """
    import socket

    # compare short hostnames so fqdn vs short-name still matches
    local_short_name = socket.gethostname().split('.')[0]
    if hostname == local_short_name:
        return 'local+sudo:'
    return 'ssh+sudo:{hostname}'.format(hostname=hostname)
+
def patch():
    """
    Monkey patches pushy so it supports running via (passphraseless)
    sudo on the remote host.
    """
    # register the two sudo-wrapping transports under their URL schemes
    pushy.transports['ssh+sudo'] = SshSudoTransport
    pushy.transports['local+sudo'] = LocalSudoTransport
--- /dev/null
+import logging
+import os
+import subprocess
+import sys
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _prepend_path(env):
+ """
+ Make sure the PATH contains the location where the Python binary
+ lives. This makes sure cli tools installed in a virtualenv work.
+ """
+ if env is None:
+ env = os.environ
+ env = dict(env)
+ new = os.path.dirname(sys.executable)
+ path = env.get('PATH')
+ if path is not None:
+ new = new + ':' + path
+ env['PATH'] = new
+ return env
+
+
+class CLIFailed(Exception):
+ """CLI tool failed"""
+
+ def __init__(self, args, status):
+ self.args = args
+ self.status = status
+
+ def __str__(self):
+ return '{doc}: {args}: exited with status {status}'.format(
+ doc=self.__doc__,
+ args=self.args,
+ status=self.status,
+ )
+
+
class CLIProcess(object):
    """
    Context manager around subprocess.Popen: yields the process on
    entry, waits for it on exit, and turns a nonzero exit status into
    CLIFailed.
    """

    def __init__(self, **kw):
        # kw is passed straight through to subprocess.Popen
        self.kw = kw

    def __enter__(self):
        try:
            self.p = subprocess.Popen(**self.kw)
        except OSError as e:
            # the binary is missing or not executable: fail the test
            raise AssertionError(
                'CLI tool {args!r} does not work: {err}'.format(
                    args=self.kw['args'],
                    err=e,
                ),
            )
        else:
            return self.p

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.p.wait()
        if self.p.returncode != 0:
            err = CLIFailed(
                args=self.kw['args'],
                status=self.p.returncode,
            )
            if exc_type is None:
                # nothing else raised, so we should complain; if
                # something else failed, we'll just log
                raise err
            else:
                LOG.error(str(err))
+
+
class CLITester(object):
    """
    Callable fixture object: invoking it builds a CLIProcess that runs
    inside `tmpdir` with a predictable environment.
    """

    # provide easy way for caller to access the exception class
    # without importing us
    Failed = CLIFailed

    def __init__(self, tmpdir):
        self.tmpdir = tmpdir

    def __call__(self, **kw):
        kw.setdefault('cwd', str(self.tmpdir))
        env = _prepend_path(kw.get('env'))
        # fixed terminal width keeps argparse help output deterministic
        env['COLUMNS'] = '80'
        kw['env'] = env
        return CLIProcess(**kw)
+
+
def pytest_funcarg__cli(request):
    """
    Test command line behavior.
    """
    # reuse the test function's own tmpdir so the caller can prepare
    # and inspect any files the cli tool reads or creates
    return CLITester(tmpdir=request.getfuncargvalue('tmpdir'))
--- /dev/null
+import contextlib
+import os
+
+
@contextlib.contextmanager
def directory(path):
    """
    Temporarily chdir into `path`, restoring the previous working
    directory afterwards.  The old directory is tracked by file
    descriptor, so restoration works even if it gets renamed.
    """
    saved_fd = os.open('.', os.O_RDONLY | os.O_DIRECTORY)
    try:
        os.chdir(path)
        yield
    finally:
        os.fchdir(saved_fd)
        os.close(saved_fd)
--- /dev/null
+
+
def fake_getaddrinfo(*a, **kw):
    """
    Stand-in for socket.getaddrinfo(): returns a single fake addrinfo
    entry whose sockaddr slot is just the requested hostname string.
    """
    host = kw.get('return_host', 'host1')
    return [[0, 0, 0, 0, host]]
--- /dev/null
+import pytest
+import subprocess
+
+
def test_help(tmpdir, cli):
    """Top-level --help mentions usage, options and the command list."""
    with cli(
        args=['ceph-deploy', '--help'],
        stdout=subprocess.PIPE,
    ) as proc:
        output = proc.stdout.read()
        assert 'usage: ceph-deploy' in output
        assert 'optional arguments:' in output
        assert 'commands:' in output
+
+
def test_bad_command(tmpdir, cli):
    """An unknown command exits with status 2 and prints usage."""
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'bork'],
            stderr=subprocess.PIPE,
        ) as proc:
            output = proc.stderr.read()
            assert 'usage: ceph-deploy' in output
    assert err.value.status == 2
    # nothing must have been written into the working directory
    assert [entry.basename for entry in tmpdir.listdir()] == []
+
+
def test_bad_cluster(tmpdir, cli):
    """An invalid --cluster value is rejected without creating files."""
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', '--cluster=/evil-this-should-not-be-created', 'new'],
            stderr=subprocess.PIPE,
        ) as proc:
            output = proc.stderr.read()
            assert 'usage: ceph-deploy' in output
    assert err.value.status == 2
    # nothing must have been written into the working directory
    assert [entry.basename for entry in tmpdir.listdir()] == []
--- /dev/null
+import argparse
+import collections
+import mock
+import pytest
+import subprocess
+
+from ..cli import main
+from .. import install
+
+from .directory import directory
+
+
def test_help(tmpdir, cli):
    """`install --help` shows usage and both argument groups."""
    with cli(
        args=['ceph-deploy', 'install', '--help'],
        stdout=subprocess.PIPE,
    ) as proc:
        output = proc.stdout.read()
        assert 'usage: ceph-deploy' in output
        assert 'positional arguments:' in output
        assert 'optional arguments:' in output
+
+
def test_bad_no_host(tmpdir, cli):
    """`install` without a host exits with status 2 and prints usage."""
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'install'],
            stderr=subprocess.PIPE,
        ) as proc:
            output = proc.stderr.read()
            assert 'usage: ceph-deploy install' in output
            assert 'too few arguments' in output
    assert err.value.status == 2
+
+
def test_simple(tmpdir):
    """End-to-end `install` run against fully mocked pushy plumbing."""
    ns = argparse.Namespace()
    ns.pushy = mock.Mock()
    conn = mock.NonCallableMock(name='PushyClient')
    ns.pushy.return_value = conn

    mock_compiled = collections.defaultdict(mock.Mock)
    conn.compile.return_value = mock.Mock(return_value=('Ubuntu', 'precise', 'cuttlefish'))
    fake_get_release = mock.Mock(return_value=('Ubuntu', 'precise', 'cuttlefish'))
    fake_distro = mock.Mock(name='FakeDistro')
    fake_distro.return_value = fake_distro

    try:
        with directory(str(tmpdir)):
            with mock.patch('ceph_deploy.hosts.lsb.get_lsb_release', fake_get_release):
                with mock.patch('ceph_deploy.hosts.pushy', ns.pushy):
                    with mock.patch('ceph_deploy.hosts._get_distro', fake_distro):

                        main(
                            args=['-v', 'install', 'storehost1'],
                            namespace=ns,
                        )
    except SystemExit as e:
        # bug fix: the message used to be passed as a second argument
        # ('...%s', e) and was never interpolated into the string
        raise AssertionError('Unexpected exit: %s' % e)

    connect_calls = ns.pushy.connect.call_args[0][0]
    assert connect_calls == 'ssh+sudo:storehost1'
    assert fake_distro.name == 'Ubuntu'
    assert fake_distro.release == 'precise'
    assert fake_distro.codename == 'cuttlefish'
--- /dev/null
+import argparse
+import collections
+import mock
+import pytest
+import subprocess
+
+from ..cli import main
+from .. import mon
+
+from .directory import directory
+from .fakes import fake_getaddrinfo
+
def test_help(tmpdir, cli):
    """`mon --help` shows usage, description and both argument groups."""
    with cli(
        args=['ceph-deploy', 'mon', '--help'],
        stdout=subprocess.PIPE,
    ) as p:
        result = p.stdout.read()
        assert 'usage: ceph-deploy' in result
        assert 'Deploy ceph monitor on remote hosts.' in result
        # bug fix: these two previously asserted bare non-empty string
        # literals (always true); they must check the help text
        assert 'positional arguments:' in result
        assert 'optional arguments:' in result
+
+
def test_bad_no_conf(tmpdir, cli):
    """`mon` without arguments or conf exits with status 2."""
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'mon'],
            stderr=subprocess.PIPE,
        ) as proc:
            output = proc.stderr.read()
            assert 'usage: ceph-deploy' in output
            assert 'too few arguments' in output
    assert err.value.status == 2
+
+
def test_bad_no_mon(tmpdir, cli):
    """`mon` with a conf present but no subcommand still exits 2."""
    # an empty ceph.conf is enough to get past config loading
    with tmpdir.join('ceph.conf').open('w'):
        pass
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'mon'],
            stderr=subprocess.PIPE,
        ) as proc:
            output = proc.stderr.read()
            assert 'usage: ceph-deploy mon' in output
            assert 'too few arguments' in output
    assert err.value.status == 2
+
+
def test_simple(tmpdir, capsys):
    """`new` followed by `mon create` against a mocked pushy transport."""
    with tmpdir.join('ceph.conf').open('w') as f:
        f.write("""\
[global]
fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0
mon initial members = host1
""")

    ns = argparse.Namespace()
    ns.pushy = mock.Mock()
    conn = mock.NonCallableMock(name='PushyClient')
    ns.pushy.return_value = conn

    mock_compiled = collections.defaultdict(mock.Mock)
    conn.compile.side_effect = mock_compiled.__getitem__

    MON_SECRET = 'AQBWDj5QAP6LHhAAskVBnUkYHJ7eYREmKo5qKA=='

    def _create_mon(cluster, get_monitor_secret):
        secret = get_monitor_secret()
        assert secret == MON_SECRET

    try:
        with mock.patch('ceph_deploy.new.socket.gethostbyname'):
            with mock.patch('socket.getaddrinfo', fake_getaddrinfo):
                with directory(str(tmpdir)):
                    main(
                        args=['-v', 'new', 'host1'],
                        namespace=ns,
                    )
                    main(
                        args=['-v', 'mon', 'create', 'host1'],
                        namespace=ns,
                    )
    except SystemExit as e:
        # bug fix: the message used to be passed as a second argument
        # ('...%s', e) and was never interpolated into the string
        raise AssertionError('Unexpected exit: %s' % e)
    out, err = capsys.readouterr()
    err = err.lower()
    assert 'creating new cluster named ceph' in err
    # fake_getaddrinfo returns the hostname string as the sockaddr, so
    # ai[4][0] yields its first character: 'h'
    assert 'monitor host1 at h' in err
    assert 'resolving host host1' in err
    assert "monitor initial members are ['host1']" in err
    assert "monitor addrs are ['h']" in err
--- /dev/null
+import pytest
+from mock import patch
+import re
+import subprocess
+import uuid
+
+from .. import conf
+from ..cli import main
+from .directory import directory
+from .fakes import fake_getaddrinfo
+
+
def test_help(tmpdir, cli):
    """`new --help` shows usage and both argument groups."""
    with cli(
        args=['ceph-deploy', 'new', '--help'],
        stdout=subprocess.PIPE,
    ) as proc:
        output = proc.stdout.read()
        assert 'usage: ceph-deploy new' in output
        assert 'positional arguments' in output
        assert 'optional arguments' in output
+
+
def test_write_global_conf_section(tmpdir, cli):
    """`new` writes a ceph.conf containing only the [global] section."""
    with patch('ceph_deploy.new.socket.gethostbyname'):
        with patch('ceph_deploy.new.socket.getaddrinfo', fake_getaddrinfo):
            with directory(str(tmpdir)):
                main(args=['new', 'host1'])
                with tmpdir.join('ceph.conf').open() as conf_file:
                    parsed = conf.parse(conf_file)
                assert parsed.sections() == ['global']
+
+
def pytest_funcarg__newcfg(request):
    """
    Fixture factory: run `ceph-deploy new <args>` inside a tmpdir and
    return the parsed ceph.conf.
    """
    tmpdir = request.getfuncargvalue('tmpdir')
    cli = request.getfuncargvalue('cli')

    def new(*args):
        with patch('ceph_deploy.new.socket.gethostbyname'):
            with patch('ceph_deploy.new.socket.getaddrinfo', fake_getaddrinfo):
                with directory(str(tmpdir)):
                    main(args=['new'] + list(args))
                    with tmpdir.join('ceph.conf').open() as f:
                        return conf.parse(f)
    return new
+
+
def test_uuid(newcfg):
    """fsid is a valid, random (version 4) UUID in canonical form."""
    cfg = newcfg('host1')
    fsid = cfg.get('global', 'fsid')
    # must parse as a UUID at all
    uuid.UUID(hex=fsid)
    # and look pretty too; the constant '4' enforces a random UUID so
    # no MAC address or timestamp leaks into the fsid
    pattern = re.compile(
        r'^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$',
    )
    assert pattern.match(fsid)
+
+
def test_mons(newcfg):
    """All monitor names end up in mon_initial_members, comma separated."""
    cfg = newcfg('node01', 'node07', 'node34')
    members = cfg.get('global', 'mon_initial_members')
    assert members == 'node01, node07, node34'
+
+
def test_defaults(newcfg):
    """`new` pins the pre-bobtail workaround defaults in [global]."""
    cfg = newcfg('host1')
    expected = {
        'auth_supported': 'cephx',
        'osd_journal_size': '1024',
        'filestore_xattr_use_omap': 'true',
    }
    for key, value in expected.items():
        assert cfg.get('global', key) == value
--- /dev/null
+import argparse
+import collections
+import mock
+import pytest
+import subprocess
+
+from ..cli import main
+from .. import osd
+
+from .directory import directory
+
+
def test_help(tmpdir, cli):
    """`osd --help` shows usage and both argument groups."""
    with cli(
        args=['ceph-deploy', 'osd', '--help'],
        stdout=subprocess.PIPE,
    ) as proc:
        output = proc.stdout.read()
        assert 'usage: ceph-deploy osd' in output
        assert 'positional arguments' in output
        assert 'optional arguments' in output
+
+
def test_bad_no_conf(tmpdir, cli):
    """A bare HOST:DISK without a subcommand is an invalid choice."""
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'osd', 'fakehost:/does-not-exist'],
            stderr=subprocess.PIPE,
        ) as proc:
            output = proc.stderr.read()
            assert 'ceph-deploy osd: error' in output
            assert 'invalid choice' in output
    assert err.value.status == 2
+
+
def test_bad_no_disk(tmpdir, cli):
    """`osd` with a conf present but no disk argument exits 2."""
    # an empty ceph.conf is enough to get past config loading
    with tmpdir.join('ceph.conf').open('w'):
        pass
    with pytest.raises(cli.Failed) as err:
        with cli(
            args=['ceph-deploy', 'osd'],
            stderr=subprocess.PIPE,
        ) as proc:
            output = proc.stderr.read()
            assert 'usage: ceph-deploy osd' in output
    assert err.value.status == 2
+
+
def test_simple(tmpdir, capsys):
    """
    `gatherkeys` then `osd prepare` against separate mocked connections
    for the monitor host and the osd host.
    """
    with tmpdir.join('ceph.conf').open('w') as f:
        f.write("""\
[global]
fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0
mon host = host1
""")

    ns = argparse.Namespace()

    conn_osd = mock.NonCallableMock(name='PushyClient-osd')
    mock_compiled_osd = collections.defaultdict(mock.Mock)
    #conn_osd.compile.side_effect = mock_compiled_osd.__getitem__
    conn_osd.compile.return_value = mock.Mock(return_value='fakekeyring')

    conn_mon = mock.NonCallableMock(name='PushyClient-mon')
    mock_compiled_mon = collections.defaultdict(mock.Mock)
    conn_mon.compile.side_effect = mock_compiled_mon.__getitem__

    ns.pushy = mock.Mock(name='pushy namespace')

    # route each transport URL to its dedicated fake connection
    def _conn(url):
        if url == 'ssh+sudo:host1':
            return conn_mon
        elif url == 'ssh+sudo:storehost1:sdc':
            return conn_osd
        else:
            raise AssertionError('Unexpected connection url: %r', url)
    ns.pushy.side_effect = _conn

    BOOTSTRAP_KEY = 'fakekeyring'

    # NOTE(review): side_effect set to a string treats it as an
    # iterable (one character per call) -- confirm this is intended
    mock_compiled_mon[osd.get_bootstrap_osd_key].side_effect = BOOTSTRAP_KEY

    def _create_osd(cluster, find_key):
        key = find_key()
        assert key == BOOTSTRAP_KEY

    mock_compiled_osd[osd.create_osd].side_effect = _create_osd

    with directory(str(tmpdir)):
        main(
            args=['-v', 'gatherkeys', 'storehost1:sdc'],
            namespace=ns,
        )
        main(
            args=['-v', 'osd', 'prepare', 'storehost1:sdc'],
            namespace=ns,
        )
    out, err = capsys.readouterr()
    err = err.lower()
    assert 'have ceph.mon.keyring' in err
    assert 'have ceph.client.admin.keyring' in err
    assert 'have ceph.bootstrap-osd.keyring' in err
    assert 'got ceph.bootstrap-mds.keyring key from storehost1:sdc' in err
    assert 'got ceph.bootstrap-osd.keyring key from storehost1:sdc' in err
--- /dev/null
+from cStringIO import StringIO
+from .. import conf
+
+
def test_simple():
    """A plain key = value pair parses verbatim."""
    fp = StringIO("""\
[foo]
bar = baz
""")
    parsed = conf.parse(fp)
    assert parsed.get('foo', 'bar') == 'baz'
+
+
def test_indent_space():
    """A space-indented option line still parses."""
    fp = StringIO("""\
[foo]
 bar = baz
""")
    parsed = conf.parse(fp)
    assert parsed.get('foo', 'bar') == 'baz'
+
+
def test_indent_tab():
    """A tab-indented option line still parses."""
    fp = StringIO("""\
[foo]
\tbar = baz
""")
    parsed = conf.parse(fp)
    assert parsed.get('foo', 'bar') == 'baz'
+
+
def test_words_underscore():
    """Underscored keys are readable by both spellings."""
    fp = StringIO("""\
[foo]
bar_thud = baz
""")
    parsed = conf.parse(fp)
    assert parsed.get('foo', 'bar_thud') == 'baz'
    assert parsed.get('foo', 'bar thud') == 'baz'
+
+
def test_words_space():
    """Space-separated keys are readable by both spellings."""
    fp = StringIO("""\
[foo]
bar thud = baz
""")
    parsed = conf.parse(fp)
    assert parsed.get('foo', 'bar_thud') == 'baz'
    assert parsed.get('foo', 'bar thud') == 'baz'
+
+
def test_words_many():
    """Mixed separators collapse to a single canonical key."""
    fp = StringIO("""\
[foo]
bar__ thud quux = baz
""")
    parsed = conf.parse(fp)
    assert parsed.get('foo', 'bar_thud_quux') == 'baz'
    assert parsed.get('foo', 'bar thud quux') == 'baz'
--- /dev/null
+from pytest import raises
+
+from ceph_deploy import exc
+from ceph_deploy import hosts
+
+
class TestNormalized(object):
    """_normalized_distro_name canonicalizes distro identifier strings."""

    def test_get_debian(self):
        assert hosts._normalized_distro_name('Debian') == 'debian'

    def test_get_ubuntu(self):
        assert hosts._normalized_distro_name('Ubuntu') == 'ubuntu'

    def test_get_suse(self):
        assert hosts._normalized_distro_name('SUSE LINUX') == 'suse'

    def test_get_redhat(self):
        assert hosts._normalized_distro_name('RedHatEnterpriseLinux') == 'redhat'
+
+
class TestGetDistro(object):
    """_get_distro maps distro names onto per-distro modules."""

    def test_get_debian(self):
        module = hosts._get_distro('Debian')
        assert module.__name__.endswith('debian')

    def test_get_ubuntu(self):
        # Ubuntu imports debian stuff
        module = hosts._get_distro('Ubuntu')
        assert module.__name__.endswith('debian')

    def test_get_centos(self):
        module = hosts._get_distro('CentOS')
        assert module.__name__.endswith('centos')

    def test_get_scientific(self):
        module = hosts._get_distro('Scientific')
        assert module.__name__.endswith('centos')

    def test_get_redhat(self):
        module = hosts._get_distro('RedHat')
        assert module.__name__.endswith('centos')

    def test_get_uknown(self):
        # method name typo ('uknown') kept so test ids stay stable
        with raises(exc.UnsupportedPlatform):
            hosts._get_distro('Solaris')

    def test_get_fallback(self):
        module = hosts._get_distro('Solaris', 'Debian')
        assert module.__name__.endswith('debian')
--- /dev/null
+import sys
+from mock import Mock, MagicMock, patch, call
+from ceph_deploy import mon
+from ceph_deploy.hosts.common import mon_create
+
+
def path_exists(target_paths=None):
    """
    Build a fake ``os.path.exists``: the returned callable reports True
    only for paths in the ``target_paths`` whitelist, False otherwise.
    """
    allowed = target_paths or []

    def exists(path):
        return path in allowed
    return exists
+
+
def mock_open(mock=None, data=None):
    """
    Fake the behavior of ``open`` when used as a context manager: the
    handle's ``__enter__`` hands back ``data`` when provided, otherwise
    the handle itself.
    """
    if mock is None:
        mock = MagicMock(spec=file)

    handle = MagicMock(spec=file)
    handle.write.return_value = None
    handle.__enter__.return_value = handle if data is None else data
    mock.return_value = handle
    return mock
+
+
class TestCreateMon(object):
    """
    Exercise ``ceph_deploy.hosts.common.mon_create`` against a fully mocked
    remote connection (``distro.sudo_conn``), so no command ever leaves the
    test process.
    """

    def setup(self):
        # this setup is way more verbose than normal
        # but we are forced to because this function needs a lot
        # passed in for remote execution. No other way around it.
        self.socket = Mock()
        self.socket.gethostname.return_value = 'hostname'
        self.fake_write = Mock(name='fake_write')
        self.fake_file = mock_open(data=self.fake_write)
        self.fake_file.readline.return_value = self.fake_file
        self.fake_file.readline.lstrip.return_value = ''
        self.distro = Mock()
        self.sprocess = Mock()
        self.paths = Mock()
        self.paths.mon.path = Mock(return_value='/cluster-hostname')
        self.logger = Mock()
        # route info/debug straight to stdout so failures show the log trail
        self.logger.info = self.logger.debug = lambda x: sys.stdout.write(str(x) + "\n")

    def test_create_mon_tmp_path_if_nonexistent(self):
        # only the mon dir exists, so the tmp path must get created
        self.distro.sudo_conn.modules.os.path.exists = Mock(
            side_effect=path_exists(['/cluster-hostname']))
        self.paths.mon.constants.tmp_path = '/var/lib/ceph/tmp'
        args = Mock(return_value=['cluster', '1234', 'initd'])
        args.cluster = 'cluster'
        with patch('ceph_deploy.hosts.common.conf.load'):
            mon_create(self.distro, self.logger, args, Mock(), 'hostname')

        # the last makedirs call should be for the tmp path
        result = self.distro.sudo_conn.modules.os.makedirs.call_args_list[-1]
        assert result == call('/var/lib/ceph/tmp')

    def test_create_mon_path_if_nonexistent(self):
        # nothing but '/' exists, so the mon dir itself must get created first
        self.distro.sudo_conn.modules.os.path.exists = Mock(
            side_effect=path_exists(['/']))
        args = Mock(return_value=['cluster', '1234', 'initd'])
        args.cluster = 'cluster'
        with patch('ceph_deploy.hosts.common.conf.load'):
            mon_create(self.distro, self.logger, args, Mock(), 'hostname')

        result = self.distro.sudo_conn.modules.os.makedirs.call_args_list[0]
        assert result == call('/var/lib/ceph/mon/cluster-hostname')

    def test_write_keyring(self):
        self.distro.sudo_conn.modules.os.path.exists = Mock(
            side_effect=path_exists(['/']))
        args = Mock(return_value=['cluster', '1234', 'initd'])
        args.cluster = 'cluster'
        with patch('ceph_deploy.hosts.common.conf.load'):
            with patch('ceph_deploy.hosts.common.remote') as fake_remote:
                mon_create(self.distro, self.logger, args, Mock(), 'hostname')

        # the second argument to `remote()` should be the write func
        result = fake_remote.call_args_list[1][0][-1].__name__
        assert result == 'write_monitor_keyring'

    def test_write_done_path(self):
        self.distro.sudo_conn.modules.os.path.exists = Mock(
            side_effect=path_exists(['/']))
        args = Mock(return_value=['cluster', '1234', 'initd'])
        args.cluster = 'cluster'

        with patch('ceph_deploy.hosts.common.conf.load'):
            with patch('ceph_deploy.hosts.common.remote') as fake_remote:
                mon_create(self.distro, self.logger, args, Mock(), 'hostname')

        # the second to last argument to `remote()` should be the done path
        # write
        result = fake_remote.call_args_list[-2][0][-1].__name__
        assert result == 'create_done_path'

    def test_write_init_path(self):
        self.distro.sudo_conn.modules.os.path.exists = Mock(
            side_effect=path_exists(['/']))
        args = Mock(return_value=['cluster', '1234', 'initd'])
        args.cluster = 'cluster'

        with patch('ceph_deploy.hosts.common.conf.load'):
            with patch('ceph_deploy.hosts.common.remote') as fake_remote:
                mon_create(self.distro, self.logger, args, Mock(), 'hostname')

        # the very last remote() call writes the init marker file
        result = fake_remote.call_args_list[-1][0][-1].__name__
        assert result == 'create_init_path'
+
+
class TestIsRunning(object):
    """``mon.is_running`` must understand both CentOS and Ubuntu status lines."""

    def setup(self):
        self.fake_popen = Mock()
        self.fake_popen.return_value = self.fake_popen

    def _status_for(self, output):
        # Patch Popen so is_running sees `output` as the command's stdout.
        self.fake_popen.communicate = Mock(return_value=['', output])
        with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen):
            return mon.is_running(['ceph', 'status'])

    def test_is_running_centos(self):
        assert self._status_for("mon.mire094: running {'version': '0.6.15'}") is True

    def test_is_not_running_centos(self):
        assert self._status_for("mon.mire094: not running {'version': '0.6.15'}") is False

    def test_is_dead_centos(self):
        assert self._status_for("mon.mire094: dead {'version': '0.6.15'}") is False

    def test_is_running_ubuntu(self):
        assert self._status_for("ceph-mon (ceph/mira103) start/running, process 5866") is True

    def test_is_not_running_ubuntu(self):
        assert self._status_for("ceph-mon (ceph/mira103) start/dead, process 5866") is False

    def test_is_dead_ubuntu(self):
        assert self._status_for("ceph-mon (ceph/mira103) stop/not running, process 5866") is False
--- /dev/null
+import socket
+from mock import Mock
+from argparse import ArgumentError
+from pytest import raises
+
+from ceph_deploy.util import arg_validators
+
+
class TestRegexMatch(object):
    """RegexMatch raises ArgumentError when the pattern *does* match."""

    def test_match_raises(self):
        with raises(ArgumentError):
            arg_validators.RegexMatch(r'\d+')('1')

    def test_match_passes(self):
        assert arg_validators.RegexMatch(r'\d+')('foo') == 'foo'

    def test_default_error_message(self):
        with raises(ArgumentError) as error:
            arg_validators.RegexMatch(r'\d+')('1')
        assert error.value.message == 'must match pattern \d+'

    def test_custom_error_message(self):
        with raises(ArgumentError) as error:
            arg_validators.RegexMatch(r'\d+', 'wat')('1')
        assert error.value.message == 'wat'
+
+
class TestHostName(object):
    """Hostname validation exercised through a stubbed socket module."""

    def setup(self):
        self.fake_sock = Mock()
        self.fake_sock.gaierror = socket.gaierror
        # by default every lookup fails to resolve
        self.fake_sock.gethostbyname.side_effect = socket.gaierror

    def test_hostname_is_not_resolvable(self):
        validator = arg_validators.Hostname(self.fake_sock)
        with raises(ArgumentError) as error:
            validator('unresolvable')
        assert 'is not resolvable' in error.value.message

    def test_hostname_with_name_is_not_resolvable(self):
        validator = arg_validators.Hostname(self.fake_sock)
        with raises(ArgumentError) as error:
            validator('name:foo')
        assert 'foo is not resolvable' in error.value.message

    def test_ip_is_not_resolvable(self):
        self.fake_sock.gethostbyname = Mock(return_value='192.168.1.111')
        validator = arg_validators.Hostname(self.fake_sock)
        with raises(ArgumentError) as error:
            validator('name:192.168.1.111')
        assert 'must be a hostname not an IP' in error.value.message

    def test_host_is_resolvable(self):
        self.fake_sock.gethostbyname = Mock()
        validator = arg_validators.Hostname(self.fake_sock)
        assert validator('name:example.com') == 'name:example.com'
--- /dev/null
+from ceph_deploy.util import constants
+
+
class TestPaths(object):
    """Every constant path is absolute and ends with its own component."""

    def _check(self, value, suffix):
        # shared assertion helper (not collected by pytest)
        assert value.startswith('/')
        assert value.endswith(suffix)

    def test_mon_path(self):
        self._check(constants.mon_path, '/mon')

    def test_mds_path(self):
        self._check(constants.mds_path, '/mds')

    def test_tmp_path(self):
        self._check(constants.tmp_path, '/tmp')
--- /dev/null
+from ceph_deploy.util import paths
+
+
class TestMonPaths(object):
    """paths.mon helpers build absolute, cluster/host-specific paths."""

    def test_base_path(self):
        assert paths.mon.base('mycluster').endswith('/mycluster-')

    def test_path(self):
        built = paths.mon.path('mycluster', 'myhostname')
        assert built.startswith('/')
        assert built.endswith('/mycluster-myhostname')

    def test_done(self):
        built = paths.mon.done('mycluster', 'myhostname')
        assert built.startswith('/')
        assert built.endswith('mycluster-myhostname/done')

    def test_init(self):
        built = paths.mon.init('mycluster', 'myhostname', 'init')
        assert built.startswith('/')
        assert built.endswith('mycluster-myhostname/init')

    def test_keyring(self):
        built = paths.mon.keyring('mycluster', 'myhostname')
        assert built.startswith('/')
        assert built.endswith('tmp/mycluster-myhostname.mon.keyring')
--- /dev/null
+import socket
+import argparse
+import re
+
+
class RegexMatch(object):
    """
    Reject values matching a regular expression.

    Instances are usable as argparse ``type=`` callables: when the compiled
    ``pattern`` matches the value, an ``argparse.ArgumentError`` carrying
    ``statement`` (or a default message naming the pattern) is raised;
    otherwise the value is returned untouched.
    """

    def __init__(self, pattern, statement=None):
        self.string_pattern = pattern
        self.pattern = re.compile(pattern)
        # fall back to a generic message naming the raw pattern
        self.statement = statement or "must match pattern %s" % pattern

    def __call__(self, string):
        if self.pattern.search(string):
            raise argparse.ArgumentError(None, self.statement)
        return string
+
+
class Hostname(object):
    """
    argparse ``type=`` callable that ensures a (possibly ``name:host``
    prefixed) argument resolves in DNS and is not a bare IP address.
    """

    def __init__(self, _socket=None):
        # _socket is injectable purely so tests can stub out DNS resolution
        self.socket = _socket or socket

    def __call__(self, string):
        # allow the "name:host" form; validate only the host part
        host = string.split(':')[-1]
        try:
            resolved_addr = self.socket.gethostbyname(host)
        except self.socket.gaierror:
            raise argparse.ArgumentError(
                None, "hostname: %s is not resolvable" % host)

        if resolved_addr == host:
            # gethostbyname echoes its input when handed an IP literal
            raise argparse.ArgumentError(
                None, "%s must be a hostname not an IP" % host)

        return string
--- /dev/null
+from os.path import join
+
# Base Path for ceph
base_path = '/var/lib/ceph'

# Scratch area used while bootstrapping daemons (e.g. temporary keyrings).
tmp_path = join(base_path, 'tmp')

# Monitor data directories live under here, one per cluster-host pair.
mon_path = join(base_path, 'mon')

# Metadata server (MDS) data directories.
mds_path = join(base_path, 'mds')
--- /dev/null
+import StringIO
+from ceph_deploy.util.decorators import remote_compile
+
+
class remote(object):
    """
    Context manager for capturing all stdout, stderr on a remote client by
    monkeypatching pushy's ``sys.stdout`` and ``sys.stderr`` modules when
    executing remotely.

    It will take care of compiling the function passed in so that the only
    action left to the user of this context manager is to call that function
    with whatever arguments are necessary.

    For example::

        with remote(client, logger, my_func) as remote_func:
            remote_func(my_arg, my_other_arg)


    At exit, it will use the logger instance to report errors (from captured
    stderr) or info messages (from stdout).
    """

    def __init__(self, client, logger, func, mangle_exc=True, patch=True):
        self.client = client
        self.logger = logger
        self.func = func
        # func_doc is the function's docstring (Python 2); logged on entry
        self.description = getattr(func, 'func_doc')
        self.mangle_exc = mangle_exc
        self.patch = patch

    def __enter__(self):
        if self.patch:
            # keep references to the remote streams so __exit__ can restore them
            self.stdout = self.client.modules.sys.stdout
            self.stderr = self.client.modules.sys.stderr

            self.client.modules.sys.stdout = StringIO.StringIO()
            self.client.modules.sys.stderr = StringIO.StringIO()
        if self.description:
            self.logger.info(self.description.strip())
        return remote_compile(self.client, self.func)

    def __exit__(self, e_type, e_val, e_traceback):
        if self.patch:
            stdout_lines = self.client.modules.sys.stdout.getvalue()
            stderr_lines = self.client.modules.sys.stderr.getvalue()
            self.write_log(stdout_lines, 'info')
            self.write_log(stderr_lines, 'error')

            # leave everything as it was
            self.client.modules.sys.stdout = self.stdout
            # FIX: this previously assigned ``sys.stdout`` a second time,
            # leaving the remote stdout pointing at the saved stderr and never
            # restoring ``sys.stderr`` at all.
            self.client.modules.sys.stderr = self.stderr
        if not self.mangle_exc:
            return False

        if e_type is not None:
            if hasattr(e_val, 'remote_traceback'):
                for line in e_val.remote_traceback:
                    if line:
                        self.logger.error(line)
            return True  # So that we eat up the traceback

    def write_log(self, lines, log_level):
        """Fan each non-empty captured line out to ``logger.<log_level>``."""
        logger = getattr(self.logger, log_level)
        for line in lines.split('\n'):
            if line:
                logger(line)
--- /dev/null
+import logging
+import sys
+from functools import wraps
+
+
def remote_compile(client, fn):
    """
    Compile ``fn`` for execution on ``client`` (a pushy connection), wrapped
    so that any exception raised remotely is re-raised as a
    ``RemoteException`` carrying the remote traceback lines in its
    ``remote_traceback`` attribute.
    """
    def outer(fn):
        # NOTE(review): this wrapper is itself compiled remotely, so the
        # imports below run on the remote end — presumably why they are local.
        from functools import wraps
        @wraps(fn)
        def inner(*args, **kwargs):
            class RemoteException(Exception):

                def __init__(self, remote_traceback, err):
                    self.err = err
                    self.remote_traceback = remote_traceback

            try:
                fn(*args, **kwargs)
            except Exception as err:
                import traceback
                remote_trace = traceback.format_exc()
                # ship the formatted traceback back as a list of lines
                raise RemoteException(remote_trace.split('\n'), err)
        return inner
    return client.compile(outer)(client.compile(fn))
+
+
def catches(catch=None, handler=None, exit=True):
    """
    Decorator factory that traps the given exception class (or tuple of
    classes, defaulting to ``Exception``) around the wrapped callable.

    When a trapped exception is raised:

    * if ``handler`` is given, it is invoked with the exception and its
      return value is returned — it must raise ``SystemExit`` itself if it
      wants to halt execution;
    * otherwise the exception is logged through the ``ceph_deploy`` logger
      and, when ``exit`` is true, the process terminates via ``sys.exit(1)``.

    Examples::

        @catches(TypeError)
        def bar():
            some_call()

        @catches((TypeError, AttributeError))
        def baz():
            some_call()

        @catches(KeyboardInterrupt, handler=my_handler)
        def qux():
            some_call()
    """
    catch = catch or Exception
    logger = logging.getLogger('ceph_deploy')

    def decorate(f):

        @wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except catch as error:
                if handler:
                    return handler(error)
                logger.error(make_exception_message(error))
                if exit:
                    sys.exit(1)
        return wrapper

    return decorate
+
+#
+# Decorator helpers
+#
+
+
def make_exception_message(exc):
    """
    Render *exc* as ``'Name: message\\n'``, or just ``'Name\\n'`` when the
    exception carries no message, so log output stays readable.
    """
    name = exc.__class__.__name__
    message = str(exc)
    if message:
        return '%s: %s\n' % (name, message)
    return '%s\n' % name
+
--- /dev/null
+import logging
+
# ANSI color indexes; each is added to 30 to form the foreground escape code.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)

# Map each logging level name to the color it is rendered with.
COLORS = {
    'WARNING': YELLOW,
    'INFO': WHITE,
    'DEBUG': BLUE,
    'CRITICAL': RED,
    'ERROR': RED
}
+
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"

BASE_COLOR_FORMAT = "[$BOLD%(name)s$RESET][%(color_levelname)-17s] %(message)s"
BASE_FORMAT = "%(asctime)s [%(name)s][%(levelname)-6s] %(message)s"


def color_message(message):
    """Expand the ``$RESET``/``$BOLD`` placeholders in *message* to ANSI escapes."""
    for placeholder, sequence in (("$RESET", RESET_SEQ), ("$BOLD", BOLD_SEQ)):
        message = message.replace(placeholder, sequence)
    return message
+
+
class ColoredFormatter(logging.Formatter):
    """
    A very basic logging formatter that not only applies color to the levels
    of the output but will also truncate the level names so that they do not
    alter the visuals of logging when presented on the terminal.
    """

    def __init__(self, msg):
        logging.Formatter.__init__(self, msg)

    def format(self, record):
        levelname = record.levelname
        truncated_level = record.levelname[:6]
        # FIX: always set color_levelname — formats like BASE_COLOR_FORMAT
        # reference it, and a custom level absent from COLORS previously left
        # it unset, crashing at format time. Unknown levels stay uncolored.
        record.color_levelname = truncated_level
        if levelname in COLORS:
            record.color_levelname = (
                COLOR_SEQ % (30 + COLORS[levelname]) + truncated_level + RESET_SEQ
            )
        return logging.Formatter.format(self, record)
+
+
def color_format():
    """
    Main entry point to get a colored formatter; builds one from
    ``BASE_COLOR_FORMAT`` with its ``$BOLD``/``$RESET`` placeholders expanded.
    """
    return ColoredFormatter(color_message(BASE_COLOR_FORMAT))
--- /dev/null
+import mon
--- /dev/null
+"""
+Common paths for mon, based on the constant file paths defined in
+``ceph_deploy.util.constants``.
+All functions return a string representation of the absolute path
+construction.
+"""
+from os.path import join
+
+from ceph_deploy.util import constants
+
+
def base(cluster):
    """
    Return the monitor directory prefix for *cluster*, i.e.
    ``<mon_path>/<cluster>-``.
    """
    prefix = "%s-" % cluster
    return join(constants.mon_path, prefix)
+
+
def path(cluster, hostname):
    """
    Example usage::

        >>> mon.path('mycluster', 'hostname')
        /var/lib/ceph/mon/mycluster-myhostname
    """
    return base(cluster) + hostname
+
+
def done(cluster, hostname):
    """
    Example usage::

        >>> mon.done('mycluster', 'hostname')
        /var/lib/ceph/mon/mycluster-myhostname/done
    """
    monitor_dir = path(cluster, hostname)
    return join(monitor_dir, 'done')
+
+
def init(cluster, hostname, init):
    """
    Example usage::

        >>> mon.init('mycluster', 'hostname', 'init')
        /var/lib/ceph/mon/mycluster-myhostname/init
    """
    monitor_dir = path(cluster, hostname)
    return join(monitor_dir, init)
+
+
def keyring(cluster, hostname):
    """
    Example usage::

        >>> mon.keyring('mycluster', 'myhostname')
        /var/lib/ceph/tmp/mycluster-myhostname.mon.keyring
    """
    filename = '%s-%s.mon.keyring' % (cluster, hostname)
    return join(constants.tmp_path, filename)
--- /dev/null
+from ceph_deploy.util import wrappers
+
+
def apt(conn, logger, package, *a, **kw):
    """Install *package* non-interactively with apt-get on the remote host."""
    cmd = [
        'env',
        'DEBIAN_FRONTEND=noninteractive',
        'apt-get',
        '-q',
        'install',
        '--assume-yes',
        package,
    ]
    return wrappers.check_call(conn, logger, cmd, *a, **kw)
+
+
def apt_update(conn, logger, *a, **kw):
    """
    Run ``apt-get -q update`` on the remote host.

    Extra positional/keyword arguments are forwarded to
    ``wrappers.check_call`` for consistency with the sibling helpers
    (``apt``, ``yum``, ``rpm``); existing callers that pass nothing
    behave exactly as before.
    """
    cmd = [
        'apt-get',
        '-q',
        'update',
    ]
    return wrappers.check_call(
        conn,
        logger,
        cmd,
        *a,
        **kw
    )
+
+
def yum(conn, logger, package, *a, **kw):
    """Install *package* quietly and non-interactively with yum."""
    cmd = ['yum', '-y', '-q', 'install', package]
    return wrappers.check_call(conn, logger, cmd, *a, **kw)
+
+
def rpm(conn, logger, rpm_args=None, *a, **kw):
    """
    A minimal front end for ``rpm``. Extra flags can be passed in via
    ``rpm_args`` as an iterable.
    """
    # default to no extra flags; avoids a shared mutable default argument
    rpm_args = rpm_args or []
    cmd = [
        'rpm',
        '-Uvh',
    ]
    cmd.extend(rpm_args)
    return wrappers.check_call(
        conn,
        logger,
        cmd,
        *a,
        **kw
    )
--- /dev/null
+"""
+In a lot of places we need to make system calls, mainly through subprocess.
+Here we define them and reuse them with the added functionality of getting
+logging and remote execution.
+
+This allows us to only remote-execute the actual calls, not whole functions.
+"""
+from ceph_deploy.util.decorators import remote_compile
+from ceph_deploy.util import context
+
+
def check_call(conn, logger, args, *a, **kw):
    """
    Wraps ``subprocess.check_call`` for a remote call via ``pushy``
    doing all the capturing and logging nicely upon failure/success

    The mangling of the traceback when an exception occurs, is because the
    caller gets eaten up by not being executed in the actual function of
    a given module (e.g. ``centos/install.py``) but rather here, where the
    stack trace is no longer relevant.

    :param args: The args to be passed onto ``check_call``
    """
    command = ' '.join(args)
    patch = kw.pop('patch', True)  # Always patch unless explicitly told to
    logger.info('Running command: %s' % command)

    def remote_call(args, *a, **kw):
        # executed on the remote host, hence the function-local import
        import subprocess
        subprocess.check_call(
            args,
            *a,
            **kw
        )

    with context.remote(conn, logger, remote_call, mangle_exc=False, patch=patch) as call:
        try:
            return call(args, *a, **kw)
        except Exception as err:
            import inspect
            # point the reported frame at our caller, not at this wrapper
            stack = inspect.getframeinfo(inspect.currentframe().f_back)
            if hasattr(err, 'remote_traceback'):
                logger.error('Traceback (most recent call last):')
                logger.error(' File "%s", line %s, in %s' % (
                    stack[0],
                    stack[1],
                    stack[2])
                )
                # drop the redundant leading 'Traceback ...' line of the
                # remote trace, since we just logged our own header
                err.remote_traceback.pop(0)
                for line in err.remote_traceback:
                    if line:
                        logger.error(line)
                raise RuntimeError('Failed to execute command: %s' % ' '.join(args))
            else:
                raise err
--- /dev/null
+import argparse
+import re
+
+
ALPHANUMERIC_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*$')


def alphanumeric(s):
    """
    argparse type validator: *s* must start with a letter and contain only
    letters and digits; raises ``ArgumentTypeError`` otherwise.
    """
    if ALPHANUMERIC_RE.match(s) is None:
        raise argparse.ArgumentTypeError(
            'argument must start with a letter and contain only letters and numbers',
        )
    return s
--- /dev/null
+./scripts/ceph-deploy /usr/bin
--- /dev/null
+ceph-deploy (1.2.1-1) precise; urgency=low
+
+ * New upstream release
+
+ -- Gary Lowell <gary.lowell@inktank.com> Thu, 15 Aug 2013 15:19:33 -0700
+
+ceph-deploy (1.2-1) precise; urgency=low
+
+ * New upstream release
+
+ -- Gary Lowell <gary.lowell@inktank.com> Mon, 12 Aug 2013 16:59:09 -0700
+
+ceph-deploy (1.1-1) precise; urgency=low
+
+ * New upstream release
+
+ -- Gary Lowell <gary.lowell@inktank.com> Tue, 18 Jun 2013 11:07:00 -0700
+
+ceph-deploy (1.0-1) stable; urgency=low
+
+ * New upstream release
+
+ -- Gary Lowell <gary.lowell@inktank.com> Fri, 24 May 2013 11:57:40 +0800
+
+ceph-deploy (0.0.1-1) unstable; urgency=low
+
+ * Initial release.
+
+ -- Gary Lowell <gary.lowell@inktank.com> Mon, 10 Mar 2013 18:38:40 +0800
--- /dev/null
+Source: ceph-deploy
+Maintainer: Sage Weil <sage@newdream.net>
+Uploaders: Sage Weil <sage@newdream.net>
+Section: admin
+Priority: optional
+Build-Depends: debhelper (>= 7), python-setuptools
+X-Python-Version: >= 2.4
+Standards-Version: 3.9.2
+Homepage: http://ceph.com/
+Vcs-Git: git://github.com/ceph/ceph-deploy.git
+Vcs-Browser: https://github.com/ceph/ceph-deploy
+
+Package: ceph-deploy
+Architecture: all
+Depends: python,
+ python-argparse,
+ python-pushy,
+ python-setuptools,
+ ${misc:Depends},
+ ${python:Depends}
+Description: Ceph-deploy is an easy to use configuration tool
+ for the Ceph distributed storage system.
+ .
+ This package includes the programs and libraries to support
+ simple ceph cluster deployment.
+
--- /dev/null
+Files: *
+Copyright: (c) 2004-2012 by Sage Weil <sage@newdream.net>
+License: LGPL2.1 (see /usr/share/common-licenses/LGPL-2.1)
--- /dev/null
#!/usr/bin/make -f

# Uncomment this to turn on verbose mode.
export DH_VERBOSE=1
# Install the module under /usr/share/ceph-deploy instead of site-packages.
# FIX: the previous '@export' prefix is only valid inside a rule recipe; at
# top level it is a make syntax error, so this uses a plain 'export'.
export DEB_PYTHON_INSTALL_ARGS_ALL += --install-lib=/usr/share/ceph-deploy

%:
	dh $@ --buildsystem python_distutils --with python2
--- /dev/null
+pytest >=2.1.3
+tox >=1.2
+mock >=1.0b1
--- /dev/null
+pushy >=0.5.1
--- /dev/null
#! /bin/sh

# Tag tree and update version number in change log and
# in setup.py before building.

REPO=debian-repo
COMPONENT=main
KEYID=${KEYID:-03C3951A} # default is autobuild keyid
DEB_DIST="sid wheezy squeeze quantal precise oneiric natty raring"
DEB_BUILD=$(lsb_release -s -c)
RELEASE=0

if [ X"$1" = X"--release" ] ; then
    echo "Release Build"
    RELEASE=1
fi

if [ ! -d debian ] ; then
    echo "Are we in the right directory"
    exit 1
fi

if gpg --list-keys 2>/dev/null | grep -q ${KEYID} ; then
    echo "Signing packages and repo with ${KEYID}"
else
    echo "Package signing key (${KEYID}) not found"
    echo "Have you set \$GNUPGHOME ? "
    exit 3
fi

# Clean up any leftover builds
rm -f ../ceph-deploy*.dsc ../ceph-deploy*.changes ../ceph-deploy*.deb ../ceph-deploy.tgz
rm -rf ./debian-repo

# Apply backport tag if release build
if [ $RELEASE -eq 1 ] ; then
    DEB_VERSION=$(dpkg-parsechangelog | sed -rne 's,^Version: (.*),\1, p')
    BP_VERSION=${DEB_VERSION}${BPTAG}
    # NOTE(review): $DIST, $BPTAG and $comment are not set anywhere in this
    # script -- presumably inherited from the environment; verify.
    DEBEMAIL="gary.lowell@inktank.com" dch -D $DIST --force-distribution -b -v "$BP_VERSION" "$comment"
    # FIX: was 'dpkd-source', which is not a command
    dpkg-source -b .
fi

# Build Package
echo "Building for dist: $DEB_BUILD"
dpkg-buildpackage -k$KEYID
if [ $? -ne 0 ] ; then
    echo "Build failed"
    exit 2
fi

# Build Repo
PKG=../ceph-deploy*.changes
mkdir -p $REPO/conf
if [ -e $REPO/conf/distributions ] ; then
    rm -f $REPO/conf/distributions
fi

for DIST in $DEB_DIST ; do
    cat <<EOF >> $REPO/conf/distributions
Codename: $DIST
Suite: stable
Components: $COMPONENT
Architectures: amd64 armhf i386 source
Origin: Inktank
Description: Ceph distributed file system
DebIndices: Packages Release . .gz .bz2
DscIndices: Sources Release .gz .bz2
Contents: .gz .bz2
SignWith: $KEYID

EOF
done

echo "Adding package to repo, dist: $DEB_BUILD ($PKG)"
reprepro --ask-passphrase -b $REPO -C $COMPONENT --ignore=undefinedtarget --ignore=wrongdistribution include $DEB_BUILD $PKG

#for DIST in $DEB_DIST
#do
#    [ "$DIST" = "$DEB_BUILD" ] && continue
#    echo "Copying package to dist: $DIST"
#    reprepro -b $REPO --ignore=undefinedtarget --ignore=wrongdistribution copy $DIST $DEB_BUILD ceph-deploy
#done

echo "Done"
--- /dev/null
#! /bin/sh

# Tag tree and update version number in change log and
# in setup.py before building.

REPO=rpm-repo
KEYID=${KEYID:-03C3951A} # Default is autobuild-key
BUILDAREA=./rpmbuild
DIST=el6
# NOTE(review): RPM_BUILD is assigned but never used below -- verify intent.
RPM_BUILD=$(lsb_release -s -c)

if [ ! -e setup.py ] ; then
    echo "Are we in the right directory"
    exit 1
fi

if gpg --list-keys 2>/dev/null | grep -q ${KEYID} ; then
    echo "Signing packages and repo with ${KEYID}"
else
    echo "Package signing key (${KEYID}) not found"
    echo "Have you set \$GNUPGHOME ? "
    exit 3
fi

if ! CREATEREPO=`which createrepo` ; then
    echo "Please install the createrepo package"
    exit 4
fi

# Create Tarball
python setup.py sdist --formats=bztar

# Build RPM
# NOTE(review): brace expansion is a bash/ksh feature; a strict POSIX
# /bin/sh (e.g. dash) would create one literal directory here -- verify.
mkdir -p rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
BUILDAREA=`readlink -fn ${BUILDAREA}` ### rpm wants absolute path
cp ceph-deploy.spec ${BUILDAREA}/SPECS
cp dist/*.tar.bz2 ${BUILDAREA}/SOURCES
echo "buildarea is: ${BUILDAREA}"
rpmbuild -ba --define "_topdir ${BUILDAREA}" --define "_unpackaged_files_terminate_build 0" ${BUILDAREA}/SPECS/ceph-deploy.spec

# create repo
DEST=${REPO}/${DIST}
mkdir -p ${REPO}/${DIST}
cp -r ${BUILDAREA}/*RPMS ${DEST}

# Sign all the RPMs for this release
rpm_list=`find ${REPO} -name "*.rpm" -print`
rpm --addsign --define "_gpg_name ${KEYID}" $rpm_list

# Construct repodata
for dir in ${DEST}/SRPMS ${DEST}/RPMS/*
do
    if [ -d $dir ] ; then
        createrepo $dir
        gpg --detach-sign --armor -u ${KEYID} $dir/repodata/repomd.xml
    fi
done

exit 0
--- /dev/null
#!/usr/bin/env python
import os
import platform
import sys
"""
ceph-deploy - admin tool for ceph
"""

# Prefer a system-installed copy of the ceph_deploy package when present;
# the first matching location wins.
# NOTE(review): the triple-quoted string above sits after the imports, so it
# is a plain expression statement, not the module docstring; and 'platform'
# appears unused here -- verify both are intentional.
if os.path.exists('/usr/share/pyshared/ceph_deploy'):
    sys.path.insert(0,'/usr/share/pyshared/ceph_deploy')
elif os.path.exists('/usr/share/ceph-deploy'):
    sys.path.insert(0,'/usr/share/ceph-deploy')
elif os.path.exists('/usr/share/pyshared/ceph-deploy'):
    sys.path.insert(0,'/usr/share/pyshared/ceph-deploy')
elif os.path.exists('/usr/lib/python2.6/site-packages/ceph_deploy'):
    sys.path.insert(0,'/usr/lib/python2.6/site-packages/ceph_deploy')

from ceph_deploy.cli import main

if __name__ == '__main__':
    main()
--- /dev/null
+[pytest]
+norecursedirs = .* _* virtualenv
--- /dev/null
+from setuptools import setup, find_packages
+import os
+import sys
+import ceph_deploy
+
def read(fname):
    """
    Return the contents of *fname*, resolved relative to this setup.py.

    FIX: uses a context manager so the file handle is closed promptly
    instead of leaking until garbage collection.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
+
# argparse shipped with the stdlib starting in 2.7 / 3.2; older interpreters
# need the PyPI backport.
install_requires = []
pyversion = sys.version_info[:2]
if pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1):
    install_requires.append('argparse')

setup(
    name='ceph-deploy',
    version=ceph_deploy.__version__,
    packages=find_packages(),

    author='Inktank',
    author_email='ceph-devel@vger.kernel.org',
    description='Deploy Ceph with minimal infrastructure',
    long_description=read('README.rst'),
    license='MIT',
    keywords='ceph deploy',
    url="https://github.com/ceph/ceph-deploy",

    install_requires=[
        'setuptools',
        'pushy >=0.5.2',
    ] + install_requires,

    tests_require=[
        'pytest >=2.1.3',
        'mock >=1.0b1',
    ],

    # Each 'ceph_deploy.cli' entry point registers a subcommand of the
    # ceph-deploy console script.
    entry_points={

        'console_scripts': [
            'ceph-deploy = ceph_deploy.cli:main',
        ],

        'ceph_deploy.cli': [
            'new = ceph_deploy.new:make',
            'install = ceph_deploy.install:make',
            'uninstall = ceph_deploy.install:make_uninstall',
            'purge = ceph_deploy.install:make_purge',
            'purgedata = ceph_deploy.install:make_purge_data',
            'mon = ceph_deploy.mon:make',
            'gatherkeys = ceph_deploy.gatherkeys:make',
            'osd = ceph_deploy.osd:make',
            'disk = ceph_deploy.osd:make_disk',
            'mds = ceph_deploy.mds:make',
            'forgetkeys = ceph_deploy.forgetkeys:make',
            'config = ceph_deploy.config:make',
            'admin = ceph_deploy.admin:make',
        ],

    },
    )
--- /dev/null
+[tox]
+envlist = py26, py27
+
+[testenv]
+deps=
+ pytest
+ mock
+commands=py.test -v {posargs:ceph_deploy}