From 543aefdfd02e517f0f0c0a2f426ad5653f5872d8 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 13 Jun 2013 17:44:25 -0700 Subject: [PATCH] install: error out if installing ceph-release rpm fails This is most noticeable when doing --dev=branch when the branch is not build and does not exist. Signed-off-by: Sage Weil --- .gitignore | 18 + LICENSE | 19 + MANIFEST.in | 3 + README.rst | 200 ++++++++++ bootstrap | 42 ++ ceph-deploy.spec | 83 ++++ ceph_deploy/__init__.py | 0 ceph_deploy/admin.py | 74 ++++ ceph_deploy/cli.py | 118 ++++++ ceph_deploy/cliutil.py | 8 + ceph_deploy/conf.py | 56 +++ ceph_deploy/config.py | 104 +++++ ceph_deploy/exc.py | 74 ++++ ceph_deploy/forgetkeys.py | 36 ++ ceph_deploy/gatherkeys.py | 86 ++++ ceph_deploy/install.py | 574 +++++++++++++++++++++++++++ ceph_deploy/lsb.py | 97 +++++ ceph_deploy/mds.py | 230 +++++++++++ ceph_deploy/memoize.py | 26 ++ ceph_deploy/misc.py | 11 + ceph_deploy/mon.py | 264 ++++++++++++ ceph_deploy/new.py | 136 +++++++ ceph_deploy/osd.py | 558 ++++++++++++++++++++++++++ ceph_deploy/sudo_pushy.py | 50 +++ ceph_deploy/test/__init__.py | 0 ceph_deploy/test/conftest.py | 98 +++++ ceph_deploy/test/directory.py | 13 + ceph_deploy/test/test_cli.py | 60 +++ ceph_deploy/test/test_cli_install.py | 89 +++++ ceph_deploy/test/test_cli_mon.py | 112 ++++++ ceph_deploy/test/test_cli_new.py | 110 +++++ ceph_deploy/test/test_cli_osd.py | 137 +++++++ ceph_deploy/test/test_conf.py | 59 +++ ceph_deploy/validate.py | 16 + debian/ceph-deploy.install | 1 + debian/changelog | 11 + debian/compat | 1 + debian/control | 26 ++ debian/copyright | 3 + debian/rules | 9 + debian/source/format | 1 + requirements-dev.txt | 3 + requirements.txt | 1 + scripts/build-debian.sh | 66 +++ scripts/build-rpm.sh | 59 +++ scripts/ceph-deploy | 21 + setup.cfg | 2 + setup.py | 63 +++ tox.ini | 8 + 49 files changed, 3836 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 
100755 bootstrap create mode 100644 ceph-deploy.spec create mode 100644 ceph_deploy/__init__.py create mode 100644 ceph_deploy/admin.py create mode 100644 ceph_deploy/cli.py create mode 100644 ceph_deploy/cliutil.py create mode 100644 ceph_deploy/conf.py create mode 100644 ceph_deploy/config.py create mode 100644 ceph_deploy/exc.py create mode 100644 ceph_deploy/forgetkeys.py create mode 100644 ceph_deploy/gatherkeys.py create mode 100644 ceph_deploy/install.py create mode 100644 ceph_deploy/lsb.py create mode 100644 ceph_deploy/mds.py create mode 100644 ceph_deploy/memoize.py create mode 100644 ceph_deploy/misc.py create mode 100644 ceph_deploy/mon.py create mode 100644 ceph_deploy/new.py create mode 100644 ceph_deploy/osd.py create mode 100644 ceph_deploy/sudo_pushy.py create mode 100644 ceph_deploy/test/__init__.py create mode 100644 ceph_deploy/test/conftest.py create mode 100644 ceph_deploy/test/directory.py create mode 100644 ceph_deploy/test/test_cli.py create mode 100644 ceph_deploy/test/test_cli_install.py create mode 100644 ceph_deploy/test/test_cli_mon.py create mode 100644 ceph_deploy/test/test_cli_new.py create mode 100644 ceph_deploy/test/test_cli_osd.py create mode 100644 ceph_deploy/test/test_conf.py create mode 100644 ceph_deploy/validate.py create mode 100644 debian/ceph-deploy.install create mode 100644 debian/changelog create mode 100644 debian/compat create mode 100644 debian/control create mode 100644 debian/copyright create mode 100755 debian/rules create mode 100644 debian/source/format create mode 100644 requirements-dev.txt create mode 100644 requirements.txt create mode 100755 scripts/build-debian.sh create mode 100755 scripts/build-rpm.sh create mode 100755 scripts/ceph-deploy create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 tox.ini diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9594060 --- /dev/null +++ b/.gitignore @@ -0,0 +1,18 @@ +*~ +.#* +## the next line needs to start with a 
backslash to avoid looking like +## a comment +\#*# +.*.swp + +*.pyc +*.pyo +*.egg-info +/build +/dist + +/virtualenv +/.tox + +/ceph-deploy +/*.conf diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..26624cf --- /dev/null +++ b/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012 Inktank Storage, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..cf766b8 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,3 @@ +include *.rst +include LICENSE +prune ceph_deploy/test diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..a17808a --- /dev/null +++ b/README.rst @@ -0,0 +1,200 @@ +======================================================== + ceph-deploy -- Deploy Ceph with minimal infrastructure +======================================================== + +``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to +the servers, ``sudo``, and some Python. 
It runs fully on your +workstation, requiring no servers, databases, or anything like that. + +If you set up and tear down Ceph clusters a lot, and want minimal +extra bureaucracy, this is for you. + +It is not a generic deployment system, it is only for Ceph, and is designed +for users who want to quickly get Ceph running with sensible initial settings +without the overhead of installing Chef, Puppet or Juju. + +It does not handle client configuration beyond pushing the Ceph config file +and users who want fine-control over security settings, partitions or directory +locations should use a tool such as Chef or Puppet. + +Setup +===== + +To get the source tree ready for use, run this once:: + + ./bootstrap + +You can symlink the ``ceph-deploy`` script in this somewhere +convenient (like ``~/bin``), or add the current directory to ``PATH``, +or just always type the full path to ``ceph-deploy``. + +ceph-deploy at a minimum requires that the machine from which the script is +being run can ssh as root without password into each Ceph node. + +To enable this generate a new ssh keypair for the root user with no passphrase +and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in: + + /root/.ssh/authorized_keys + +and ensure that the following lines are in the sshd config: + + PermitRootLogin yes + PermitEmptyPasswords yes + +The machine running ceph-deploy does not need to have the Ceph packages installed +unless it needs to admin the cluster directly using the ``ceph`` command line tool. + +Managing an existing cluster +============================ + +You can use ceph-deploy to provision nodes for an existing cluster. 
+To grab a copy of the cluster configuration file (normally +``ceph.conf``):: + + ceph-deploy config pull HOST + +You will usually also want to gather the encryption keys used for that +cluster: + + ceph-deploy gatherkeys MONHOST + +At this point you can skip the steps below that create a new cluster +(you already have one) and optionally skip instalation and/or monitor +creation, depending on what you are trying to accomplish. + + +Creating a new cluster +====================== + +Creating a new configuration +---------------------------- + +To create a new configuration file and secret key, decide what hosts +will run ``ceph-mon``, and run:: + + ceph-deploy new MON [MON..] + +listing the hostnames of the monitors. Each ``MON`` can be + + * a simple hostname. It must be DNS resolvable without the fully + qualified domain name. + * a fully qualified domain name. The hostname is assumed to be the + leading component up to the first ``.``. + * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified + domain name or IP address. For example, ``foo``, + ``foo.example.com``, ``foo:something.example.com``, and + ``foo:1.2.3.4`` are all valid. Note, however, that the hostname + should match that configured on the host ``foo``. + +The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your +current directory. + + +Edit initial cluster configuration +---------------------------------- + +You want to review the generated ``ceph.conf`` file and make sure that +the ``mon_host`` setting contains the IP addresses you would like the +monitors to bind to. These are the IPs that clients will initially +contact to authenticate to the cluster, and they need to be reachable +both by external client-facing hosts and internal cluster daemons. + +Installing packages +=================== + +To install the Ceph software on the servers, run:: + + ceph-deploy install HOST [HOST..] + +This installs the current default *stable* release. 
You can choose a +different release track with command line options, for example to use +a release candidate:: + + ceph-deploy install --testing HOST + +Or to test a development branch:: + + ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..] + + +Deploying monitors +================== + +To actually deploy ``ceph-mon`` to the hosts you chose, run:: + + ceph-deploy mon create HOST [HOST..] + +Without explicit hosts listed, hosts in ``mon_initial_members`` in the +config file are deployed. That is, the hosts you passed to +``ceph-deploy new`` are the default value here. + +Gather keys +=========== + +To gather authenticate keys (for administering the cluster and +bootstrapping new nodes) to the local directory, run:: + + ceph-deploy gatherkeys HOST [HOST...] + +where ``HOST`` is one of the monitor hosts. + +Once these keys are in the local directory, you can provision new OSDs etc. + + +Deploying OSDs +============== + +To prepare a node for running OSDs, run:: + + ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...] + +After that, the hosts will be running OSDs for the given data disks. +If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be +created and GPT labels will be used to mark and automatically activate +OSD volumes. If an existing partition is specified, the partition +table will not be modified. If you want to destroy the existing +partition table on DISK first, you can include the ``--zap-disk`` +option. + +If there is already a prepared disk or directory that is ready to become an +OSD, you can also do: + + ceph-deploy osd activate HOST:DIR[:JOURNAL] [...] + +This is useful when you are managing the mounting of volumes yourself. + + +Admin hosts +=========== + +To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring`` +keyring so that it can administer the cluster, run:: + + ceph-deploy admin HOST [HOST ...] 
+ +Forget keys +=========== + +The ``new`` and ``gatherkeys`` put some Ceph authentication keys in keyrings in +the local directory. If you are worried about them being there for security +reasons, run:: + + ceph-deploy forgetkeys + +and they will be removed. If you need them again later to deploy additional +nodes, simply re-run:: + + ceph-deploy gatherkeys HOST [HOST...] + +and they will be retrieved from an existing monitor node. + +Multiple clusters +================= + +All of the above commands take a ``--cluster=NAME`` option, allowing +you to manage multiple clusters conveniently from one workstation. +For example:: + + ceph-deploy --cluster=us-west new + vi us-west.conf + ceph-deploy --cluster=us-west mon diff --git a/bootstrap b/bootstrap new file mode 100755 index 0000000..7d1a7de --- /dev/null +++ b/bootstrap @@ -0,0 +1,42 @@ +#!/bin/sh +set -e + +if command -v lsb_release >/dev/null 2>&1; then + case "$(lsb_release --id --short)" in + Ubuntu|Debian) + for package in python-virtualenv; do + if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then + # add a space after old values + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages, please install them:" 1>&2 + echo " sudo apt-get install $missing" + exit 1 + fi + ;; + esac +else + if [ -f /etc/redhat-release ]; then + case "$(cat /etc/redhat-release | awk '{print $1}')" in + CentOS) + for package in python-virtualenv; do + if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages, please install them:" 1>&2 + echo " sudo yum install $missing" + exit 1 + fi + ;; + esac + fi +fi + +test -d virtualenv || virtualenv virtualenv +./virtualenv/bin/python setup.py develop +./virtualenv/bin/pip install -r requirements.txt -r requirements-dev.txt +test -e ceph-deploy || ln -s 
virtualenv/bin/ceph-deploy . diff --git a/ceph-deploy.spec b/ceph-deploy.spec new file mode 100644 index 0000000..e4db2f3 --- /dev/null +++ b/ceph-deploy.spec @@ -0,0 +1,83 @@ +# +# spec file for package ceph-deploy +# + +%if ! (0%{?fedora} > 12 || 0%{?rhel} > 5) +%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")} +%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")} +%endif + +################################################################################# +# common +################################################################################# +Name: ceph-deploy +Version: 1.0 +Release: 0 +Summary: Admin and deploy tool for Ceph +License: MIT +Group: System/Filesystems +URL: http://ceph.com/ +Source0: %{name}-%{version}.tar.bz2 +#Source0: https://github.com/ceph/ceph-deploy/archive/v0.1.tar.gz +#BuildRoot: %{_tmppath}/%{name}-%{version}-build +BuildRequires: python-devel +BuildRequires: python-distribute +BuildRequires: python-setuptools +BuildRequires: python-virtualenv +BuildRequires: pytest +BuildRequires: python-mock +BuildRequires: python-tox +Requires: python-argparse +#Requires: python-pushy +Requires: python-distribute +#Requires: lsb-release +Requires: ceph +%if 0%{?suse_version} && 0%{?suse_version} <= 1110 +%{!?python_sitelib: %global python_sitelib %(python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} +%else +BuildArch: noarch +%endif + +################################################################################# +# specific +################################################################################# +%if 0%{defined suse_version} +%py_requires +%if 0%{?suse_version} > 1210 +Requires: gptfdisk +%else +Requires: scsirastools +%endif +%else +Requires: gdisk +%endif + +%if 0%{?rhel} +BuildRequires: python >= %{pyver} +Requires: python >= 
%{pyver} +%endif + +%description +An easy to use admin tool for deploy ceph storage clusters. + +%prep +#%setup -q -n %{name} +%setup -q + +%build +#python setup.py build + +%install +python setup.py install --prefix=%{_prefix} --root=%{buildroot} +install -m 0755 -D scripts/ceph-deploy $RPM_BUILD_ROOT/usr/bin + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" + +%files +%defattr(-,root,root) +%doc LICENSE README.rst +%{_bindir}/ceph-deploy +%{python_sitelib}/* + +%changelog diff --git a/ceph_deploy/__init__.py b/ceph_deploy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ceph_deploy/admin.py b/ceph_deploy/admin.py new file mode 100644 index 0000000..aef60ee --- /dev/null +++ b/ceph_deploy/admin.py @@ -0,0 +1,74 @@ +import logging + +from cStringIO import StringIO + +from . import exc +from . import conf +from .cliutil import priority +from .sudo_pushy import get_transport + +LOG = logging.getLogger(__name__) + +def write_file(path, content): + try: + with file(path, 'w') as f: + f.write(content) + except: + pass + +def admin(args): + cfg = conf.load(args) + conf_data = StringIO() + cfg.write(conf_data) + + try: + with file('%s.client.admin.keyring' % args.cluster, 'rb') as f: + keyring = f.read() + except: + raise RuntimeError('%s.client.admin.keyring not found' % + args.cluster) + + errors = 0 + for hostname in args.client: + LOG.debug('Pushing admin keys and conf to %s', hostname) + try: + sudo = args.pushy(get_transport(hostname)) + write_conf_r = sudo.compile(conf.write_conf) + write_conf_r( + cluster=args.cluster, + conf=conf_data.getvalue(), + overwrite=args.overwrite_conf, + ) + + sudo = args.pushy(get_transport(hostname)) + write_file_r = sudo.compile(write_file) + error = write_file_r( + '/etc/ceph/%s.client.admin.keyring' % args.cluster, + keyring + ) + if error is not None: + raise exc.GenericError(error) + sudo.close() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise 
exc.GenericError('Failed to configure %d admin hosts' % errors) + + +@priority(70) +def make(parser): + """ + Push configuration and client.admin key to a remote host. + """ + parser.add_argument( + 'client', + metavar='HOST', + nargs='*', + help='host to configure for ceph administration', + ) + parser.set_defaults( + func=admin, + ) diff --git a/ceph_deploy/cli.py b/ceph_deploy/cli.py new file mode 100644 index 0000000..734ce14 --- /dev/null +++ b/ceph_deploy/cli.py @@ -0,0 +1,118 @@ +import pkg_resources +import argparse +import logging +import pushy +import sys + +from . import exc +from . import validate +from . import sudo_pushy + + +LOG = logging.getLogger(__name__) + + +def parse_args(args=None, namespace=None): + parser = argparse.ArgumentParser( + description='Deploy Ceph', + ) + verbosity = parser.add_mutually_exclusive_group(required=False) + verbosity.add_argument( + '-v', '--verbose', + action='store_true', dest='verbose', default=False, + help='be more verbose', + ) + verbosity.add_argument( + '-q', '--quiet', + action='store_true', dest='quiet', + help='be less verbose', + ) + parser.add_argument( + '-n', '--dry-run', + action='store_true', dest='dry_run', + help='do not perform any action, but report what would be done', + ) + parser.add_argument( + '--overwrite-conf', + action='store_true', + help='overwrite an existing conf file on remote host (if present)', + ) + parser.add_argument( + '--cluster', + metavar='NAME', + help='name of the cluster', + type=validate.alphanumeric, + ) + sub = parser.add_subparsers( + title='commands', + metavar='COMMAND', + help='description', + ) + entry_points = [ + (ep.name, ep.load()) + for ep in pkg_resources.iter_entry_points('ceph_deploy.cli') + ] + entry_points.sort( + key=lambda (name, fn): getattr(fn, 'priority', 100), + ) + for (name, fn) in entry_points: + p = sub.add_parser( + name, + description=fn.__doc__, + help=fn.__doc__, + ) + # ugly kludge but i really want to have a nice way to access + # the 
program name, with subcommand, later + p.set_defaults(prog=p.prog) + fn(p) + parser.set_defaults( + # we want to hold on to this, for later + prog=parser.prog, + + # unit tests can override this to mock pushy; no user-visible + # option sets this + pushy=pushy.connect, + + cluster='ceph', + ) + args = parser.parse_args(args=args, namespace=namespace) + return args + + +def main(args=None, namespace=None): + args = parse_args(args=args, namespace=namespace) + + console_loglevel = logging.INFO + if args.quiet: + console_loglevel = logging.WARNING + if args.verbose: + console_loglevel = logging.DEBUG + sh = logging.StreamHandler() + sh.setLevel(console_loglevel) + + fh = logging.FileHandler('{cluster}.log'.format(cluster=args.cluster)) + fh.setLevel(logging.DEBUG) + formatter = logging.Formatter( + '%(asctime)s %(name)s %(levelname)s %(message)s') + fh.setFormatter(formatter) + + # because we're in a module already, __name__ is not the ancestor of + # the rest of the package; use the root as the logger for everyone + root_logger = logging.getLogger() + + # allow all levels at root_logger, handlers control individual levels + root_logger.setLevel(logging.DEBUG) + + root_logger.addHandler(sh) + root_logger.addHandler(fh) + + sudo_pushy.patch() + + try: + return args.func(args) + except exc.DeployError as e: + print >> sys.stderr, '{prog}: {msg}'.format( + prog=args.prog, + msg=e, + ) + sys.exit(1) diff --git a/ceph_deploy/cliutil.py b/ceph_deploy/cliutil.py new file mode 100644 index 0000000..d273f31 --- /dev/null +++ b/ceph_deploy/cliutil.py @@ -0,0 +1,8 @@ +def priority(num): + """ + Decorator to add a `priority` attribute to the function. + """ + def add_priority(fn): + fn.priority = num + return fn + return add_priority diff --git a/ceph_deploy/conf.py b/ceph_deploy/conf.py new file mode 100644 index 0000000..1b67cfe --- /dev/null +++ b/ceph_deploy/conf.py @@ -0,0 +1,56 @@ +import ConfigParser +import contextlib + +from . 
import exc + + +class _TrimIndentFile(object): + def __init__(self, fp): + self.fp = fp + + def readline(self): + line = self.fp.readline() + return line.lstrip(' \t') + + +def _optionxform(s): + s = s.replace('_', ' ') + s = '_'.join(s.split()) + return s + + +def parse(fp): + cfg = ConfigParser.RawConfigParser() + cfg.optionxform = _optionxform + ifp = _TrimIndentFile(fp) + cfg.readfp(ifp) + return cfg + + +def load(args): + path = '{cluster}.conf'.format(cluster=args.cluster) + try: + f = file(path) + except IOError as e: + raise exc.ConfigError(e) + else: + with contextlib.closing(f): + return parse(f) + + +def write_conf(cluster, conf, overwrite): + import os + + path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster) + tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid()) + + if os.path.exists(path): + with file(path, 'rb') as f: + old = f.read() + if old != conf and not overwrite: + raise RuntimeError('config file %s exists with different content; use --overwrite-conf to overwrite' % path) + with file(tmp, 'w') as f: + f.write(conf) + f.flush() + os.fsync(f) + os.rename(tmp, path) diff --git a/ceph_deploy/config.py b/ceph_deploy/config.py new file mode 100644 index 0000000..74024ed --- /dev/null +++ b/ceph_deploy/config.py @@ -0,0 +1,104 @@ +import logging + +from cStringIO import StringIO + +from . import exc +from . import conf +from . 
import misc +from .cliutil import priority +from .sudo_pushy import get_transport + +LOG = logging.getLogger(__name__) + +def config_push(args): + cfg = conf.load(args) + conf_data = StringIO() + cfg.write(conf_data) + + errors = 0 + for hostname in args.client: + LOG.debug('Pushing config to %s', hostname) + try: + sudo = args.pushy(get_transport(hostname)) + write_conf_r = sudo.compile(conf.write_conf) + write_conf_r( + cluster=args.cluster, + conf=conf_data.getvalue(), + overwrite=args.overwrite_conf, + ) + sudo.close() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to config %d hosts' % errors) + + +def config_pull(args): + import os.path + + topath = '{cluster}.conf'.format(cluster=args.cluster) + frompath = '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster) + + errors = 0 + for hostname in args.client: + try: + LOG.debug('Checking %s for %s', hostname, frompath) + sudo = args.pushy(get_transport(hostname)) + get_file_r = sudo.compile(misc.get_file) + conf_file = get_file_r(path=frompath) + if conf_file is not None: + LOG.debug('Got %s from %s', frompath, hostname) + if os.path.exists(topath): + with file(topath, 'rb') as f: + existing = f.read() + if existing != conf_file and not args.overwrite_conf: + LOG.error('local config file %s exists with different content; use --overwrite-conf to overwrite' % topath) + raise + + with file(topath, 'w') as f: + f.write(conf_file) + return + sudo.close() + LOG.debug('Empty or missing %s on %s', frompath, hostname) + except: + LOG.error('Unable to pull %s from %s', frompath, hostname) + finally: + errors += 1 + + raise exc.GenericError('Failed to fetch config from %d hosts' % errors) + + +def config(args): + if args.subcommand == 'push': + config_push(args) + elif args.subcommand == 'pull': + config_pull(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + +@priority(70) +def make(parser): + """ + Push configuration file to a remote 
host. + """ + parser.add_argument( + 'subcommand', + metavar='SUBCOMMAND', + choices=[ + 'push', + 'pull', + ], + help='push or pull', + ) + parser.add_argument( + 'client', + metavar='HOST', + nargs='*', + help='host to push/pull the config to/from', + ) + parser.set_defaults( + func=config, + ) diff --git a/ceph_deploy/exc.py b/ceph_deploy/exc.py new file mode 100644 index 0000000..62e0eda --- /dev/null +++ b/ceph_deploy/exc.py @@ -0,0 +1,74 @@ +class DeployError(Exception): + """ + Unknown deploy error + """ + + def __str__(self): + doc = self.__doc__.strip() + return ': '.join([doc] + [str(a) for a in self.args]) + + +class UnableToResolveError(DeployError): + """ + Unable to resolve host + """ +class ClusterExistsError(DeployError): + """ + Cluster config exists already + """ + + +class ConfigError(DeployError): + """ + Cannot load config + """ + + +class NeedHostError(DeployError): + """ + No hosts specified to deploy to. + """ + + +class NeedMonError(DeployError): + """ + Cannot find nodes with ceph-mon. 
+ """ + +class NeedDiskError(DeployError): + """ + Must supply disk/path argument + """ + +class UnsupportedPlatform(DeployError): + """ + Platform is not supported + """ + def __init__(self, distro, codename): + self.distro = distro + self.codename = codename + + def __str__(self): + return '{doc}: {distro} {codename}'.format( + doc=self.__doc__.strip(), + distro=self.distro, + codename=self.codename, + ) + +class MissingPackageError(DeployError): + """ + A required package or command is missing + """ + def __init__(self, message): + self.message = message + + def __str__(self): + return self.message + + +class GenericError(DeployError): + def __init__(self, message): + self.message = message + + def __str__(self): + return self.message diff --git a/ceph_deploy/forgetkeys.py b/ceph_deploy/forgetkeys.py new file mode 100644 index 0000000..86bedbe --- /dev/null +++ b/ceph_deploy/forgetkeys.py @@ -0,0 +1,36 @@ +import logging +import errno + +from .cliutil import priority + + +LOG = logging.getLogger(__name__) + + +def forgetkeys(args): + import os + for f in [ + 'mon', + 'client.admin', + 'bootstrap-osd', + 'bootstrap-mds', + ]: + try: + os.unlink('{cluster}.{what}.keyring'.format( + cluster=args.cluster, + what=f, + )) + except OSError, e: + if e.errno == errno.ENOENT: + pass + else: + raise + +@priority(100) +def make(parser): + """ + Remove authentication keys from the local directory. + """ + parser.set_defaults( + func=forgetkeys, + ) diff --git a/ceph_deploy/gatherkeys.py b/ceph_deploy/gatherkeys.py new file mode 100644 index 0000000..2261f3c --- /dev/null +++ b/ceph_deploy/gatherkeys.py @@ -0,0 +1,86 @@ +import os.path +import logging + +from .cliutil import priority +from . import misc +from .sudo_pushy import get_transport + +LOG = logging.getLogger(__name__) + +def fetch_file(args, frompath, topath, hosts): + # mon. 
+ if os.path.exists(topath): + LOG.debug('Have %s', topath) + return True + else: + for hostname in hosts: + LOG.debug('Checking %s for %s', hostname, frompath) + sudo = args.pushy(get_transport(hostname)) + get_file_r = sudo.compile(misc.get_file) + key = get_file_r(path=frompath.format(hostname=hostname)) + if key is not None: + LOG.debug('Got %s key from %s.', topath, hostname) + with file(topath, 'w') as f: + f.write(key) + return True + sudo.close() + LOG.warning('Unable to find %s on %s', frompath, hosts) + return False + +def gatherkeys(args): + ret = 0 + + # client.admin + r = fetch_file( + args=args, + frompath='/etc/ceph/{cluster}.client.admin.keyring'.format( + cluster=args.cluster), + topath='{cluster}.client.admin.keyring'.format( + cluster=args.cluster), + hosts=args.mon, + ) + if not r: + ret = 1 + + # mon. + fetch_file( + args=args, + frompath='/var/lib/ceph/mon/%s-{hostname}/keyring' % args.cluster, + topath='{cluster}.mon.keyring'.format( + cluster=args.cluster), + hosts=args.mon, + ) + if not r: + ret = 1 + + # bootstrap + for what in ['osd', 'mds']: + r = fetch_file( + args=args, + frompath='/var/lib/ceph/bootstrap-{what}/{cluster}.keyring'.format( + cluster=args.cluster, + what=what), + topath='{cluster}.bootstrap-{what}.keyring'.format( + cluster=args.cluster, + what=what), + hosts=args.mon, + ) + if not r: + ret = 1 + + return ret + +@priority(40) +def make(parser): + """ + Gather authentication keys for provisioning new nodes. + """ + parser.add_argument( + 'mon', + metavar='HOST', + nargs='+', + help='monitor host to pull keys from', + ) + parser.set_defaults( + func=gatherkeys, + ) diff --git a/ceph_deploy/install.py b/ceph_deploy/install.py new file mode 100644 index 0000000..9994b8e --- /dev/null +++ b/ceph_deploy/install.py @@ -0,0 +1,574 @@ +import argparse +import logging + +from . import exc +from . 
import lsb +from .cliutil import priority +from .sudo_pushy import get_transport + +LOG = logging.getLogger(__name__) + +def install_suse(release, codename, version_kind, version): + import platform + import subprocess + + if version_kind in ['stable', 'testing']: + key = 'release' + else: + key = 'autobuild' + + if codename == 'Mantis': + distro='opensuse12' + else: + distro='sles-11sp2' + + subprocess.check_call( + args='su -c \'rpm --import "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc"\''.format(key=key), + shell=True, + ) + + if version_kind == 'stable': + url = 'http://ceph.com/rpm-{version}/{distro}/'.format( + version=version, + distro=distro, + ) + elif version_kind == 'testing': + url = 'http://ceph.com/rpm-testing/{distro}'.format(distro=distro) + elif version_kind == 'dev': + url = 'http://gitbuilder.ceph.com/ceph-rpm-{distro}{release}-{machine}-basic/ref/{version}/'.format( + distro=distro, + release=release.split(".",1)[0], + machine=platform.machine(), + version=version, + ) + + subprocess.check_call( + args=['rpm', '-Uvh','--quiet', '{url}noarch/ceph-release-1-0.noarch.rpm'.format( + url=url + )] + ) + + subprocess.check_call( + args=[ + 'zypper', + '--non-interactive', + '--quiet', + 'install', + 'ceph', + 'ceph-common', + 'ceph-fs-common', + ], + ) + +def uninstall_suse(arg_purge=False): + import subprocess + + packages = [ + 'ceph', + 'ceph-mds', + 'ceph-common', + 'ceph-fs-common', + ] + args = [ + 'zypper', + '--non-interactive', + '--quiet', + 'remove', + ] + + args.extend(packages) + subprocess.check_call(args=args) + +def uninstall_debian(arg_purge=False): + import subprocess + + packages = [ + 'ceph', + 'ceph-mds', + 'ceph-common', + 'ceph-fs-common', + ] + args = [ + 'apt-get', + '-q', + 'remove', + '-f', + '-y', + '--force-yes', + ] + if arg_purge: + args.append('--purge') + args.append('--') + args.extend(packages) + subprocess.check_call(args=args) + +def install_fedora(release, codename, version_kind, version): + 
def uninstall_fedora(arg_purge=False):
    """
    Remove the ceph packages from a Fedora host with yum.

    Runs remotely via pushy.compile, so the import is function-local.

    :param arg_purge: accepted for interface parity with the Debian
        uninstaller; yum has no purge concept, so it is ignored here.
    """
    import subprocess

    cmd = [
        'yum',
        '-q',
        '-y',
        'remove',
        ]
    cmd += [
        'ceph',
        'ceph-mds',
        'ceph-common',
        'ceph-fs-common',
        ]
    subprocess.check_call(args=cmd)
# NOTE(review): this is an exact duplicate of uninstall_debian defined
# earlier in this module; at import time this later definition silently
# replaces the earlier one.  One of the two copies should be removed.
def uninstall_debian(arg_purge=False):
    """
    Remove (and with arg_purge=True also purge configuration of) the
    ceph packages from a Debian/Ubuntu host with apt-get.

    Runs remotely via pushy.compile, so the import is function-local.
    """
    import subprocess

    packages = [
        'ceph',
        'ceph-mds',
        'ceph-common',
        'ceph-fs-common',
        ]
    args = [
        'apt-get',
        '-q',
        'remove',
        '-f',
        '-y',
        '--force-yes',
        ]
    # --purge removes configuration files as well as the packages
    if arg_purge:
        args.append('--purge')
    args.append('--')
    args.extend(packages)
    subprocess.check_call(args=args)
def purge_data_any():
    """
    Remove all ceph state from this host: /var/lib/ceph and /etc/ceph.

    Runs remotely via pushy.compile, so the imports are function-local.
    """
    import subprocess
    import os.path

    # best-effort first pass; --one-file-system makes rm skip anything
    # mounted under /var/lib/ceph (osd data disks), so errors are
    # deliberately ignored here (subprocess.call, not check_call)
    subprocess.call(args=[
        'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
        ])
    if os.path.exists('/var/lib/ceph'):
        # something survived, i.e. filesystems are still mounted there;
        # unmount every first- and second-level directory ...
        subprocess.check_call(args=[
            'find', '/var/lib/ceph',
            '-mindepth', '1',
            '-maxdepth', '2',
            '-type', 'd',
            '-exec', 'umount', '{}', ';',
            ])
        # ... then remove again, this time failing loudly on error
        subprocess.check_call(args=[
            'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
            ])
    subprocess.check_call(args=[
        'rm', '-rf', '--one-file-system', '--', '/etc/ceph',
        ])
def purge(args):
    """
    Remove the ceph packages and their configuration from every host
    in ``args.host``.

    :raises exc.UnsupportedPlatform: for distros we cannot handle
    """
    LOG.debug(
        'Purging from cluster %s hosts %s',
        args.cluster,
        ' '.join(args.host),
        )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        # TODO username
        sudo = args.pushy(get_transport(hostname))
        (distro, release, codename) = lsb.get_lsb_release(sudo)
        LOG.debug('Distro %s codename %s', distro, codename)

        if (distro == 'Debian' or distro == 'Ubuntu'):
            LOG.debug('Purging host %s ...', hostname)
            purge_r = sudo.compile(uninstall_debian)
        elif (distro == 'CentOS' or distro == 'Scientific') or distro.startswith('RedHat'):
            # 'Scientific' added for consistency with uninstall(),
            # which already accepts it; log text fixed to say Purging
            LOG.debug('Purging host %s ...', hostname)
            purge_r = sudo.compile(uninstall_centos)
        else:
            raise exc.UnsupportedPlatform(distro=distro, codename=codename)

        purge_r(arg_purge=True)
        sudo.close()
class StoreVersion(argparse.Action):
    """
    argparse action: store the value exactly like ``"store"`` would,
    and additionally record *which* of the mutually exclusive version
    options (stable / testing / dev) was given on the command line, in
    ``namespace.version_kind``.

    This lets the install command tell an explicitly passed option
    apart from parser defaults.
    """
    def __call__(self, parser, namespace, values, option_string=None):
        namespace.version_kind = self.dest
        setattr(namespace, self.dest, values)
+ """ + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to uninstall Ceph from', + ) + parser.set_defaults( + func=uninstall, + ) + +@priority(80) +def make_purge(parser): + """ + Remove Ceph packages from remote hosts and purge all data. + """ + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to purge Ceph from', + ) + parser.set_defaults( + func=purge, + ) + + +@priority(80) +def make_purge_data(parser): + """ + Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph + """ + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to purge Ceph data from', + ) + parser.set_defaults( + func=purge_data, + ) diff --git a/ceph_deploy/lsb.py b/ceph_deploy/lsb.py new file mode 100644 index 0000000..3965ed5 --- /dev/null +++ b/ceph_deploy/lsb.py @@ -0,0 +1,97 @@ +from . import exc + +def check_lsb_release(): + """ + Verify if lsb_release command is available + """ + import subprocess + + args = [ 'which', 'lsb_release', ] + process = subprocess.Popen( + args=args, + stdout=subprocess.PIPE, + ) + lsb_release_path, _ = process.communicate() + ret = process.wait() + if ret != 0: + raise RuntimeError('The lsb_release command was not found on remote host. Please install the lsb-release package.') + +def lsb_release(): + """ + Get LSB release information from lsb_release. + + Returns truple with distro, release and codename. Otherwise + the function raises an error (subprocess.CalledProcessError or + RuntimeError). 
+ """ + import subprocess + + args = [ 'lsb_release', '-s', '-i' ] + process = subprocess.Popen( + args=args, + stdout=subprocess.PIPE, + ) + distro, _ = process.communicate() + ret = process.wait() + if ret != 0: + raise subprocess.CalledProcessError(ret, args, output=distro) + if distro == '': + raise RuntimeError('lsb_release gave invalid output for distro') + + args = [ 'lsb_release', '-s', '-r', ] + process = subprocess.Popen( + args=args, + stdout=subprocess.PIPE, + ) + release, _ = process.communicate() + ret = process.wait() + if ret != 0: + raise subprocess.CalledProcessError(ret, args, output=release) + if release == '': + raise RuntimeError('lsb_release gave invalid output for release') + + args = [ 'lsb_release', '-s', '-c', ] + process = subprocess.Popen( + args=args, + stdout=subprocess.PIPE, + ) + codename, _ = process.communicate() + ret = process.wait() + if ret != 0: + raise subprocess.CalledProcessError(ret, args, output=codename) + if codename == '': + raise RuntimeError('lsb_release gave invalid output for codename') + + return (str(distro).rstrip(), str(release).rstrip(), str(codename).rstrip()) + + +def get_lsb_release(sudo): + """ + Get LSB release information from lsb_release. + + Check if lsb_release is installed on the remote host and issue + a message if not. + + Returns truple with distro, release and codename. Otherwise + the function raises an error (subprocess.CalledProcessError or + RuntimeError). + """ + try: + check_lsb_release_r = sudo.compile(check_lsb_release) + status = check_lsb_release_r() + except RuntimeError as e: + raise exc.MissingPackageError(e.message) + + lsb_release_r = sudo.compile(lsb_release) + return lsb_release_r() + + +def choose_init(distro, codename): + """ + Select a init system for a given distribution. + + Returns the name of a init system (upstart, sysvinit ...). 
+ """ + if distro == 'Ubuntu': + return 'upstart' + return 'sysvinit' diff --git a/ceph_deploy/mds.py b/ceph_deploy/mds.py new file mode 100644 index 0000000..b8f1a03 --- /dev/null +++ b/ceph_deploy/mds.py @@ -0,0 +1,230 @@ +import logging + +from cStringIO import StringIO + +from . import conf +from . import exc +from . import lsb +from .cliutil import priority +from .sudo_pushy import get_transport + + +LOG = logging.getLogger(__name__) + + +def get_bootstrap_mds_key(cluster): + """ + Read the bootstrap-mds key for `cluster`. + """ + path = '{cluster}.bootstrap-mds.keyring'.format(cluster=cluster) + try: + with file(path, 'rb') as f: + return f.read() + except IOError: + raise RuntimeError('bootstrap-mds keyring not found; run \'gatherkeys\'') + + +def create_mds_bootstrap(cluster, key): + """ + Run on mds node, writes the bootstrap key if not there yet. + + Returns None on success, error message on error exceptions. pushy + mangles exceptions to all be of type ExceptionProxy, so we can't + tell between bug and correctly handled failure, so avoid using + exceptions for non-exceptional runs. 
+ """ + import os + + path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format( + cluster=cluster, + ) + if not os.path.exists(path): + tmp = '{path}.{pid}.tmp'.format( + path=path, + pid=os.getpid(), + ) + # file() doesn't let us control access mode from the + # beginning, and thus would have a race where attacker can + # open before we chmod the file, so play games with os.open + fd = os.open( + tmp, + (os.O_WRONLY|os.O_CREAT|os.O_EXCL + |os.O_NOCTTY|os.O_NOFOLLOW), + 0600, + ) + with os.fdopen(fd, 'wb') as f: + f.write(key) + f.flush() + os.fsync(f) + os.rename(tmp, path) + + +def create_mds( + name, + cluster, + init, + ): + import os + import subprocess + import errno + + path = '/var/lib/ceph/mds/{cluster}-{name}'.format( + cluster=cluster, + name=name + ) + + try: + os.mkdir(path) + except OSError, e: + if e.errno == errno.EEXIST: + pass + else: + raise + + bootstrap_keyring = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format( + cluster=cluster + ) + + keypath = os.path.join(path, 'keyring') + + subprocess.check_call( + args=[ + 'ceph', + '--cluster', cluster, + '--name', 'client.bootstrap-mds', + '--keyring', bootstrap_keyring, + 'auth', 'get-or-create', 'mds.{name}'.format(name=name), + 'osd', 'allow *', + 'mds', 'allow', + 'mon', 'allow rwx', + '-o', + os.path.join(keypath), + ], + ) + + with file(os.path.join(path, 'done'), 'wb') as f: + pass + + with file(os.path.join(path, init), 'wb') as f: + pass + + if init == 'upstart': + subprocess.check_call( + args=[ + 'initctl', + 'emit', + 'ceph-mds', + 'cluster={cluster}'.format(cluster=cluster), + 'id={name}'.format(name=name), + ]) + elif init == 'sysvinit': + subprocess.check_call( + args=[ + 'service', + 'ceph', + 'start', + 'mds.{name}'.format(name=name), + ]) + +def mds_create(args): + cfg = conf.load(args) + LOG.debug( + 'Deploying mds, cluster %s hosts %s', + args.cluster, + ' '.join(':'.join(x or '' for x in t) for t in args.mds), + ) + + if not args.mds: + raise exc.NeedHostError() + + key 
def colon_separated(s):
    """
    argparse type for 'HOST[:NAME]' arguments.

    Returns a (host, name) tuple; when no single colon is present the
    whole string is used for both elements.
    """
    if s.count(':') == 1:
        host, name = s.split(':')
        return (host, name)
    return (s, s)
+ """ + parser.add_argument( + 'subcommand', + metavar='SUBCOMMAND', + choices=[ + 'create', + 'destroy', + ], + help='create or destroy', + ) + parser.add_argument( + 'mds', + metavar='HOST[:NAME]', + nargs='*', + type=colon_separated, + help='host (and optionally the daemon name) to deploy on', + ) + parser.set_defaults( + func=mds, + ) diff --git a/ceph_deploy/memoize.py b/ceph_deploy/memoize.py new file mode 100644 index 0000000..fd344a0 --- /dev/null +++ b/ceph_deploy/memoize.py @@ -0,0 +1,26 @@ +import functools + + +class NotFound(object): + """ + Sentinel object to say call was not memoized. + + Supposed to be faster than throwing exceptions on cache miss. + """ + def __str__(self): + return self.__class__.__name__ + +NotFound = NotFound() + + +def memoize(f): + cache = {} + + @functools.wraps(f) + def wrapper(*args, **kwargs): + key = (args, tuple(sorted(kwargs.iteritems()))) + val = cache.get(key, NotFound) + if val is NotFound: + val = cache[key] = f(*args, **kwargs) + return val + return wrapper diff --git a/ceph_deploy/misc.py b/ceph_deploy/misc.py new file mode 100644 index 0000000..0954800 --- /dev/null +++ b/ceph_deploy/misc.py @@ -0,0 +1,11 @@ + +def get_file(path): + """ + Run on mon node, grab a file. + """ + try: + with file(path, 'rb') as f: + return f.read() + except IOError: + pass + diff --git a/ceph_deploy/mon.py b/ceph_deploy/mon.py new file mode 100644 index 0000000..055f1f6 --- /dev/null +++ b/ceph_deploy/mon.py @@ -0,0 +1,264 @@ +import ConfigParser +import logging +import re + +from cStringIO import StringIO + +from . import conf +from . import exc +from . 
def create_mon(cluster, monitor_keyring, init):
    """
    Run on the mon node: create and start a monitor.

    Writes the initial mon. keyring, runs ``ceph-mon --mkfs``, drops
    the 'done' and init-system marker files, then starts the daemon.

    Runs remotely via pushy.compile, so the imports are function-local.

    :param cluster: cluster name
    :param monitor_keyring: contents of the initial mon. keyring
    :param init: 'upstart' or 'sysvinit'
    """
    import os
    import socket
    import subprocess

    hostname = socket.gethostname().split('.')[0]
    # use the cluster name in the data path: ceph-mon --mkfs with
    # --cluster writes to /var/lib/ceph/mon/{cluster}-{id}, so the
    # previous hard-coded 'ceph-' prefix pointed at the wrong
    # directory for any cluster not named 'ceph'
    path = '/var/lib/ceph/mon/{cluster}-{hostname}'.format(
        cluster=cluster,
        hostname=hostname,
        )
    done_path = os.path.join(path, 'done')
    init_path = os.path.join(path, init)

    if not os.path.exists(path):
        os.makedirs(path)

    if not os.path.exists(done_path):
        if not os.path.exists('/var/lib/ceph/tmp'):
            os.makedirs('/var/lib/ceph/tmp')
        keyring = '/var/lib/ceph/tmp/{cluster}-{hostname}.mon.keyring'.format(
            cluster=cluster,
            hostname=hostname,
            )

        # open() instead of the Python-2-only file() builtin
        with open(keyring, 'w') as f:
            f.write(monitor_keyring)

        subprocess.check_call(
            args=[
                'ceph-mon',
                '--cluster', cluster,
                '--mkfs',
                '-i', hostname,
                '--keyring', keyring,
                ],
            )
        os.unlink(keyring)
        with open(done_path, 'w'):
            pass

    if not os.path.exists(init_path):
        with open(init_path, 'w'):
            pass

    if init == 'upstart':
        subprocess.check_call(
            args=[
                'initctl',
                'emit',
                'ceph-mon',
                'cluster={cluster}'.format(cluster=cluster),
                'id={hostname}'.format(hostname=hostname),
                ],
            )
    elif init == 'sysvinit':
        subprocess.check_call(
            args=[
                'service',
                'ceph',
                'start',
                'mon.{hostname}'.format(hostname=hostname),
                ],
            )
keyring not found; run \'new\' to create a new cluster') + + LOG.debug( + 'Deploying mon, cluster %s hosts %s', + args.cluster, + ' '.join(args.mon), + ) + + errors = 0 + for hostname in args.mon: + try: + LOG.debug('Deploying mon to %s', hostname) + + # TODO username + sudo = args.pushy(get_transport(hostname)) + + (distro, release, codename) = lsb.get_lsb_release(sudo) + init = lsb.choose_init(distro, codename) + LOG.debug('Distro %s codename %s, will use %s', + distro, codename, init) + + write_conf_r = sudo.compile(conf.write_conf) + conf_data = StringIO() + cfg.write(conf_data) + write_conf_r( + cluster=args.cluster, + conf=conf_data.getvalue(), + overwrite=args.overwrite_conf, + ) + + create_mon_r = sudo.compile(create_mon) + create_mon_r( + cluster=args.cluster, + monitor_keyring=monitor_keyring, + init=init, + ) + + # TODO add_bootstrap_peer_hint + + sudo.close() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to create %d monitors' % errors) + + +def destroy_mon(cluster): + import os + import subprocess + import socket + + hostname = socket.gethostname().split('.')[0] + path = '/var/lib/ceph/mon/ceph-{hostname}'.format( + hostname=hostname, + ) + + if os.path.exists(path): + # remove from cluster + subprocess.check_call( + args=[ + 'sudo', + 'ceph', + '--cluster={cluster}'.format(cluster=cluster), + '-n', 'mon.', + '-k', '{path}/keyring'.format(path=path), + 'mon', + 'remove', + hostname, + ], + ) + + # stop + if os.path.exists(os.path.join(path, 'upstart')): + subprocess.call( # ignore initctl error when job not running + args=[ + 'initctl', + 'stop', + 'ceph-mon', + 'cluster={cluster}'.format(cluster=cluster), + 'id={hostname}'.format(hostname=hostname), + ], + ) + elif os.path.exists(os.path.join(path, 'sysvinit')): + subprocess.check_call( + args=[ + 'service', + 'ceph', + 'stop', + 'mon.{hostname}'.format(hostname=hostname), + ], + ) + + # delete monitor directory + subprocess.check_call( + 
def mon_destroy(args):
    """
    Remove the monitor from each host in ``args.mon``.

    Per-host failures are logged and counted so every host is
    attempted; a GenericError is raised at the end if any failed.
    """
    errors = 0
    for hostname in args.mon:
        try:
            LOG.debug('Removing mon from %s', hostname)

            # TODO username
            sudo = args.pushy(get_transport(hostname))

            destroy_mon_r = sudo.compile(destroy_mon)
            destroy_mon_r(
                cluster=args.cluster,
                )
            sudo.close()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        # message fixed: this path destroys monitors ('create' was a
        # copy-paste leftover from mon_create)
        raise exc.GenericError('Failed to destroy %d monitors' % errors)
import exc +from .cliutil import priority + + +LOG = logging.getLogger(__name__) + + +def generate_auth_key(): + key = os.urandom(16) + header = struct.pack(' 0: + name = name.split('.')[0] + LOG.debug('Resolving host %s', host) + ip = None + ip = get_nonlocal_ip(host) + LOG.debug('Monitor %s at %s', name, ip) + mon_initial_members.append(name) + mon_host.append(ip) + + LOG.debug('Monitor initial members are %s', mon_initial_members) + LOG.debug('Monitor addrs are %s', mon_host) + + cfg.set('global', 'mon initial members', ', '.join(mon_initial_members)) + # no spaces here, see http://tracker.newdream.net/issues/3145 + cfg.set('global', 'mon host', ','.join(mon_host)) + + # override undesirable defaults, needed until bobtail + + # http://tracker.newdream.net/issues/3136 + cfg.set('global', 'auth supported', 'cephx') + + # http://tracker.newdream.net/issues/3137 + cfg.set('global', 'osd journal size', '1024') + + # http://tracker.newdream.net/issues/3138 + cfg.set('global', 'filestore xattr use omap', 'true') + + path = '{name}.conf'.format( + name=args.cluster, + ) + + # FIXME: create a random key + LOG.debug('Creating a random mon key...') + mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key() + + keypath = '{name}.mon.keyring'.format( + name=args.cluster, + ) + + LOG.debug('Writing initial config to %s...', path) + if not args.dry_run: + tmp = '%s.tmp' % path + with file(tmp, 'w') as f: + cfg.write(f) + try: + os.rename(tmp, path) + except OSError as e: + if e.errno == errno.EEXIST: + raise exc.ClusterExistsError(path) + else: + raise + + LOG.debug('Writing monitor keyring to %s...', keypath) + if not args.dry_run: + tmp = '%s.tmp' % keypath + with file(tmp, 'w') as f: + f.write(mon_keyring) + try: + os.rename(tmp, keypath) + except OSError as e: + if e.errno == errno.EEXIST: + raise exc.ClusterExistsError(keypath) + else: + raise + + +@priority(10) +def make(parser): + """ + Start deploying a new cluster, and write a CLUSTER.conf and 
def get_bootstrap_osd_key(cluster):
    """
    Read the bootstrap-osd keyring for `cluster` from the current
    working directory.

    :param cluster: cluster name; the file read is
        ``{cluster}.bootstrap-osd.keyring``
    :returns: raw keyring contents (bytes)
    :raises RuntimeError: if the keyring file does not exist
    """
    path = '{cluster}.bootstrap-osd.keyring'.format(cluster=cluster)
    try:
        # open() instead of the Python-2-only file() builtin
        with open(path, 'rb') as f:
            return f.read()
    except IOError:
        raise RuntimeError('bootstrap-osd keyring not found; run \'gatherkeys\'')
Returns 4-tuple of + (process exit code, command, stdout contents, stderr contents) + """ + import subprocess + import tempfile + + otmp = tempfile.TemporaryFile() + etmp = tempfile.TemporaryFile() + cmd = ' '.join(kwargs['args']) + ret = 0 + errtxt = '' + kwargs.update(dict(stdout=otmp, stderr=etmp)) + try: + subprocess.check_call(*args, **kwargs) + except subprocess.CalledProcessError as e: + ret = e.returncode + except Exception as e: + ret = -1 + # OSError has errno + if hasattr(e, 'errno'): + ret = e.errno + errtxt = str(e) + otmp.seek(0) + etmp.seek(0) + return (ret, cmd, otmp.read(), errtxt + etmp.read()) + + # in case disks have been prepared before we do this, activate + # them now. + return subproc_call( + args=[ + 'udevadm', + 'trigger', + '--subsystem-match=block', + '--action=add', + ], + ) + +def prepare_disk(cluster, disk, journal, activate_prepared_disk, zap, dmcrypt, dmcrypt_dir): + """ + Run on osd node, prepares a data disk for use. + """ + args = [ + 'ceph-disk-prepare', + ] + if zap: + args.append('--zap-disk') + if dmcrypt: + args.append('--dmcrypt') + if dmcrypt_dir is not None: + args.append('--dmcrypt-key-dir') + args.append(dmcrypt_dir) + args.extend([ + '--', + disk, + ]) + if journal is not None: + args.append(journal) + + def subproc_call(*args, **kwargs): + """ + call subproc that might fail, collect returncode and stderr/stdout + to be used in pushy.compile()d functions. 
Returns 4-tuple of + (process exit code, command, stdout contents, stderr contents) + """ + import subprocess + import tempfile + + otmp = tempfile.TemporaryFile() + etmp = tempfile.TemporaryFile() + cmd = ' '.join(kwargs['args']) + ret = 0 + errtxt = '' + kwargs.update(dict(stdout=otmp, stderr=etmp)) + try: + subprocess.check_call(*args, **kwargs) + except subprocess.CalledProcessError as e: + ret = e.returncode + except Exception as e: + ret = -1 + # OSError has errno + if hasattr(e, 'errno'): + ret = e.errno + errtxt = str(e) + otmp.seek(0) + etmp.seek(0) + return (ret, cmd, otmp.read(), errtxt + etmp.read()) + + ret = subproc_call(args=args) + if ret[0]: + return ret + if activate_prepared_disk: + ret = subproc_call( + args=[ + 'udevadm', + 'trigger', + '--subsystem-match=block', + '--action=add', + ], + ) + if ret[0]: + return ret + return (0, '', '', '') + + +def activate_disk(cluster, disk, init): + """ + Run on the osd node, activates a disk. + """ + def subproc_call(*args, **kwargs): + """ + call subproc that might fail, collect returncode and stderr/stdout + to be used in pushy.compile()d functions. 
Returns 4-tuple of + (process exit code, command, stdout contents, stderr contents) + """ + import subprocess + import tempfile + + otmp = tempfile.TemporaryFile() + etmp = tempfile.TemporaryFile() + cmd = ' '.join(kwargs['args']) + ret = 0 + errtxt = '' + kwargs.update(dict(stdout=otmp, stderr=etmp)) + try: + subprocess.check_call(*args, **kwargs) + except subprocess.CalledProcessError as e: + ret = e.returncode + except Exception as e: + ret = -1 + # OSError has errno + if hasattr(e, 'errno'): + ret = e.errno + errtxt = str(e) + otmp.seek(0) + etmp.seek(0) + return (ret, cmd, otmp.read(), errtxt + etmp.read()) + + return subproc_call( + args=[ + 'ceph-disk-activate', + '--mark-init', + init, + '--mount', + disk, + ]) + +def prepare(args, cfg, activate_prepared_disk): + LOG.debug( + 'Preparing cluster %s disks %s', + args.cluster, + ' '.join(':'.join(x or '' for x in t) for t in args.disk), + ) + + key = get_bootstrap_osd_key(cluster=args.cluster) + + bootstrapped = set() + errors = 0 + for hostname, disk, journal in args.disk: + try: + if disk is None: + raise exc.NeedDiskError(hostname) + # TODO username + sudo = args.pushy(get_transport(hostname)) + + if hostname not in bootstrapped: + bootstrapped.add(hostname) + LOG.debug('Deploying osd to %s', hostname) + + write_conf_r = sudo.compile(conf.write_conf) + conf_data = StringIO() + cfg.write(conf_data) + write_conf_r( + cluster=args.cluster, + conf=conf_data.getvalue(), + overwrite=args.overwrite_conf, + ) + + create_osd_r = sudo.compile(create_osd) + ret, cmd, out, err = create_osd_r( + cluster=args.cluster, + key=key, + ) + if ret: + s = '{cmd} returned {ret}\n{out}\n{err}'.format( + cmd=cmd, ret=ret, out=out, err=err) + LOG.debug('Failed preparing host %s: %s', hostname, s) + raise RuntimeError(s) + else: + LOG.debug('Host %s is now ready for osd use.', hostname) + + LOG.debug('Preparing host %s disk %s journal %s activate %s', + hostname, disk, journal, activate_prepared_disk) + + prepare_disk_r = 
sudo.compile(prepare_disk) + ret, cmd, out, err = prepare_disk_r( + cluster=args.cluster, + disk=disk, + journal=journal, + activate_prepared_disk=activate_prepared_disk, + zap=args.zap_disk, + dmcrypt=args.dmcrypt, + dmcrypt_dir=args.dmcrypt_key_dir, + ) + sudo.close() + if ret: + s = '{cmd} returned {ret}\n{out}\n{err}'.format( + cmd=cmd, ret=ret, out=out, err=err) + raise RuntimeError(s) + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to create %d OSDs' % errors) + +def activate(args, cfg): + LOG.debug( + 'Activating cluster %s disks %s', + args.cluster, + # join elements of t with ':', t's with ' ' + # allow None in elements of t; print as empty + ' '.join(':'.join((s or '') for s in t) for t in args.disk), + ) + + for hostname, disk, journal in args.disk: + + # TODO username + sudo = args.pushy(get_transport(hostname)) + + LOG.debug('Activating host %s disk %s', hostname, disk) + + (distro, release, codename) = lsb.get_lsb_release(sudo) + init = lsb.choose_init(distro, codename) + LOG.debug('Distro %s codename %s, will use %s', + distro, codename, init) + + activate_disk_r = sudo.compile(activate_disk) + err, cmd, stdout, stderr = activate_disk_r( + cluster=args.cluster, + disk=disk, + init=init, + ) + sudo.close() + if err: + s = '{cmd} returned {ret}\n{out}\n{err}'.format( + cmd=cmd, ret=ret, out=out, err=err) + raise RuntimeError(s) + +# NOTE: this mirrors ceph-disk-prepare --zap-disk DEV +def zap(dev): + import subprocess + + try: + # this kills the crab + # + # sgdisk will wipe out the main copy of the GPT partition + # table (sorry), but it doesn't remove the backup copies, and + # subsequent commands will continue to complain and fail when + # they see those. zeroing the last few blocks of the device + # appears to do the trick. 
+ lba_size = 4096 + size = 33 * lba_size + with file(dev, 'wb') as f: + f.seek(-size, os.SEEK_END) + f.write(size*'\0') + + subprocess.check_call( + args=[ + 'sgdisk', + '--zap-all', + '--clear', + '--mbrtogpt', + '--', + dev, + ], + ) + except subprocess.CalledProcessError as e: + raise RuntimeError(e) + +def disk_zap(args): + cfg = conf.load(args) + + for hostname, disk, journal in args.disk: + LOG.debug('zapping %s on %s', disk, hostname) + + # TODO username + sudo = args.pushy(get_transport(hostname)) + zap_r = sudo.compile(zap) + zap_r(disk) + sudo.close() + + +def list_disk(): + + def subproc_call(*args, **kwargs): + """ + call subproc that might fail, collect returncode and stderr/stdout + to be used in pushy.compile()d functions. Returns 4-tuple of + (process exit code, command, stdout contents, stderr contents) + """ + import subprocess + import tempfile + + otmp = tempfile.TemporaryFile() + etmp = tempfile.TemporaryFile() + cmd = ' '.join(kwargs['args']) + errtxt = '' + ret = 0 + kwargs.update(dict(stdout=otmp, stderr=etmp)) + try: + subprocess.check_call(*args, **kwargs) + except subprocess.CalledProcessError as e: + ret = e.returncode + except Exception as e: + ret = -1 + # OSError has errno + if hasattr(e, 'errno'): + ret = e.errno + errtxt = str(e) + otmp.seek(0) + etmp.seek(0) + return (ret, cmd, otmp.read(), errtxt + etmp.read()) + + ret, cmd, out, err = subproc_call( + args=[ + 'ceph-disk', + 'list', + ], + ) + + return ret, cmd, out, err + +def disk_list(args, cfg): + for hostname, disk, journal in args.disk: + + # TODO username + sudo = args.pushy(get_transport(hostname)) + + LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname)) + + list_disk_r = sudo.compile(list_disk) + ret, cmd, out, err = list_disk_r() + if ret: + LOG.error("disk list failed: %s", err) + else: + print out, + + sudo.close() + +def osd_list(args, cfg): + LOG.error('Not yet implemented; see http://tracker.ceph.com/issues/5071') + sys.exit(1) + +def osd(args): + 
cfg = conf.load(args) + + if args.subcommand == 'list': + osd_list(args, cfg) + elif args.subcommand == 'prepare': + prepare(args, cfg, activate_prepared_disk=False) + elif args.subcommand == 'create': + prepare(args, cfg, activate_prepared_disk=True) + elif args.subcommand == 'activate': + activate(args, cfg) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + sys.exit(1) + + + +def disk(args): + cfg = conf.load(args) + + if args.subcommand == 'list': + disk_list(args, cfg) + elif args.subcommand == 'prepare': + prepare(args, cfg, activate_prepared_disk=False) + elif args.subcommand == 'activate': + activate(args, cfg) + elif args.subcommand == 'zap': + disk_zap(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + sys.exit(1) + + +def colon_separated(s): + journal = None + disk = None + host = None + if s.count(':') == 2: + (host, disk, journal) = s.split(':') + elif s.count(':') == 1: + (host, disk) = s.split(':') + elif s.count(':') == 0: + (host) = s + else: + raise argparse.ArgumentTypeError('must be in form HOST:DISK[:JOURNAL]') + + if disk: + # allow just "sdb" to mean /dev/sdb + disk = os.path.join('/dev', disk) + if journal is not None: + journal = os.path.join('/dev', journal) + + return (host, disk, journal) + + +@priority(50) +def make(parser): + """ + Prepare a data disk on remote host. 
+ """ + parser.add_argument( + 'subcommand', + metavar='SUBCOMMAND', + choices=[ + 'list', + 'create', + 'prepare', + 'activate', + 'destroy', + ], + help='list, create (prepare+activate), prepare, activate, or destroy', + ) + parser.add_argument( + 'disk', + nargs='+', + metavar='HOST:DISK[:JOURNAL]', + type=colon_separated, + help='host and disk to prepare', + ) + parser.add_argument( + '--zap-disk', + action='store_true', default=None, + help='destroy existing partition table and content for DISK', + ) + parser.add_argument( + '--dmcrypt', + action='store_true', default=None, + help='use dm-crypt on DISK', + ) + parser.add_argument( + '--dmcrypt-key-dir', + metavar='KEYDIR', + default='/etc/ceph/dmcrypt-keys', + help='directory where dm-crypt keys are stored', + ) + parser.set_defaults( + func=osd, + ) + + +@priority(50) +def make_disk(parser): + """ + Manage disks on a remote host. + """ + parser.add_argument( + 'subcommand', + metavar='SUBCOMMAND', + choices=[ + 'list', + 'prepare', + 'activate', + 'zap', + ], + help='list, prepare, activate, zap', + ) + parser.add_argument( + 'disk', + nargs='+', + metavar='HOST[:DISK]', + type=colon_separated, + help='host (and optionally disk)', + ) + parser.add_argument( + '--zap-disk', + action='store_true', default=None, + help='destroy existing partition table and content for DISK', + ) + parser.add_argument( + '--dmcrypt', + action='store_true', default=None, + help='use dm-crypt on DISK', + ) + parser.add_argument( + '--dmcrypt-key-dir', + metavar='KEYDIR', + default='/etc/ceph/dmcrypt-keys', + help='directory where dm-crypt keys are stored', + ) + parser.set_defaults( + func=disk, + ) diff --git a/ceph_deploy/sudo_pushy.py b/ceph_deploy/sudo_pushy.py new file mode 100644 index 0000000..26cfcd3 --- /dev/null +++ b/ceph_deploy/sudo_pushy.py @@ -0,0 +1,50 @@ +import pushy.transport.ssh +import pushy.transport.local +import subprocess + + +class Local_Popen(pushy.transport.local.Popen): + def __init__(self, command, 
address, **kwargs): + pushy.transport.BaseTransport.__init__(self, address) + + self.__proc = subprocess.Popen(command, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + bufsize=65535) + + self.stdout = self.__proc.stdout + self.stderr = self.__proc.stderr + self.stdin = self.__proc.stdin + + def close(self): + self.stdin.close() + self.__proc.wait() + +class SshSudoTransport(object): + @staticmethod + def Popen(command, *a, **kw): + command = ['sudo'] + command + return pushy.transport.ssh.Popen(command, *a, **kw) + +class LocalSudoTransport(object): + @staticmethod + def Popen(command, *a, **kw): + command = ['sudo'] + command + return Local_Popen(command, *a, **kw) + +def get_transport(hostname): + import socket + + myhostname = socket.gethostname().split('.')[0] + if hostname == myhostname: + return 'local+sudo:' + else: + return 'ssh+sudo:{hostname}'.format(hostname=hostname) + +def patch(): + """ + Monkey patches pushy so it supports running via (passphraseless) + sudo on the remote host. + """ + pushy.transports['ssh+sudo'] = SshSudoTransport + pushy.transports['local+sudo'] = LocalSudoTransport diff --git a/ceph_deploy/test/__init__.py b/ceph_deploy/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ceph_deploy/test/conftest.py b/ceph_deploy/test/conftest.py new file mode 100644 index 0000000..819fc34 --- /dev/null +++ b/ceph_deploy/test/conftest.py @@ -0,0 +1,98 @@ +import logging +import os +import subprocess +import sys + + +LOG = logging.getLogger(__name__) + + +def _prepend_path(env): + """ + Make sure the PATH contains the location where the Python binary + lives. This makes sure cli tools installed in a virtualenv work. 
+ """ + if env is None: + env = os.environ + env = dict(env) + new = os.path.dirname(sys.executable) + path = env.get('PATH') + if path is not None: + new = new + ':' + path + env['PATH'] = new + return env + + +class CLIFailed(Exception): + """CLI tool failed""" + + def __init__(self, args, status): + self.args = args + self.status = status + + def __str__(self): + return '{doc}: {args}: exited with status {status}'.format( + doc=self.__doc__, + args=self.args, + status=self.status, + ) + + +class CLIProcess(object): + def __init__(self, **kw): + self.kw = kw + + def __enter__(self): + try: + self.p = subprocess.Popen(**self.kw) + except OSError as e: + raise AssertionError( + 'CLI tool {args!r} does not work: {err}'.format( + args=self.kw['args'], + err=e, + ), + ) + else: + return self.p + + def __exit__(self, exc_type, exc_val, exc_tb): + self.p.wait() + if self.p.returncode != 0: + err = CLIFailed( + args=self.kw['args'], + status=self.p.returncode, + ) + if exc_type is None: + # nothing else raised, so we should complain; if + # something else failed, we'll just log + raise err + else: + LOG.error(str(err)) + + +class CLITester(object): + # provide easy way for caller to access the exception class + # without importing us + Failed = CLIFailed + + def __init__(self, tmpdir): + self.tmpdir = tmpdir + + def __call__(self, **kw): + kw.setdefault('cwd', str(self.tmpdir)) + kw['env'] = _prepend_path(kw.get('env')) + kw['env']['COLUMNS'] = '80' + return CLIProcess(**kw) + + +def pytest_funcarg__cli(request): + """ + Test command line behavior. 
+ """ + + # the tmpdir here will be the same value as the test function + # sees; we rely on that to let caller prepare and introspect + # any files the cli tool will read or create + tmpdir = request.getfuncargvalue('tmpdir') + + return CLITester(tmpdir=tmpdir) diff --git a/ceph_deploy/test/directory.py b/ceph_deploy/test/directory.py new file mode 100644 index 0000000..81d3e19 --- /dev/null +++ b/ceph_deploy/test/directory.py @@ -0,0 +1,13 @@ +import contextlib +import os + + +@contextlib.contextmanager +def directory(path): + prev = os.open('.', os.O_RDONLY | os.O_DIRECTORY) + try: + os.chdir(path) + yield + finally: + os.fchdir(prev) + os.close(prev) diff --git a/ceph_deploy/test/test_cli.py b/ceph_deploy/test/test_cli.py new file mode 100644 index 0000000..c801ed7 --- /dev/null +++ b/ceph_deploy/test/test_cli.py @@ -0,0 +1,60 @@ +import pytest +import subprocess + + +def test_help(tmpdir, cli): + with cli( + args=['ceph-deploy', '--help'], + stdout=subprocess.PIPE, + ) as p: + got = p.stdout.read() + assert got == """\ +usage: ceph-deploy [-h] [-v] [--cluster NAME] COMMAND ... + +Deploy Ceph + +optional arguments: + -h, --help show this help message and exit + -v, --verbose be more verbose + --cluster NAME name of the cluster + +commands: + COMMAND description + new Start deploying a new cluster, and write a CLUSTER.conf for + it. + install Install Ceph packages on remote hosts. + mon Deploy ceph monitor on remote hosts. + osd Prepare a data disk on remote host. +""" + + +def test_bad_command(tmpdir, cli): + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', 'bork'], + stderr=subprocess.PIPE, + ) as p: + got = p.stderr.read() + assert got == """\ +usage: ceph-deploy [-h] [-v] [--cluster NAME] COMMAND ... 
+ceph-deploy: error: argument COMMAND: invalid choice: 'bork' (choose from 'new', 'install', 'mon', 'osd') +""" + + assert err.value.status == 2 + assert {p.basename for p in tmpdir.listdir()} == set() + + +def test_bad_cluster(tmpdir, cli): + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', '--cluster=/evil-this-should-not-be-created', 'new'], + stderr=subprocess.PIPE, + ) as p: + got = p.stderr.read() + assert got == """\ +usage: ceph-deploy [-h] [-v] [--cluster NAME] COMMAND ... +ceph-deploy: error: argument --cluster: argument must start with a letter and contain only letters and numbers +""" + + assert err.value.status == 2 + assert {p.basename for p in tmpdir.listdir()} == set() diff --git a/ceph_deploy/test/test_cli_install.py b/ceph_deploy/test/test_cli_install.py new file mode 100644 index 0000000..ec47dc6 --- /dev/null +++ b/ceph_deploy/test/test_cli_install.py @@ -0,0 +1,89 @@ +import argparse +import collections +import mock +import pytest +import subprocess + +from ..cli import main +from .. import install + +from .directory import directory + + +def test_help(tmpdir, cli): + with cli( + args=['ceph-deploy', 'install', '--help'], + stdout=subprocess.PIPE, + ) as p: + got = p.stdout.read() + assert got == """\ +usage: ceph-deploy install [-h] [--stable [CODENAME] | --testing | --dev + [BRANCH_OR_TAG]] + HOST [HOST ...] + +Install Ceph packages on remote hosts. 
+ +positional arguments: + HOST hosts to install on + +optional arguments: + -h, --help show this help message and exit + --stable [CODENAME] install a release known as CODENAME (done by default) + (default: argonaut) + --testing install the latest development release + --dev [BRANCH_OR_TAG] + install a bleeding edge build from Git branch or tag + (default: master) +""" + + +def test_bad_no_host(tmpdir, cli): + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', 'install'], + stderr=subprocess.PIPE, + ) as p: + got = p.stderr.read() + assert got == """\ +usage: ceph-deploy install [-h] [--stable [CODENAME] | --testing | --dev + [BRANCH_OR_TAG]] + HOST [HOST ...] +ceph-deploy install: error: too few arguments +""" + + assert err.value.status == 2 + + +def test_simple(tmpdir): + ns = argparse.Namespace() + ns.pushy = mock.Mock() + conn = mock.NonCallableMock(name='PushyClient') + ns.pushy.return_value = conn + + mock_compiled = collections.defaultdict(mock.Mock) + conn.compile.side_effect = mock_compiled.__getitem__ + + mock_compiled[install.lsb_release].return_value = ('Ubuntu', 'precise') + + try: + with directory(str(tmpdir)): + main( + args=['-v', 'install', 'storehost1'], + namespace=ns, + ) + except SystemExit as e: + raise AssertionError('Unexpected exit: %s', e) + + ns.pushy.assert_has_calls([ + mock.call('ssh+sudo:storehost1'), + ]) + + mock_compiled.pop(install.lsb_release).assert_called_once_with() + + mock_compiled.pop(install.install_ubuntu).assert_called_once_with( + version_kind='stable', + codename='precise', + version='argonaut', + ) + + assert mock_compiled == {} diff --git a/ceph_deploy/test/test_cli_mon.py b/ceph_deploy/test/test_cli_mon.py new file mode 100644 index 0000000..6e90aaa --- /dev/null +++ b/ceph_deploy/test/test_cli_mon.py @@ -0,0 +1,112 @@ +import argparse +import collections +import mock +import pytest +import subprocess + +from ..cli import main +from .. 
import mon + +from .directory import directory + + +def test_help(tmpdir, cli): + with cli( + args=['ceph-deploy', 'mon', '--help'], + stdout=subprocess.PIPE, + ) as p: + got = p.stdout.read() + assert got == """\ +usage: ceph-deploy mon [-h] [HOST [HOST ...]] + +Deploy ceph monitor on remote hosts. + +positional arguments: + HOST host to deploy on + +optional arguments: + -h, --help show this help message and exit +""" + + +def test_bad_no_conf(tmpdir, cli): + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', 'mon'], + stderr=subprocess.PIPE, + ) as p: + got = p.stderr.read() + assert got == """\ +ceph-deploy: Cannot load config: [Errno 2] No such file or directory: 'ceph.conf' +""" + + assert err.value.status == 1 + + +def test_bad_no_mon(tmpdir, cli): + with tmpdir.join('ceph.conf').open('w'): + pass + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', 'mon'], + stderr=subprocess.PIPE, + ) as p: + got = p.stderr.read() + assert got == """\ +ceph-deploy: No hosts specified to deploy to. 
+""" + + assert err.value.status == 1 + + +def test_simple(tmpdir): + with tmpdir.join('ceph.conf').open('w') as f: + f.write("""\ +[global] +fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0 +mon initial members = host1 +""") + + ns = argparse.Namespace() + ns.pushy = mock.Mock() + conn = mock.NonCallableMock(name='PushyClient') + ns.pushy.return_value = conn + + mock_compiled = collections.defaultdict(mock.Mock) + conn.compile.side_effect = mock_compiled.__getitem__ + + MON_SECRET = 'AQBWDj5QAP6LHhAAskVBnUkYHJ7eYREmKo5qKA==' + + def _create_mon(cluster, get_monitor_secret): + secret = get_monitor_secret() + assert secret == MON_SECRET + + mock_compiled[mon.create_mon].side_effect = _create_mon + + try: + with directory(str(tmpdir)): + main( + args=['-v', 'mon'], + namespace=ns, + ) + except SystemExit as e: + raise AssertionError('Unexpected exit: %s', e) + + ns.pushy.assert_called_once_with('ssh+sudo:host1') + + mock_compiled.pop(mon.write_conf).assert_called_once_with( + cluster='ceph', + conf="""\ +[global] +fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0 +mon_initial_members = host1 + +""", + ) + + mock_compiled.pop(mon.create_mon).assert_called_once_with( + cluster='ceph', + get_monitor_secret=mock.ANY, + ) + + assert mock_compiled == {} diff --git a/ceph_deploy/test/test_cli_new.py b/ceph_deploy/test/test_cli_new.py new file mode 100644 index 0000000..cb2aba6 --- /dev/null +++ b/ceph_deploy/test/test_cli_new.py @@ -0,0 +1,110 @@ +import pytest +import re +import subprocess +import uuid + +from .. import conf + + +def test_help(tmpdir, cli): + with cli( + args=['ceph-deploy', 'new', '--help'], + stdout=subprocess.PIPE, + ) as p: + got = p.stdout.read() + assert got == """\ +usage: ceph-deploy new [-h] [MON [MON ...]] + +Start deploying a new cluster, and write a CLUSTER.conf for it. 
+ +positional arguments: + MON initial monitor hosts + +optional arguments: + -h, --help show this help message and exit +""" + + +def test_simple(tmpdir, cli): + with cli( + args=['ceph-deploy', 'new'], + ): + pass + assert {p.basename for p in tmpdir.listdir()} == {'ceph.conf'} + with tmpdir.join('ceph.conf').open() as f: + cfg = conf.parse(f) + assert cfg.sections() == ['global'] + + +def test_named(tmpdir, cli): + with cli( + args=['ceph-deploy', '--cluster=foo', 'new'], + ): + pass + assert {p.basename for p in tmpdir.listdir()} == {'foo.conf'} + + +def test_exists(tmpdir, cli): + with cli( + args=['ceph-deploy', 'new'], + ): + pass + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', 'new'], + stderr=subprocess.PIPE, + ) as p: + got = p.stderr.read() + assert got == """\ +ceph-deploy: Cluster config exists already: ceph.conf +""" + + assert err.value.status == 1 + # no temp files left around + assert {p.basename for p in tmpdir.listdir()} == {'ceph.conf'} + + +def pytest_funcarg__newcfg(request): + tmpdir = request.getfuncargvalue('tmpdir') + cli = request.getfuncargvalue('cli') + + def new(*args): + with cli( + args=['ceph-deploy', 'new'] + list(args), + ): + pass + with tmpdir.join('ceph.conf').open() as f: + cfg = conf.parse(f) + return cfg + return new + + +def test_uuid(newcfg): + cfg = newcfg() + fsid = cfg.get('global', 'fsid') + # make sure it's a valid uuid + uuid.UUID(hex=fsid) + # make sure it looks pretty, too + UUID_RE = re.compile( + r'^[0-9a-f]{8}-' + + r'[0-9a-f]{4}-' + # constant 4 here, we want to enforce randomness and not leak + # MACs or time + + r'4[0-9a-f]{3}-' + + r'[0-9a-f]{4}-' + + r'[0-9a-f]{12}$', + ) + assert UUID_RE.match(fsid) + + +def test_mons(newcfg): + cfg = newcfg('node01', 'node07', 'node34') + mon_initial_members = cfg.get('global', 'mon_initial_members') + assert mon_initial_members == 'node01, node07, node34' + + +def test_defaults(newcfg): + cfg = newcfg() + assert cfg.get('global', 
'auth_supported') == 'cephx' + assert cfg.get('global', 'osd_journal_size') == '1024' + assert cfg.get('global', 'filestore_xattr_use_omap') == 'true' diff --git a/ceph_deploy/test/test_cli_osd.py b/ceph_deploy/test/test_cli_osd.py new file mode 100644 index 0000000..f39981a --- /dev/null +++ b/ceph_deploy/test/test_cli_osd.py @@ -0,0 +1,137 @@ +import argparse +import collections +import mock +import pytest +import subprocess + +from ..cli import main +from .. import osd + +from .directory import directory + + +def test_help(tmpdir, cli): + with cli( + args=['ceph-deploy', 'osd', '--help'], + stdout=subprocess.PIPE, + ) as p: + got = p.stdout.read() + assert got == """\ +usage: ceph-deploy osd [-h] HOST:DISK [HOST:DISK ...] + +Prepare a data disk on remote host. + +positional arguments: + HOST:DISK host and disk to prepare + +optional arguments: + -h, --help show this help message and exit +""" + + +def test_bad_no_conf(tmpdir, cli): + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', 'osd', 'fakehost:/does-not-exist'], + stderr=subprocess.PIPE, + ) as p: + got = p.stderr.read() + assert got == """\ +ceph-deploy: Cannot load config: [Errno 2] No such file or directory: 'ceph.conf' +""" + + assert err.value.status == 1 + + +def test_bad_no_disk(tmpdir, cli): + with tmpdir.join('ceph.conf').open('w'): + pass + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', 'osd'], + stderr=subprocess.PIPE, + ) as p: + got = p.stderr.read() + assert got == """\ +usage: ceph-deploy osd [-h] HOST:DISK [HOST:DISK ...] 
+ceph-deploy osd: error: too few arguments +""" + + assert err.value.status == 2 + + +def test_simple(tmpdir): + with tmpdir.join('ceph.conf').open('w') as f: + f.write("""\ +[global] +fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0 +mon host = host1 +""") + + ns = argparse.Namespace() + + conn_osd = mock.NonCallableMock(name='PushyClient') + mock_compiled_osd = collections.defaultdict(mock.Mock) + conn_osd.compile.side_effect = mock_compiled_osd.__getitem__ + + conn_mon = mock.NonCallableMock(name='PushyClient') + mock_compiled_mon = collections.defaultdict(mock.Mock) + conn_mon.compile.side_effect = mock_compiled_mon.__getitem__ + + ns.pushy = mock.Mock() + + def _conn(url): + if url == 'ssh+sudo:host1': + return conn_mon + elif url == 'ssh+sudo:storehost1': + return conn_osd + else: + raise AssertionError('Unexpected connection url: %r', url) + ns.pushy.side_effect = _conn + + BOOTSTRAP_KEY = 'fakekeyring' + + mock_compiled_mon[osd.get_bootstrap_osd_key].return_value = BOOTSTRAP_KEY + + def _create_osd(cluster, find_key): + key = find_key() + assert key == BOOTSTRAP_KEY + + mock_compiled_osd[osd.create_osd].side_effect = _create_osd + + try: + with directory(str(tmpdir)): + main( + args=['-v', 'osd', 'storehost1:sdc'], + namespace=ns, + ) + except SystemExit as e: + raise AssertionError('Unexpected exit: %s', e) + + mock_compiled_mon.pop(osd.get_bootstrap_osd_key).assert_called_once_with( + cluster='ceph', + ) + + assert mock_compiled_mon == {} + + mock_compiled_osd.pop(osd.write_conf).assert_called_once_with( + cluster='ceph', + conf="""\ +[global] +fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0 +mon_host = host1 + +""", + ) + + mock_compiled_osd.pop(osd.create_osd).assert_called_once_with( + cluster='ceph', + find_key=mock.ANY, + ) + + mock_compiled_osd.pop(osd.prepare_disk).assert_called_once_with( + cluster='ceph', + disk='/dev/sdc', + ) + + assert mock_compiled_osd == {} diff --git a/ceph_deploy/test/test_conf.py b/ceph_deploy/test/test_conf.py new file mode 
100644 index 0000000..faa3688 --- /dev/null +++ b/ceph_deploy/test/test_conf.py @@ -0,0 +1,59 @@ +from cStringIO import StringIO +from .. import conf + + +def test_simple(): + f = StringIO("""\ +[foo] +bar = baz +""") + cfg = conf.parse(f) + assert cfg.get('foo', 'bar') == 'baz' + + +def test_indent_space(): + f = StringIO("""\ +[foo] + bar = baz +""") + cfg = conf.parse(f) + assert cfg.get('foo', 'bar') == 'baz' + + +def test_indent_tab(): + f = StringIO("""\ +[foo] +\tbar = baz +""") + cfg = conf.parse(f) + assert cfg.get('foo', 'bar') == 'baz' + + +def test_words_underscore(): + f = StringIO("""\ +[foo] +bar_thud = baz +""") + cfg = conf.parse(f) + assert cfg.get('foo', 'bar_thud') == 'baz' + assert cfg.get('foo', 'bar thud') == 'baz' + + +def test_words_space(): + f = StringIO("""\ +[foo] +bar thud = baz +""") + cfg = conf.parse(f) + assert cfg.get('foo', 'bar_thud') == 'baz' + assert cfg.get('foo', 'bar thud') == 'baz' + + +def test_words_many(): + f = StringIO("""\ +[foo] +bar__ thud quux = baz +""") + cfg = conf.parse(f) + assert cfg.get('foo', 'bar_thud_quux') == 'baz' + assert cfg.get('foo', 'bar thud quux') == 'baz' diff --git a/ceph_deploy/validate.py b/ceph_deploy/validate.py new file mode 100644 index 0000000..8ef5e73 --- /dev/null +++ b/ceph_deploy/validate.py @@ -0,0 +1,16 @@ +import argparse +import re + + +ALPHANUMERIC_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*$') + + +def alphanumeric(s): + """ + Enforces string to be alphanumeric with leading alpha. 
+ """ + if not ALPHANUMERIC_RE.match(s): + raise argparse.ArgumentTypeError( + 'argument must start with a letter and contain only letters and numbers', + ) + return s diff --git a/debian/ceph-deploy.install b/debian/ceph-deploy.install new file mode 100644 index 0000000..cec4ab6 --- /dev/null +++ b/debian/ceph-deploy.install @@ -0,0 +1 @@ +./scripts/ceph-deploy /usr/bin diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 0000000..f79f9f8 --- /dev/null +++ b/debian/changelog @@ -0,0 +1,11 @@ +ceph-deploy (1.0-1) stable; urgency=low + + * New upstream release + + -- Gary Lowell Fri, 24 May 2013 11:57:40 +0800 + +ceph-deploy (0.0.1-1) unstable; urgency=low + + * Initial release. + + -- Gary Lowell Mon, 10 Mar 2013 18:38:40 +0800 diff --git a/debian/compat b/debian/compat new file mode 100644 index 0000000..7f8f011 --- /dev/null +++ b/debian/compat @@ -0,0 +1 @@ +7 diff --git a/debian/control b/debian/control new file mode 100644 index 0000000..826de39 --- /dev/null +++ b/debian/control @@ -0,0 +1,26 @@ +Source: ceph-deploy +Maintainer: Sage Weil +Uploaders: Sage Weil +Section: admin +Priority: optional +Build-Depends: debhelper (>= 7), python-setuptools +X-Python-Version: >= 2.4 +Standards-Version: 3.9.2 +Homepage: http://ceph.com/ +Vcs-Git: git://github.com/ceph/ceph-deploy.git +Vcs-Browser: https://github.com/ceph/ceph-deploy + +Package: ceph-deploy +Architecture: all +Depends: python, + python-argparse, + python-pushy, + python-setuptools, + ${misc:Depends}, + ${python:Depends} +Description: Ceph-deploy is an easy to use configuration tool + for the Ceph distributed storage system. + . + This package includes the programs and libraries to support + simple ceph cluster deployment. 
+ diff --git a/debian/copyright b/debian/copyright new file mode 100644 index 0000000..93bc530 --- /dev/null +++ b/debian/copyright @@ -0,0 +1,3 @@ +Files: * +Copyright: (c) 2004-2012 by Sage Weil +License: LGPL2.1 (see /usr/share/common-licenses/LGPL-2.1) diff --git a/debian/rules b/debian/rules new file mode 100755 index 0000000..5151bb8 --- /dev/null +++ b/debian/rules @@ -0,0 +1,9 @@ +#!/usr/bin/make -f + +# Uncomment this to turn on verbose mode. +export DH_VERBOSE=1 +export DEB_PYTHON_INSTALL_ARGS_ALL += --install-lib=/usr/share/ceph-deploy + +%: + dh $@ + diff --git a/debian/source/format b/debian/source/format new file mode 100644 index 0000000..d3827e7 --- /dev/null +++ b/debian/source/format @@ -0,0 +1 @@ +1.0 diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..dbc0d19 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,3 @@ +pytest >=2.1.3 +tox >=1.2 +mock >=1.0b1 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..41b5dc8 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +pushy >=0.5.1 diff --git a/scripts/build-debian.sh b/scripts/build-debian.sh new file mode 100755 index 0000000..19446a4 --- /dev/null +++ b/scripts/build-debian.sh @@ -0,0 +1,66 @@ +#! /bin/sh + +# Tag tree and update version number in change log and +# in setup.py before building. + +REPO=debian-repo +COMPONENT=main +KEYID=03C3951A # Autobuild keyid +DEB_DIST="sid wheezy squeeze quantal precise oneiric natty raring" +DEB_BUILD=$(lsb_release -s -c) + +if [ ! -d debian ] ; then + echo "Are we in the right directory" + exit 1 +fi + +if gpg --list-keys 2>/dev/null | grep -q ${KEYID} ; then + echo "Signing packages and repo with ${KEYID}" +else + echo "Package signing key (${KEYID}) not found" + echo "Have you set \$GNUPGHOME ? " + exit 3 +fi + +# Build Package +echo "Building for dist: $DEB_BUILD" +dpkg-buildpackage -k$KEYID +if [ $? 
-ne 0 ] ; then + echo "Build failed" + exit 2 +fi + +# Build Repo +PKG=../ceph-deploy*.changes +mkdir -p $REPO/conf +if [ -e $REPO/conf/distributions ] ; then + rm -f $REPO/conf/distributions +fi + +for DIST in $DEB_DIST ; do + cat <> $REPO/conf/distributions +Codename: $DIST +Suite: stable +Components: $COMPONENT +Architectures: amd64 armhf i386 source +Origin: Inktank +Description: Ceph distributed file system +DebIndices: Packages Release . .gz .bz2 +DscIndices: Sources Release .gz .bz2 +Contents: .gz .bz2 +SignWith: $KEYID + +EOF +done + +echo "Adding package to repo, dist: $DEB_BUILD" +reprepro --ask-passphrase -b $REPO -C $COMPONENT --ignore=undefinedtarget --ignore=wrongdistribution include $DEB_BUILD $PKG + +for DIST in $DEB_DIST +do + [ "$DIST" = "$DEB_BUILD" ] && continue + echo "Copying package to dist: $DIST" + reprepro -b $REPO --ignore=undefinedtarget --ignore=wrongdistribution copy $DIST $DEB_BUILD ceph-deploy +done + +echo "Done" diff --git a/scripts/build-rpm.sh b/scripts/build-rpm.sh new file mode 100755 index 0000000..a22f3df --- /dev/null +++ b/scripts/build-rpm.sh @@ -0,0 +1,59 @@ +#! /bin/sh + +# Tag tree and update version number in change log and +# in setup.py before building. + +REPO=rpm-repo +KEYID=03C3951A # Autobuild keyid +BUILDAREA=./rpmbuild +DIST=el6 +RPM_BUILD=$(lsb_release -s -c) + +if [ ! -e setup.py ] ; then + echo "Are we in the right directory" + exit 1 +fi + +if gpg --list-keys 2>/dev/null | grep -q ${KEYID} ; then + echo "Signing packages and repo with ${KEYID}" +else + echo "Package signing key (${KEYID}) not found" + echo "Have you set \$GNUPGHOME ? " + exit 3 +fi + +if ! 
CREATEREPO=`which createrepo` ; then + echo "Please install the createrepo package" + exit 4 +fi + +# Create Tarball +python setup.py sdist --formats=bztar + +# Build RPM +mkdir -p rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} +BUILDAREA=`readlink -fn ${BUILDAREA}` ### rpm wants absolute path +cp ceph-deploy.spec ${BUILDAREA}/SPECS +cp dist/*.tar.bz2 ${BUILDAREA}/SOURCES +echo "buildarea is: ${BUILDAREA}" +rpmbuild -ba --define "_topdir ${BUILDAREA}" --define "_unpackaged_files_terminate_build 0" ${BUILDAREA}/SPECS/ceph-deploy.spec + +# create repo +DEST=${REPO}/${DIST} +mkdir -p ${REPO}/${DIST} +cp -r ${BUILDAREA}/*RPMS ${DEST} + +# Sign all the RPMs for this release +rpm_list=`find ${REPO} -name "*.rpm" -print` +rpm --addsign --define "_gpg_name ${KEYID}" $rpm_list + +# Construct repodata +for dir in ${DEST}/* +do + if [ -d $dir ] ; then + createrepo $dir + gpg --detach-sign --armor -u ${KEYID} $dir/repodata/repomd.xml + fi +done + +exit 0 diff --git a/scripts/ceph-deploy b/scripts/ceph-deploy new file mode 100755 index 0000000..828b1d8 --- /dev/null +++ b/scripts/ceph-deploy @@ -0,0 +1,21 @@ +#!/usr/bin/env python +import os +import platform +import sys +""" +ceph-deploy - admin tool for ceph +""" + +if os.path.exists('/usr/share/pyshared/ceph_deploy'): + sys.path.insert(0,'/usr/share/pyshared/ceph_deploy') +elif os.path.exists('/usr/share/ceph-deploy'): + sys.path.insert(0,'/usr/share/ceph-deploy') +elif os.path.exists('/usr/share/pyshared/ceph-deploy'): + sys.path.insert(0,'/usr/share/pyshared/ceph-deploy') +elif os.path.exists('/usr/lib/python2.6/site-packages/ceph_deploy'): + sys.path.insert(0,'/usr/lib/python2.6/site-packages/ceph_deploy') + +from ceph_deploy.cli import main + +if __name__ == '__main__': + main() diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..d9ec107 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[pytest] +norecursedirs = .* _* virtualenv diff --git a/setup.py b/setup.py new file mode 100644 index 
0000000..13fd526 --- /dev/null +++ b/setup.py @@ -0,0 +1,63 @@ +#!/usr/bin/python +from setuptools import setup, find_packages +import os +import sys + + +def read(fname): + path = os.path.join(os.path.dirname(__file__), fname) + f = open(path) + return f.read() + +install_requires = [] +pyversion = sys.version_info[:2] +if pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1): + install_requires.append('argparse') + +setup( + name='ceph-deploy', + version='0.1', + packages=find_packages(), + + author='Tommi Virtanen', + author_email='tommi.virtanen@inktank.com', + description='Deploy Ceph with minimal infrastructure', + long_description=read('README.rst'), + license='MIT', + keywords='ceph deploy', + url="https://github.com/ceph/ceph-deploy", + + install_requires=[ + 'setuptools', + 'pushy >=0.5.1', + ] + install_requires, + + tests_require=[ + 'pytest >=2.1.3', + 'mock >=1.0b1', + ], + + entry_points={ + + 'console_scripts': [ + 'ceph-deploy = ceph_deploy.cli:main', + ], + + 'ceph_deploy.cli': [ + 'new = ceph_deploy.new:make', + 'install = ceph_deploy.install:make', + 'uninstall = ceph_deploy.install:make_uninstall', + 'purge = ceph_deploy.install:make_purge', + 'purgedata = ceph_deploy.install:make_purge_data', + 'mon = ceph_deploy.mon:make', + 'gatherkeys = ceph_deploy.gatherkeys:make', + 'osd = ceph_deploy.osd:make', + 'disk = ceph_deploy.osd:make_disk', + 'mds = ceph_deploy.mds:make', + 'forgetkeys = ceph_deploy.forgetkeys:make', + 'config = ceph_deploy.config:make', + 'admin = ceph_deploy.admin:make', + ], + + }, + ) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..c65b7af --- /dev/null +++ b/tox.ini @@ -0,0 +1,8 @@ +[tox] +envlist = py27 + +[testenv] +deps= + pytest + mock +commands=py.test {posargs:ceph_deploy} -- 2.47.3