From: Alfredo Deza Date: Thu, 14 Nov 2013 21:48:04 +0000 (-0500) Subject: allow environment variables too X-Git-Tag: v1.3.3~14^2~9 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=7daf549f10162c2222a22ab97778f09e4621569e;p=ceph-deploy.git allow environment variables too Signed-off-by: Alfredo Deza --- 7daf549f10162c2222a22ab97778f09e4621569e diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9caa3b0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,19 @@ +*~ +.#* +## the next line needs to start with a backslash to avoid looking like +## a comment +\#*# +.*.swp + +*.pyc +*.pyo +*.egg-info +/build +/dist +build + +/virtualenv +/.tox + +/ceph-deploy +/*.conf diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..26624cf --- /dev/null +++ b/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012 Inktank Storage, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..22710c1 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +include *.rst +include LICENSE +include scripts/ceph-deploy +include vendor.py +prune ceph_deploy/test diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..201a0fe --- /dev/null +++ b/README.rst @@ -0,0 +1,338 @@ +======================================================== + ceph-deploy -- Deploy Ceph with minimal infrastructure +======================================================== + +``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to +the servers, ``sudo``, and some Python. It runs fully on your +workstation, requiring no servers, databases, or anything like that. + +If you set up and tear down Ceph clusters a lot, and want minimal +extra bureaucracy, this is for you. + +.. _what this tool is not: + +What this tool is not +--------------------- +It is not a generic deployment system, it is only for Ceph, and is designed +for users who want to quickly get Ceph running with sensible initial settings +without the overhead of installing Chef, Puppet or Juju. + +It does not handle client configuration beyond pushing the Ceph config file +and users who want fine-control over security settings, partitions or directory +locations should use a tool such as Chef or Puppet. + + +Installation +============ +Depending on what type of usage you are going to have with ``ceph-deploy`` you +might want to look into the different ways to install it. For automation, you +might want to ``bootstrap`` directly. Regular users of ``ceph-deploy`` would +probably install from the OS packages or from the Python Package Index. 
+ +Python Package Index +-------------------- +If you are familiar with Python install tools (like ``pip`` and +``easy_install``) you can easily install ``ceph-deploy`` like:: + + pip install ceph-deploy + +or:: + + easy_install ceph-deploy + + +It should grab all the dependencies for you and install into the current user's +environment. + +We highly recommend using ``virtualenv`` and installing dependencies in +a contained way. + + +DEB +--- +The DEB repo can be found at http://ceph.com/packages/ceph-extras/debian/ + +But they can also be found for ``ceph`` releases in the ``ceph`` repos like:: + + ceph.com/debian-{release} + ceph.com/debian-testing + +RPM +--- +The RPM repos can be found at http://ceph.com/packages/ceph-extras/rpm/ + +Make sure you add the proper one for your distribution. + +But they can also be found for ``ceph`` releases in the ``ceph`` repos like:: + + ceph.com/rpm-{release} + ceph.com/rpm-testing + + +bootstraping +------------ +To get the source tree ready for use, run this once:: + + ./bootstrap + +You can symlink the ``ceph-deploy`` script in this somewhere +convenient (like ``~/bin``), or add the current directory to ``PATH``, +or just always type the full path to ``ceph-deploy``. + + +SSH and Remote Connections +========================== +``ceph-deploy`` will attempt to connect via SSH to hosts when the hostnames do +not match the current host's hostname. For example, if you are connecting to +host ``node1`` it will attempt an SSH connection as long as the current host's +hostname is *not* ``node1``. + +ceph-deploy at a minimum requires that the machine from which the script is +being run can ssh as root without password into each Ceph node. 
+ +To enable this generate a new ssh keypair for the root user with no passphrase +and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in:: + + /root/.ssh/authorized_keys + +and ensure that the following lines are in the sshd config:: + + PermitRootLogin yes + PermitEmptyPasswords yes + +The machine running ceph-deploy does not need to have the Ceph packages +installed unless it needs to admin the cluster directly using the ``ceph`` +command line tool. + + +usernames +--------- +When not specified the connection will be done with the same username as the +one executing ``ceph-deploy``. This is useful if the same username is shared in +all the nodes but can be cumbersome if that is not the case. + +A way to avoid this is to define the correct usernames to connect with in the +SSH config, but you can also use the ``--username`` flag as well:: + + ceph-deploy --username ceph install node1 + +``ceph-deploy`` then in turn would use ``ceph@node1`` to connect to that host. + +This would be the same expectation for any action that warrants a connection to +a remote host. + + +Managing an existing cluster +============================ + +You can use ceph-deploy to provision nodes for an existing cluster. +To grab a copy of the cluster configuration file (normally +``ceph.conf``):: + + ceph-deploy config pull HOST + +You will usually also want to gather the encryption keys used for that +cluster:: + + ceph-deploy gatherkeys MONHOST + +At this point you can skip the steps below that create a new cluster +(you already have one) and optionally skip instalation and/or monitor +creation, depending on what you are trying to accomplish. + + +Creating a new cluster +====================== + +Creating a new configuration +---------------------------- + +To create a new configuration file and secret key, decide what hosts +will run ``ceph-mon``, and run:: + + ceph-deploy new MON [MON..] + +listing the hostnames of the monitors. Each ``MON`` can be + + * a simple hostname. 
It must be DNS resolvable without the fully + qualified domain name. + * a fully qualified domain name. The hostname is assumed to be the + leading component up to the first ``.``. + * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified + domain name or IP address. For example, ``foo``, + ``foo.example.com``, ``foo:something.example.com``, and + ``foo:1.2.3.4`` are all valid. Note, however, that the hostname + should match that configured on the host ``foo``. + +The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your +current directory. + + +Edit initial cluster configuration +---------------------------------- + +You want to review the generated ``ceph.conf`` file and make sure that +the ``mon_host`` setting contains the IP addresses you would like the +monitors to bind to. These are the IPs that clients will initially +contact to authenticate to the cluster, and they need to be reachable +both by external client-facing hosts and internal cluster daemons. + +Installing packages +=================== + +To install the Ceph software on the servers, run:: + + ceph-deploy install HOST [HOST..] + +This installs the current default *stable* release. You can choose a +different release track with command line options, for example to use +a release candidate:: + + ceph-deploy install --testing HOST + +Or to test a development branch:: + + ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..] + + +Proxy or Firewall Installs +-------------------------- +If attempting to install behind a firewall or through a proxy you can +use the ``--no-adjust-repos`` that will tell ceph-deploy to skip any changes +to the distro's repository in order to install the packages and it will go +straight to package installation. + +That will allow an environment without internet access to point to *its own +repositories*. 
This means that those repositories will need to be properly +setup (and mirrored with all the necessary dependencies) before attempting an +install. + +Another alternative is to set the `wget` env variables to point to the right +hosts, for example:: + + http_proxy=http://host:port + ftp_proxy=http://host:port + https_proxy=http://host:port + + + +Deploying monitors +================== + +To actually deploy ``ceph-mon`` to the hosts you chose, run:: + + ceph-deploy mon create HOST [HOST..] + +Without explicit hosts listed, hosts in ``mon_initial_members`` in the +config file are deployed. That is, the hosts you passed to +``ceph-deploy new`` are the default value here. + +Gather keys +=========== + +To gather authenticate keys (for administering the cluster and +bootstrapping new nodes) to the local directory, run:: + + ceph-deploy gatherkeys HOST [HOST...] + +where ``HOST`` is one of the monitor hosts. + +Once these keys are in the local directory, you can provision new OSDs etc. + + +Deploying OSDs +============== + +To prepare a node for running OSDs, run:: + + ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...] + +After that, the hosts will be running OSDs for the given data disks. +If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be +created and GPT labels will be used to mark and automatically activate +OSD volumes. If an existing partition is specified, the partition +table will not be modified. If you want to destroy the existing +partition table on DISK first, you can include the ``--zap-disk`` +option. + +If there is already a prepared disk or directory that is ready to become an +OSD, you can also do:: + + ceph-deploy osd activate HOST:DIR[:JOURNAL] [...] + +This is useful when you are managing the mounting of volumes yourself. + + +Admin hosts +=========== + +To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring`` +keyring so that it can administer the cluster, run:: + + ceph-deploy admin HOST [HOST ...] 
+ +Forget keys +=========== + +The ``new`` and ``gatherkeys`` put some Ceph authentication keys in keyrings in +the local directory. If you are worried about them being there for security +reasons, run:: + + ceph-deploy forgetkeys + +and they will be removed. If you need them again later to deploy additional +nodes, simply re-run:: + + ceph-deploy gatherkeys HOST [HOST...] + +and they will be retrieved from an existing monitor node. + +Multiple clusters +================= + +All of the above commands take a ``--cluster=NAME`` option, allowing +you to manage multiple clusters conveniently from one workstation. +For example:: + + ceph-deploy --cluster=us-west new + vi us-west.conf + ceph-deploy --cluster=us-west mon + +FAQ +=== + +Before anything +--------------- +Make sure you have the latest version of ``ceph-deploy``. It is actively +developed and releases are coming weekly (on average). The most recent versions +of ``ceph-deploy`` will have a ``--version`` flag you can use, otherwise check +with your package manager and update if there is anything new. + +Why is feature X not implemented? +--------------------------------- +Usually, features are added when/if it is sensible for someone that wants to +get started with ceph and said feature would make sense in that context. If +you believe this is the case and you've read "`what this tool is not`_" and +still think feature ``X`` should exist in ceph-deploy, open a feature request +in the ceph tracker: http://tracker.ceph.com/projects/devops/issues + +A command gave me an error, what is going on? +--------------------------------------------- +Most of the commands for ``ceph-deploy`` are meant to be run remotely in a host +that you have configured when creating the initial config. If a given command +is not working as expected try to run the command that failed in the remote +host and assert the behavior there. 
+ +If the behavior in the remote host is the same, then it is probably not +something wrong with ``ceph-deploy`` per-se. Make sure you capture the output +of both the ``ceph-deploy`` output and the output of the command in the remote +host. + +Issues with monitors +-------------------- +If your monitors are not starting, make sure that the ``{hostname}`` you used +when you ran ``ceph-deploy mon create {hostname}`` match the actual ``hostname -s`` +in the remote host. + +Newer versions of ``ceph-deploy`` should warn you if the results are different +but that might prevent the monitors from reaching quorum. diff --git a/bootstrap b/bootstrap new file mode 100755 index 0000000..55def90 --- /dev/null +++ b/bootstrap @@ -0,0 +1,58 @@ +#!/bin/sh +set -e + +if command -v lsb_release >/dev/null 2>&1; then + case "$(lsb_release --id --short)" in + Ubuntu|Debian) + for package in python-virtualenv; do + if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then + # add a space after old values + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages, please install them:" 1>&2 + echo " sudo apt-get install $missing" + exit 1 + fi + ;; + esac + + case "$(lsb_release --id --short | awk '{print $1}')" in + openSUSE|SUSE) + for package in python-virtualenv; do + if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages, please install them:" 1>&2 + echo " sudo zypper install $missing" + exit 1 + fi + ;; + esac + +else + if [ -f /etc/redhat-release ]; then + case "$(cat /etc/redhat-release | awk '{print $1}')" in + CentOS) + for package in python-virtualenv; do + if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages, please install them:" 
1>&2 + echo " sudo yum install $missing" + exit 1 + fi + ;; + esac + fi +fi + +test -d virtualenv || virtualenv virtualenv +./virtualenv/bin/python setup.py develop +./virtualenv/bin/pip install -r requirements.txt -r requirements-dev.txt +test -e ceph-deploy || ln -s virtualenv/bin/ceph-deploy . diff --git a/ceph-deploy.spec b/ceph-deploy.spec new file mode 100644 index 0000000..26f74b2 --- /dev/null +++ b/ceph-deploy.spec @@ -0,0 +1,79 @@ +# +# spec file for package ceph-deploy +# + +%if ! (0%{?fedora} > 12 || 0%{?rhel} > 5) +%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")} +%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")} +%endif + +################################################################################# +# common +################################################################################# +Name: ceph-deploy +Version: 1.3.2 +Release: 0 +Summary: Admin and deploy tool for Ceph +License: MIT +Group: System/Filesystems +URL: http://ceph.com/ +Source0: %{name}-%{version}.tar.bz2 +BuildRoot: %{_tmppath}/%{name}-%{version}-build +BuildRequires: python-devel +BuildRequires: python-distribute +BuildRequires: python-setuptools +BuildRequires: python-virtualenv +BuildRequires: python-mock +BuildRequires: python-tox +%if 0%{?suse_version} +BuildRequires: python-pytest +%else +BuildRequires: pytest +%endif +BuildRequires: git +Requires: python-argparse +Requires: python-distribute +#Requires: lsb-release +#Requires: ceph +%if 0%{?suse_version} && 0%{?suse_version} <= 1110 +%{!?python_sitelib: %global python_sitelib %(python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} +%else +BuildArch: noarch +%endif + +################################################################################# +# specific 
+################################################################################# +%if 0%{defined suse_version} +%py_requires +%endif + +%if 0%{?rhel} +BuildRequires: python >= %{pyver} +Requires: python >= %{pyver} +%endif + +%description +An easy to use admin tool for deploy ceph storage clusters. + +%prep +#%setup -q -n %{name} +%setup -q + +%build +#python setup.py build + +%install +python setup.py install --prefix=%{_prefix} --root=%{buildroot} +install -m 0755 -D scripts/ceph-deploy $RPM_BUILD_ROOT/usr/bin + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" + +%files +%defattr(-,root,root) +%doc LICENSE README.rst +%{_bindir}/ceph-deploy +%{python_sitelib}/* + +%changelog diff --git a/ceph_deploy/__init__.py b/ceph_deploy/__init__.py new file mode 100644 index 0000000..1939a71 --- /dev/null +++ b/ceph_deploy/__init__.py @@ -0,0 +1,3 @@ + +__version__ = '1.3.2' + diff --git a/ceph_deploy/admin.py b/ceph_deploy/admin.py new file mode 100644 index 0000000..bc9a6fc --- /dev/null +++ b/ceph_deploy/admin.py @@ -0,0 +1,66 @@ +import logging + +from cStringIO import StringIO + +from . import exc +from . import conf +from .cliutil import priority +from . 
import hosts + +LOG = logging.getLogger(__name__) + + +def admin(args): + cfg = conf.load(args) + conf_data = StringIO() + cfg.write(conf_data) + + try: + with file('%s.client.admin.keyring' % args.cluster, 'rb') as f: + keyring = f.read() + except: + raise RuntimeError('%s.client.admin.keyring not found' % + args.cluster) + + errors = 0 + for hostname in args.client: + LOG.debug('Pushing admin keys and conf to %s', hostname) + try: + distro = hosts.get(hostname, username=args.username) + hostname = distro.conn.remote_module.shortname() + + distro.conn.remote_module.write_conf( + args.cluster, + conf_data.getvalue(), + args.overwrite_conf, + ) + + distro.conn.remote_module.write_file( + '/etc/ceph/%s.client.admin.keyring' % args.cluster, + keyring + ) + + distro.conn.exit() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to configure %d admin hosts' % errors) + + +@priority(70) +def make(parser): + """ + Push configuration and client.admin key to a remote host. + """ + parser.add_argument( + 'client', + metavar='HOST', + nargs='*', + help='host to configure for ceph administration', + ) + parser.set_defaults( + func=admin, + ) diff --git a/ceph_deploy/cli.py b/ceph_deploy/cli.py new file mode 100644 index 0000000..d7c3fb5 --- /dev/null +++ b/ceph_deploy/cli.py @@ -0,0 +1,138 @@ +import pkg_resources +import argparse +import logging +import textwrap +import sys +from string import join + +import ceph_deploy +from . import exc +from . 
import validate +from .util import log +from .util.decorators import catches + +LOG = logging.getLogger(__name__) + + +__header__ = textwrap.dedent(""" + -^- + / \\ + |O o| ceph-deploy v%s + ).-.( + '/|||\` + | '|` | + '|` +""" % ceph_deploy.__version__) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description='Easy Ceph deployment\n\n%s' % __header__, + ) + verbosity = parser.add_mutually_exclusive_group(required=False) + verbosity.add_argument( + '-v', '--verbose', + action='store_true', dest='verbose', default=False, + help='be more verbose', + ) + verbosity.add_argument( + '-q', '--quiet', + action='store_true', dest='quiet', + help='be less verbose', + ) + parser.add_argument( + '-n', '--dry-run', + action='store_true', dest='dry_run', + help='do not perform any action, but report what would be done', + ) + parser.add_argument( + '--version', + action='version', + version='%s' % ceph_deploy.__version__, + help='the current installed version of ceph-deploy', + ) + parser.add_argument( + '--username', + help='the username to connect to the remote host', + ) + parser.add_argument( + '--overwrite-conf', + action='store_true', + help='overwrite an existing conf file on remote host (if present)', + ) + parser.add_argument( + '--cluster', + metavar='NAME', + help='name of the cluster', + type=validate.alphanumeric, + ) + sub = parser.add_subparsers( + title='commands', + metavar='COMMAND', + help='description', + ) + entry_points = [ + (ep.name, ep.load()) + for ep in pkg_resources.iter_entry_points('ceph_deploy.cli') + ] + entry_points.sort( + key=lambda (name, fn): getattr(fn, 'priority', 100), + ) + for (name, fn) in entry_points: + p = sub.add_parser( + name, + description=fn.__doc__, + help=fn.__doc__, + ) + # ugly kludge but i really want to have a nice way to access + # the program name, with subcommand, later + p.set_defaults(prog=p.prog) + fn(p) + parser.set_defaults( + # we want to hold 
on to this, for later + prog=parser.prog, + cluster='ceph', + ) + return parser + + +@catches((KeyboardInterrupt, RuntimeError, exc.DeployError,)) +def main(args=None, namespace=None): + parser = get_parser() + + if len(sys.argv) < 2: + parser.print_help() + sys.exit() + else: + args = parser.parse_args(args=args, namespace=namespace) + + console_loglevel = logging.DEBUG # start at DEBUG for now + if args.quiet: + console_loglevel = logging.WARNING + if args.verbose: + console_loglevel = logging.DEBUG + + # Console Logger + sh = logging.StreamHandler() + sh.setFormatter(log.color_format()) + sh.setLevel(console_loglevel) + + # File Logger + fh = logging.FileHandler('{cluster}.log'.format(cluster=args.cluster)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(logging.Formatter(log.BASE_FORMAT)) + + # because we're in a module already, __name__ is not the ancestor of + # the rest of the package; use the root as the logger for everyone + root_logger = logging.getLogger() + + # allow all levels at root_logger, handlers control individual levels + root_logger.setLevel(logging.DEBUG) + + root_logger.addHandler(sh) + root_logger.addHandler(fh) + + LOG.info("Invoked (%s): %s" %(ceph_deploy.__version__, + join(sys.argv, " "))) + return args.func(args) diff --git a/ceph_deploy/cliutil.py b/ceph_deploy/cliutil.py new file mode 100644 index 0000000..d273f31 --- /dev/null +++ b/ceph_deploy/cliutil.py @@ -0,0 +1,8 @@ +def priority(num): + """ + Decorator to add a `priority` attribute to the function. + """ + def add_priority(fn): + fn.priority = num + return fn + return add_priority diff --git a/ceph_deploy/conf.py b/ceph_deploy/conf.py new file mode 100644 index 0000000..63ea0ba --- /dev/null +++ b/ceph_deploy/conf.py @@ -0,0 +1,71 @@ +import ConfigParser +import contextlib + +from . 
import exc + + +class _TrimIndentFile(object): + def __init__(self, fp): + self.fp = fp + + def readline(self): + line = self.fp.readline() + return line.lstrip(' \t') + + +class CephConf(ConfigParser.RawConfigParser): + def optionxform(self, s): + s = s.replace('_', ' ') + s = '_'.join(s.split()) + return s + + def safe_get(self, section, key): + """ + Attempt to get a configuration value from a certain section + in a ``cfg`` object but returning None if not found. Avoids the need + to be doing try/except {ConfigParser Exceptions} every time. + """ + try: + #Use full parent function so we can replace it in the class + # if desired + return ConfigParser.RawConfigParser.get(self, section, key) + except (ConfigParser.NoSectionError, + ConfigParser.NoOptionError): + return None + + +def parse(fp): + cfg = CephConf() + ifp = _TrimIndentFile(fp) + cfg.readfp(ifp) + return cfg + + +def load(args): + path = '{cluster}.conf'.format(cluster=args.cluster) + try: + f = file(path) + except IOError as e: + raise exc.ConfigError(e) + else: + with contextlib.closing(f): + return parse(f) + + +def write_conf(cluster, conf, overwrite): + """ write cluster configuration to /etc/ceph/{cluster}.conf """ + import os + + path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster) + tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid()) + + if os.path.exists(path): + with file(path, 'rb') as f: + old = f.read() + if old != conf and not overwrite: + raise RuntimeError('config file %s exists with different content; use --overwrite-conf to overwrite' % path) + with file(tmp, 'w') as f: + f.write(conf) + f.flush() + os.fsync(f) + os.rename(tmp, path) diff --git a/ceph_deploy/config.py b/ceph_deploy/config.py new file mode 100644 index 0000000..d34db67 --- /dev/null +++ b/ceph_deploy/config.py @@ -0,0 +1,105 @@ +import logging +from cStringIO import StringIO +import os.path + +from . import exc +from . import conf +from .cliutil import priority +from . 
import hosts + +LOG = logging.getLogger(__name__) + + +def config_push(args): + cfg = conf.load(args) + conf_data = StringIO() + cfg.write(conf_data) + + errors = 0 + for hostname in args.client: + LOG.debug('Pushing config to %s', hostname) + try: + distro = hosts.get(hostname, username=args.username) + + distro.conn.remote_module.write_conf( + args.cluster, + conf_data.getvalue(), + args.overwrite_conf, + ) + + distro.conn.exit() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to config %d hosts' % errors) + + +def config_pull(args): + + topath = '{cluster}.conf'.format(cluster=args.cluster) + frompath = '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster) + + errors = 0 + for hostname in args.client: + try: + LOG.debug('Checking %s for %s', hostname, frompath) + distro = hosts.get(hostname, username=args.username) + conf_file_contents = distro.conn.remote_module.get_file(frompath) + + if conf_file_contents is not None: + LOG.debug('Got %s from %s', frompath, hostname) + if os.path.exists(topath): + with file(topath, 'rb') as f: + existing = f.read() + if existing != conf_file_contents and not args.overwrite_conf: + LOG.error('local config file %s exists with different content; use --overwrite-conf to overwrite' % topath) + raise + + with file(topath, 'w') as f: + f.write(conf_file_contents) + return + distro.conn.exit() + LOG.debug('Empty or missing %s on %s', frompath, hostname) + except: + LOG.error('Unable to pull %s from %s', frompath, hostname) + finally: + errors += 1 + + raise exc.GenericError('Failed to fetch config from %d hosts' % errors) + + +def config(args): + if args.subcommand == 'push': + config_push(args) + elif args.subcommand == 'pull': + config_pull(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + + +@priority(70) +def make(parser): + """ + Push configuration file to a remote host. 
+ """ + parser.add_argument( + 'subcommand', + metavar='SUBCOMMAND', + choices=[ + 'push', + 'pull', + ], + help='push or pull', + ) + parser.add_argument( + 'client', + metavar='HOST', + nargs='*', + help='host to push/pull the config to/from', + ) + parser.set_defaults( + func=config, + ) diff --git a/ceph_deploy/connection.py b/ceph_deploy/connection.py new file mode 100644 index 0000000..b88c1ac --- /dev/null +++ b/ceph_deploy/connection.py @@ -0,0 +1,52 @@ +import getpass +import socket +from ceph_deploy.lib.remoto import Connection + + +def get_connection(hostname, username, logger, threads=5, use_sudo=None): + """ + A very simple helper, meant to return a connection + that will know about the need to use sudo. + """ + if use_sudo is None: + use_sudo = needs_sudo() + if username: + hostname = "%s@%s" % (username, hostname) + try: + conn = Connection( + hostname, + logger=logger, + sudo=use_sudo, + threads=threads, + ) + + # Set a timeout value in seconds to disconnect and move on + # if no data is sent back. 
+ conn.global_timeout = 300 + logger.debug("connected to host: %s " % hostname) + return conn + + except Exception as error: + msg = "connecting to host: %s " % hostname + errors = "resulted in errors: %s %s" % (error.__class__.__name__, error) + raise RuntimeError(msg + errors) + + +def get_local_connection(logger, use_sudo=False): + """ + Helper for local connections that are sometimes needed to operate + on local hosts + """ + return get_connection( + socket.gethostname(), # cannot rely on 'localhost' here + None, + logger=logger, + threads=1, + use_sudo=use_sudo + ) + + +def needs_sudo(): + if getpass.getuser() == 'root': + return False + return True diff --git a/ceph_deploy/exc.py b/ceph_deploy/exc.py new file mode 100644 index 0000000..62e0eda --- /dev/null +++ b/ceph_deploy/exc.py @@ -0,0 +1,74 @@ +class DeployError(Exception): + """ + Unknown deploy error + """ + + def __str__(self): + doc = self.__doc__.strip() + return ': '.join([doc] + [str(a) for a in self.args]) + + +class UnableToResolveError(DeployError): + """ + Unable to resolve host + """ +class ClusterExistsError(DeployError): + """ + Cluster config exists already + """ + + +class ConfigError(DeployError): + """ + Cannot load config + """ + + +class NeedHostError(DeployError): + """ + No hosts specified to deploy to. + """ + + +class NeedMonError(DeployError): + """ + Cannot find nodes with ceph-mon. 
+ """ + +class NeedDiskError(DeployError): + """ + Must supply disk/path argument + """ + +class UnsupportedPlatform(DeployError): + """ + Platform is not supported + """ + def __init__(self, distro, codename): + self.distro = distro + self.codename = codename + + def __str__(self): + return '{doc}: {distro} {codename}'.format( + doc=self.__doc__.strip(), + distro=self.distro, + codename=self.codename, + ) + +class MissingPackageError(DeployError): + """ + A required package or command is missing + """ + def __init__(self, message): + self.message = message + + def __str__(self): + return self.message + + +class GenericError(DeployError): + def __init__(self, message): + self.message = message + + def __str__(self): + return self.message diff --git a/ceph_deploy/forgetkeys.py b/ceph_deploy/forgetkeys.py new file mode 100644 index 0000000..86bedbe --- /dev/null +++ b/ceph_deploy/forgetkeys.py @@ -0,0 +1,36 @@ +import logging +import errno + +from .cliutil import priority + + +LOG = logging.getLogger(__name__) + + +def forgetkeys(args): + import os + for f in [ + 'mon', + 'client.admin', + 'bootstrap-osd', + 'bootstrap-mds', + ]: + try: + os.unlink('{cluster}.{what}.keyring'.format( + cluster=args.cluster, + what=f, + )) + except OSError, e: + if e.errno == errno.ENOENT: + pass + else: + raise + +@priority(100) +def make(parser): + """ + Remove authentication keys from the local directory. + """ + parser.set_defaults( + func=forgetkeys, + ) diff --git a/ceph_deploy/gatherkeys.py b/ceph_deploy/gatherkeys.py new file mode 100644 index 0000000..eccbf7f --- /dev/null +++ b/ceph_deploy/gatherkeys.py @@ -0,0 +1,89 @@ +import os.path +import logging + +from .cliutil import priority +from . 
import hosts + + +LOG = logging.getLogger(__name__) + + +def fetch_file(args, frompath, topath, _hosts): + if os.path.exists(topath): + LOG.debug('Have %s', topath) + return True + else: + for hostname in _hosts: + LOG.debug('Checking %s for %s', hostname, frompath) + distro = hosts.get(hostname, username=args.username) + key = distro.conn.remote_module.get_file( + frompath.format(hostname=hostname) + ) + + if key is not None: + LOG.debug('Got %s key from %s.', topath, hostname) + with file(topath, 'w') as f: + f.write(key) + return True + distro.conn.exit() + LOG.warning('Unable to find %s on %s', frompath, _hosts) + return False + + +def gatherkeys(args): + ret = 0 + + # client.admin + r = fetch_file( + args=args, + frompath='/etc/ceph/{cluster}.client.admin.keyring'.format( + cluster=args.cluster), + topath='{cluster}.client.admin.keyring'.format( + cluster=args.cluster), + _hosts=args.mon, + ) + if not r: + ret = 1 + + # mon. + r = fetch_file( + args=args, + frompath='/var/lib/ceph/mon/%s-{hostname}/keyring' % args.cluster, + topath='{cluster}.mon.keyring'.format(cluster=args.cluster), + _hosts=args.mon, + ) + if not r: + ret = 1 + + # bootstrap + for what in ['osd', 'mds']: + r = fetch_file( + args=args, + frompath='/var/lib/ceph/bootstrap-{what}/{cluster}.keyring'.format( + cluster=args.cluster, + what=what), + topath='{cluster}.bootstrap-{what}.keyring'.format( + cluster=args.cluster, + what=what), + _hosts=args.mon, + ) + if not r: + ret = 1 + + return ret + + +@priority(40) +def make(parser): + """ + Gather authentication keys for provisioning new nodes. + """ + parser.add_argument( + 'mon', + metavar='HOST', + nargs='+', + help='monitor host to pull keys from', + ) + parser.set_defaults( + func=gatherkeys, + ) diff --git a/ceph_deploy/hosts/__init__.py b/ceph_deploy/hosts/__init__.py new file mode 100644 index 0000000..f4ba203 --- /dev/null +++ b/ceph_deploy/hosts/__init__.py @@ -0,0 +1,82 @@ +""" +We deal (mostly) with remote hosts. 
To avoid special casing each different +commands (e.g. using `yum` as opposed to `apt`) we can make a one time call to +that remote host and set all the special cases for running commands depending +on the type of distribution/version we are dealing with. +""" +import logging +from ceph_deploy import exc, lsb +from ceph_deploy.hosts import debian, centos, fedora, suse, remotes +from ceph_deploy.connection import get_connection + +logger = logging.getLogger() + + +def get(hostname, username=None, fallback=None): + """ + Retrieve the module that matches the distribution of a ``hostname``. This + function will connect to that host and retrieve the distribution + informaiton, then return the appropriate module and slap a few attributes + to that module defining the information it found from the hostname. + + For example, if host ``node1.example.com`` is an Ubuntu server, the + ``debian`` module would be returned and the following would be set:: + + module.name = 'ubuntu' + module.release = '12.04' + module.codename = 'precise' + + :param hostname: A hostname that is reachable/resolvable over the network + :param fallback: Optional fallback to use if no supported distro is found + """ + conn = get_connection( + hostname, + username=username, + logger=logging.getLogger(hostname) + ) + conn.import_module(remotes) + distro_name, release, codename = conn.remote_module.platform_information() + if not codename: + raise exc.UnsupportedPlatform(distro=distro_name, codename=codename) + + machine_type = conn.remote_module.machine_type() + + module = _get_distro(distro_name) + module.name = distro_name + module.release = release + module.codename = codename + module.conn = conn + module.machine_type = machine_type + module.init = lsb.choose_init(distro_name, codename) + + return module + + +def _get_distro(distro, fallback=None): + distro = _normalized_distro_name(distro) + distributions = { + 'debian': debian, + 'ubuntu': debian, + 'centos': centos, + 'scientific': centos, + 
'redhat': centos, + 'fedora': fedora, + 'suse': suse, + } + try: + return distributions[distro] + except KeyError: + if fallback: + return _get_distro(fallback) + raise exc.UnsupportedPlatform(distro=distro, codename='') + + +def _normalized_distro_name(distro): + distro = distro.lower() + if distro.startswith(('redhat', 'red hat')): + return 'redhat' + elif distro.startswith(('scientific', 'scientific linux')): + return 'scientific' + elif distro.startswith(('suse', 'opensuse')): + return 'suse' + return distro diff --git a/ceph_deploy/hosts/centos/__init__.py b/ceph_deploy/hosts/centos/__init__.py new file mode 100644 index 0000000..b6f4858 --- /dev/null +++ b/ceph_deploy/hosts/centos/__init__.py @@ -0,0 +1,10 @@ +import mon +from install import install, firewall_install +from uninstall import uninstall + +# Allow to set some information about this distro +# + +distro = None +release = None +codename = None diff --git a/ceph_deploy/hosts/centos/install.py b/ceph_deploy/hosts/centos/install.py new file mode 100644 index 0000000..1dd0a63 --- /dev/null +++ b/ceph_deploy/hosts/centos/install.py @@ -0,0 +1,126 @@ +from ceph_deploy.util import pkg_managers, templates +from ceph_deploy.lib.remoto import process + + +def install(distro, version_kind, version, adjust_repos): + release = distro.release + machine = distro.machine_type + + # Even before EPEL, make sure we have `wget` + pkg_managers.yum(distro.conn, 'wget') + + # Get EPEL installed before we continue: + if adjust_repos: + install_epel(distro) + if version_kind in ['stable', 'testing']: + key = 'release' + else: + key = 'autobuild' + + if adjust_repos: + process.run( + distro.conn, + [ + 'rpm', + '--import', + "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc".format(key=key) + ] + ) + + if version_kind == 'stable': + url = 'http://ceph.com/rpm-{version}/el6/'.format( + version=version, + ) + elif version_kind == 'testing': + url = 'http://ceph.com/rpm-testing/' + elif version_kind == 'dev': + 
url = 'http://gitbuilder.ceph.com/ceph-rpm-centos{release}-{machine}-basic/ref/{version}/'.format( + release=release.split(".",1)[0], + machine=machine, + version=version, + ) + + process.run( + distro.conn, + [ + 'rpm', + '-Uvh', + '--replacepkgs', + '{url}noarch/ceph-release-1-0.el6.noarch.rpm'.format(url=url), + ], + ) + + process.run( + distro.conn, + [ + 'yum', + '-y', + '-q', + 'install', + 'ceph', + ], + ) + + +def install_epel(distro): + """ + CentOS and Scientific need the EPEL repo, otherwise Ceph cannot be + installed. + """ + if distro.name.lower() in ['centos', 'scientific']: + distro.conn.logger.info('adding EPEL repository') + if float(distro.release) >= 6: + process.run( + distro.conn, + ['wget', 'http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm'], + ) + pkg_managers.rpm( + distro.conn, + [ + '--replacepkgs', + 'epel-release-6*.rpm', + ], + ) + else: + process.run( + distro.conn, + ['wget', 'http://dl.fedoraproject.org/pub/epel/5/x86_64/epel-release-5-4.noarch.rpm'], + ) + pkg_managers.rpm( + distro.conn, + [ + '--replacepkgs', + 'epel-release-5*.rpm' + ], + ) + + +def firewall_install(distro, repo_url, gpg_url, adjust_repos): + repo_url = repo_url.strip('/') # Remove trailing slashes + gpg_fallback = 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' + logger = distro.conn.logger + if gpg_url is None: + logger.warning('--gpg-url was not used, will fallback') + logger.warning('using GPG fallback: %s', gpg_fallback) + gpg_url = gpg_fallback + # Before any install, make sure we have `wget` + pkg_managers.yum(distro.conn, 'wget') + + if adjust_repos: + process.run( + distro.conn, + [ + 'rpm', + '--import', + gpg_url, + ] + ) + + ceph_repo_content = templates.ceph_repo.format( + repo_url=repo_url, + gpg_url=gpg_url + ) + + distro.conn.remote_module.write_yum_repo(ceph_repo_content) + + pkg_managers.yum(distro.conn, 'ceph') diff --git a/ceph_deploy/hosts/centos/mon/__init__.py 
b/ceph_deploy/hosts/centos/mon/__init__.py new file mode 100644 index 0000000..fca0e0d --- /dev/null +++ b/ceph_deploy/hosts/centos/mon/__init__.py @@ -0,0 +1 @@ +from create import create diff --git a/ceph_deploy/hosts/centos/mon/create.py b/ceph_deploy/hosts/centos/mon/create.py new file mode 100644 index 0000000..16b9f22 --- /dev/null +++ b/ceph_deploy/hosts/centos/mon/create.py @@ -0,0 +1,21 @@ +from ceph_deploy.hosts import common +from ceph_deploy.lib.remoto import process + + +def create(distro, args, monitor_keyring): + hostname = distro.conn.remote_module.shortname() + common.mon_create(distro, args, monitor_keyring, hostname) + service = distro.conn.remote_module.which_service() + + process.run( + distro.conn, + [ + service, + 'ceph', + '-c', + '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster), + 'start', + 'mon.{hostname}'.format(hostname=hostname) + ], + timeout=7, + ) diff --git a/ceph_deploy/hosts/centos/uninstall.py b/ceph_deploy/hosts/centos/uninstall.py new file mode 100644 index 0000000..4d17133 --- /dev/null +++ b/ceph_deploy/hosts/centos/uninstall.py @@ -0,0 +1,12 @@ +from ceph_deploy.util import pkg_managers + + +def uninstall(conn, purge=False): + packages = [ + 'ceph', + ] + + pkg_managers.yum_remove( + conn, + packages, + ) diff --git a/ceph_deploy/hosts/common.py b/ceph_deploy/hosts/common.py new file mode 100644 index 0000000..673cd30 --- /dev/null +++ b/ceph_deploy/hosts/common.py @@ -0,0 +1,67 @@ +from ceph_deploy.util import paths +from ceph_deploy import conf +from ceph_deploy.lib.remoto import process +from StringIO import StringIO + + +def ceph_version(conn): + """ + Log the remote ceph-version by calling `ceph --version` + """ + return process.run(conn, ['ceph', '--version']) + + +def mon_create(distro, args, monitor_keyring, hostname): + logger = distro.conn.logger + logger.debug('remote hostname: %s' % hostname) + path = paths.mon.path(args.cluster, hostname) + done_path = paths.mon.done(args.cluster, hostname) + init_path = 
paths.mon.init(args.cluster, hostname, distro.init) + + configuration = conf.load(args) + conf_data = StringIO() + configuration.write(conf_data) + + # write the configuration file + distro.conn.remote_module.write_conf( + args.cluster, + conf_data.getvalue(), + args.overwrite_conf, + ) + + # if the mon path does not exist, create it + distro.conn.remote_module.create_mon_path(path) + + logger.debug('checking for done path: %s' % done_path) + if not distro.conn.remote_module.path_exists(done_path): + logger.debug('done path does not exist: %s' % done_path) + if not distro.conn.remote_module.path_exists(paths.mon.constants.tmp_path): + logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path) + distro.conn.remote_module.makedir(paths.mon.constants.tmp_path) + keyring = paths.mon.keyring(args.cluster, hostname) + + logger.info('creating keyring file: %s' % keyring) + distro.conn.remote_module.write_monitor_keyring( + keyring, + monitor_keyring, + ) + + process.run( + distro.conn, + [ + 'ceph-mon', + '--cluster', args.cluster, + '--mkfs', + '-i', hostname, + '--keyring', keyring, + ], + ) + + logger.info('unlinking keyring file %s' % keyring) + distro.conn.remote_module.unlink(keyring) + + # create the done file + distro.conn.remote_module.create_done_path(done_path) + + # create init path + distro.conn.remote_module.create_init_path(init_path) diff --git a/ceph_deploy/hosts/debian/__init__.py b/ceph_deploy/hosts/debian/__init__.py new file mode 100644 index 0000000..3d105e7 --- /dev/null +++ b/ceph_deploy/hosts/debian/__init__.py @@ -0,0 +1,10 @@ +import mon +from install import install +from uninstall import uninstall + +# Allow to set some information about this distro +# + +distro = None +release = None +codename = None diff --git a/ceph_deploy/hosts/debian/install.py b/ceph_deploy/hosts/debian/install.py new file mode 100644 index 0000000..f5e3a5c --- /dev/null +++ b/ceph_deploy/hosts/debian/install.py @@ -0,0 +1,93 @@ +from ceph_deploy.lib.remoto 
import process + + +def install(distro, version_kind, version, adjust_repos): + codename = distro.codename + machine = distro.machine_type + + if version_kind in ['stable', 'testing']: + key = 'release' + else: + key = 'autobuild' + + # Make sure ca-certificates is installed + process.run( + distro.conn, + [ + 'env', + 'DEBIAN_FRONTEND=noninteractive', + 'apt-get', + '-q', + 'install', + '--assume-yes', + 'ca-certificates', + ] + ) + + if adjust_repos: + process.run( + distro.conn, + [ + 'wget', + '-q', + '-O', + '{key}.asc'.format(key=key), + 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc'.format(key=key), + ], + stop_on_nonzero=False, + ) + + process.run( + distro.conn, + [ + 'apt-key', + 'add', + '{key}.asc'.format(key=key) + ] + ) + + if version_kind == 'stable': + url = 'http://ceph.com/debian-{version}/'.format( + version=version, + ) + elif version_kind == 'testing': + url = 'http://ceph.com/debian-testing/' + elif version_kind == 'dev': + url = 'http://gitbuilder.ceph.com/ceph-deb-{codename}-{machine}-basic/ref/{version}'.format( + codename=codename, + machine=machine, + version=version, + ) + else: + raise RuntimeError('Unknown version kind: %r' % version_kind) + + distro.conn.remote_module.write_sources_list(url, codename) + + process.run( + distro.conn, + ['apt-get', '-q', 'update'], + ) + + # TODO this does not downgrade -- should it? 
+ process.run( + distro.conn, + [ + 'env', + 'DEBIAN_FRONTEND=noninteractive', + 'DEBIAN_PRIORITY=critical', + 'apt-get', + '-q', + '-o', 'Dpkg::Options::=--force-confnew', + '--no-install-recommends', + '--assume-yes', + 'install', + '--', + 'ceph', + 'ceph-mds', + 'ceph-common', + 'ceph-fs-common', + # ceph only recommends gdisk, make sure we actually have + # it; only really needed for osds, but minimal collateral + 'gdisk', + ], + ) diff --git a/ceph_deploy/hosts/debian/mon/__init__.py b/ceph_deploy/hosts/debian/mon/__init__.py new file mode 100644 index 0000000..fca0e0d --- /dev/null +++ b/ceph_deploy/hosts/debian/mon/__init__.py @@ -0,0 +1 @@ +from create import create diff --git a/ceph_deploy/hosts/debian/mon/create.py b/ceph_deploy/hosts/debian/mon/create.py new file mode 100644 index 0000000..29fa49c --- /dev/null +++ b/ceph_deploy/hosts/debian/mon/create.py @@ -0,0 +1,42 @@ +from ceph_deploy.hosts import common +from ceph_deploy.lib.remoto import process + + +def create(distro, args, monitor_keyring): + logger = distro.conn.logger + hostname = distro.conn.remote_module.shortname() + common.mon_create(distro, args, monitor_keyring, hostname) + service = distro.conn.remote_module.which_service() + + if not service: + logger.warning('could not find `service` executable') + + if distro.init == 'upstart': # Ubuntu uses upstart + process.run( + distro.conn, + [ + 'initctl', + 'emit', + 'ceph-mon', + 'cluster={cluster}'.format(cluster=args.cluster), + 'id={hostname}'.format(hostname=hostname), + ], + timeout=7, + ) + + elif distro.init == 'sysvinit': # Debian uses sysvinit + + process.run( + distro.conn, + [ + service, + 'ceph', + '-c', + '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster), + 'start', + 'mon.{hostname}'.format(hostname=hostname) + ], + timeout=7, + ) + else: + raise RuntimeError('create cannot use init %s' % distro.init) diff --git a/ceph_deploy/hosts/debian/uninstall.py b/ceph_deploy/hosts/debian/uninstall.py new file mode 100644 index 
0000000..8ffdca1 --- /dev/null +++ b/ceph_deploy/hosts/debian/uninstall.py @@ -0,0 +1,16 @@ +from ceph_deploy.util import pkg_managers +from ceph_deploy.lib.remoto import process + + +def uninstall(conn, purge=False): + packages = [ + 'ceph', + 'ceph-mds', + 'ceph-common', + 'ceph-fs-common', + ] + pkg_managers.apt_remove( + conn, + packages, + purge=purge, + ) diff --git a/ceph_deploy/hosts/fedora/__init__.py b/ceph_deploy/hosts/fedora/__init__.py new file mode 100644 index 0000000..3d105e7 --- /dev/null +++ b/ceph_deploy/hosts/fedora/__init__.py @@ -0,0 +1,10 @@ +import mon +from install import install +from uninstall import uninstall + +# Allow to set some information about this distro +# + +distro = None +release = None +codename = None diff --git a/ceph_deploy/hosts/fedora/install.py b/ceph_deploy/hosts/fedora/install.py new file mode 100644 index 0000000..d6963ae --- /dev/null +++ b/ceph_deploy/hosts/fedora/install.py @@ -0,0 +1,63 @@ +from ceph_deploy.lib.remoto import process + + +def install(distro, version_kind, version, adjust_repos): + release = distro.release + machine = distro.machine_type + + if version_kind in ['stable', 'testing']: + key = 'release' + else: + key = 'autobuild' + + if adjust_repos: + process.run( + distro.conn, + [ + 'rpm', + '--import', + "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc".format(key=key) + ] + ) + + if version_kind == 'stable': + url = 'http://ceph.com/rpm-{version}/fc{release}/'.format( + version=version, + release=release, + ) + elif version_kind == 'testing': + url = 'http://ceph.com/rpm-testing/fc{release}'.format( + release=release, + ) + elif version_kind == 'dev': + url = 'http://gitbuilder.ceph.com/ceph-rpm-fc{release}-{machine}-basic/ref/{version}/'.format( + release=release.split(".", 1)[0], + machine=machine, + version=version, + ) + + process.run( + distro.conn, + [ + 'rpm', + '-Uvh', + '--replacepkgs', + '--force', + '--quiet', + 
'{url}noarch/ceph-release-1-0.fc{release}.noarch.rpm'.format( + url=url, + release=release, + ), + ] + ) + + process.run( + distro.conn, + [ + 'yum', + '-y', + '-q', + 'install', + 'ceph', + ], + ) diff --git a/ceph_deploy/hosts/fedora/mon/__init__.py b/ceph_deploy/hosts/fedora/mon/__init__.py new file mode 100644 index 0000000..fca0e0d --- /dev/null +++ b/ceph_deploy/hosts/fedora/mon/__init__.py @@ -0,0 +1 @@ +from create import create diff --git a/ceph_deploy/hosts/fedora/mon/create.py b/ceph_deploy/hosts/fedora/mon/create.py new file mode 100644 index 0000000..16b9f22 --- /dev/null +++ b/ceph_deploy/hosts/fedora/mon/create.py @@ -0,0 +1,21 @@ +from ceph_deploy.hosts import common +from ceph_deploy.lib.remoto import process + + +def create(distro, args, monitor_keyring): + hostname = distro.conn.remote_module.shortname() + common.mon_create(distro, args, monitor_keyring, hostname) + service = distro.conn.remote_module.which_service() + + process.run( + distro.conn, + [ + service, + 'ceph', + '-c', + '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster), + 'start', + 'mon.{hostname}'.format(hostname=hostname) + ], + timeout=7, + ) diff --git a/ceph_deploy/hosts/fedora/uninstall.py b/ceph_deploy/hosts/fedora/uninstall.py new file mode 100644 index 0000000..4c80827 --- /dev/null +++ b/ceph_deploy/hosts/fedora/uninstall.py @@ -0,0 +1,13 @@ +from ceph_deploy.util import pkg_managers + + +def uninstall(conn, purge=False): + packages = [ + 'ceph', + ] + + pkg_managers.yum_remove( + conn, + packages, + ) + diff --git a/ceph_deploy/hosts/remotes.py b/ceph_deploy/hosts/remotes.py new file mode 100644 index 0000000..0c17e61 --- /dev/null +++ b/ceph_deploy/hosts/remotes.py @@ -0,0 +1,192 @@ +import errno +import socket +import os +import shutil +import tempfile +import platform + + +def platform_information(): + """ detect platform information from remote host """ + distro, release, codename = platform.linux_distribution() + if not codename and 'debian' in distro.lower(): 
# this could be an empty string in Debian + debian_codenames = { + '8': 'jessie', + '7': 'wheezy', + '6': 'squeeze', + } + major_version = release.split('.')[0] + codename = debian_codenames.get(major_version, '') + + return ( + str(distro).rstrip(), + str(release).rstrip(), + str(codename).rstrip() + ) + + +def machine_type(): + """ detect machine type """ + return platform.machine() + + +def write_sources_list(url, codename): + """add ceph deb repo to sources.list""" + with file('/etc/apt/sources.list.d/ceph.list', 'w') as f: + f.write('deb {url} {codename} main\n'.format( + url=url, + codename=codename, + )) + + +def write_yum_repo(content): + """set the contents of /etc/yum.repos.d/ceph.repo""" + write_file('/etc/yum.repos.d/ceph.repo', content) + + +def write_conf(cluster, conf, overwrite): + """ write cluster configuration to /etc/ceph/{cluster}.conf """ + path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster) + tmp_file = tempfile.NamedTemporaryFile(delete=False) + err_msg = 'config file %s exists with different content; use --overwrite-conf to overwrite' % path + + if os.path.exists(path): + with file(path, 'rb') as f: + old = f.read() + if old != conf and not overwrite: + raise RuntimeError(err_msg) + tmp_file.write(conf) + tmp_file.close() + shutil.move(tmp_file.name, path) + return + if os.path.exists('/etc/ceph'): + with open(path, 'w') as f: + f.write(conf) + else: + err_msg = '/etc/ceph/ does not exist - could not write config' + raise RuntimeError(err_msg) + + +def write_keyring(path, key): + """ create a keyring file """ + tmp_file = tempfile.NamedTemporaryFile(delete=False) + tmp_file.write(key) + shutil.move(tmp_file.name, path) + + +def create_mon_path(path): + """create the mon path if it does not exist""" + if not os.path.exists(path): + os.makedirs(path) + + +def create_done_path(done_path): + """create a done file to avoid re-doing the mon deployment""" + with file(done_path, 'w'): + pass + + +def create_init_path(init_path): + """create 
the init path if it does not exist""" + if not os.path.exists(init_path): + with file(init_path, 'w'): + pass + + +def append_to_file(file_path, contents): + """append contents to file""" + with open(file_path, 'a') as f: + f.write(contents) + + +def path_exists(path): + return os.path.exists(path) + + +def makedir(path): + os.makedirs(path) + + +def unlink(_file): + os.unlink(_file) + + +def write_monitor_keyring(keyring, monitor_keyring): + """create the monitor keyring file""" + write_file(keyring, monitor_keyring) + + +def write_file(path, content): + with file(path, 'w') as f: + f.write(content) + + +def touch_file(path): + with file(path, 'wb') as f: # noqa + pass + + +def get_file(path): + """ fetch remote file """ + try: + with file(path, 'rb') as f: + return f.read() + except IOError: + pass + + +def shortname(): + """get remote short hostname""" + return socket.gethostname().split('.', 1)[0] + + +def which_service(): + """ locating the `service` executable... """ + locations = ['/sbin/service', '/usr/sbin/service'] + for location in locations: + if os.path.exists(location): + return location + + +def make_mon_removed_dir(path, file_name): + """ move old monitor data """ + try: + os.makedirs('/var/lib/ceph/mon-removed') + except OSError, e: + if e.errno != errno.EEXIST: + raise + shutil.move(path, os.path.join('/var/lib/ceph/mon-removed/', file_name)) + + +def safe_mkdir(path): + """ create path if it doesn't exist """ + try: + os.mkdir(path) + except OSError, e: + if e.errno == errno.EEXIST: + pass + else: + raise + + +def zeroing(dev): + """ zeroing last few blocks of device """ + # this kills the crab + # + # sgdisk will wipe out the main copy of the GPT partition + # table (sorry), but it doesn't remove the backup copies, and + # subsequent commands will continue to complain and fail when + # they see those. zeroing the last few blocks of the device + # appears to do the trick. 
+ lba_size = 4096 + size = 33 * lba_size + return True + with file(dev, 'wb') as f: + f.seek(-size, os.SEEK_END) + f.write(size*'\0') + + +# remoto magic, needed to execute these functions remotely +if __name__ == '__channelexec__': + for item in channel: # noqa + channel.send(eval(item)) # noqa diff --git a/ceph_deploy/hosts/suse/__init__.py b/ceph_deploy/hosts/suse/__init__.py new file mode 100644 index 0000000..3d105e7 --- /dev/null +++ b/ceph_deploy/hosts/suse/__init__.py @@ -0,0 +1,10 @@ +import mon +from install import install +from uninstall import uninstall + +# Allow to set some information about this distro +# + +distro = None +release = None +codename = None diff --git a/ceph_deploy/hosts/suse/install.py b/ceph_deploy/hosts/suse/install.py new file mode 100644 index 0000000..87feed9 --- /dev/null +++ b/ceph_deploy/hosts/suse/install.py @@ -0,0 +1,66 @@ +from ceph_deploy.lib.remoto import process + + +def install(distro, version_kind, version, adjust_repos): + release = distro.release + machine = distro.machine_type + + if version_kind in ['stable', 'testing']: + key = 'release' + else: + key = 'autobuild' + + if distro.codename == 'Mantis': + distro = 'opensuse12' + else: + distro = 'sles-11sp2' + + if adjust_repos: + process.run( + distro.conn, + [ + 'rpm', + '--import', + "https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc".format(key=key) + ] + ) + + if version_kind == 'stable': + url = 'http://ceph.com/rpm-{version}/{distro}/'.format( + version=version, + distro=distro, + ) + elif version_kind == 'testing': + url = 'http://ceph.com/rpm-testing/{distro}'.format(distro=distro) + elif version_kind == 'dev': + url = 'http://gitbuilder.ceph.com/ceph-rpm-{distro}{release}-{machine}-basic/ref/{version}/'.format( + distro=distro, + release=release.split(".", 1)[0], + machine=machine, + version=version, + ) + + process.run( + distro.conn, + [ + 'rpm', + '-Uvh', + '--replacepkgs', + '--force', + '--quiet', + 
'{url}noarch/ceph-release-1-0.noarch.rpm'.format( + url=url, + ), + ] + ) + + process.run( + distro.conn, + [ + 'zypper', + '--non-interactive', + '--quiet', + 'install', + 'ceph', + ], + ) diff --git a/ceph_deploy/hosts/suse/mon/__init__.py b/ceph_deploy/hosts/suse/mon/__init__.py new file mode 100644 index 0000000..fca0e0d --- /dev/null +++ b/ceph_deploy/hosts/suse/mon/__init__.py @@ -0,0 +1 @@ +from create import create diff --git a/ceph_deploy/hosts/suse/mon/create.py b/ceph_deploy/hosts/suse/mon/create.py new file mode 100644 index 0000000..0c1316c --- /dev/null +++ b/ceph_deploy/hosts/suse/mon/create.py @@ -0,0 +1,20 @@ +from ceph_deploy.hosts import common +from ceph_deploy.lib.remoto import process + + +def create(distro, args, monitor_keyring): + hostname = distro.conn.remote_module.shortname() + common.mon_create(distro, args, monitor_keyring, hostname) + service = distro.conn.remote_module.which_service() + + process.run( + distro.conn, + [ + 'rcceph', + '-c', + '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster), + 'start', + 'mon.{hostname}'.format(hostname=hostname) + ], + timeout=7, + ) diff --git a/ceph_deploy/hosts/suse/uninstall.py b/ceph_deploy/hosts/suse/uninstall.py new file mode 100644 index 0000000..740f6f3 --- /dev/null +++ b/ceph_deploy/hosts/suse/uninstall.py @@ -0,0 +1,19 @@ +from ceph_deploy.lib.remoto import process + + +def uninstall(conn, purge=False): + packages = [ + 'ceph', + 'libcephfs1', + 'librados2', + 'librbd1', + ] + cmd = [ + 'zypper', + '--non-interactive', + '--quiet', + 'remove', + ] + + cmd.extend(packages) + process.run(conn, cmd) diff --git a/ceph_deploy/install.py b/ceph_deploy/install.py new file mode 100644 index 0000000..1768431 --- /dev/null +++ b/ceph_deploy/install.py @@ -0,0 +1,309 @@ +import argparse +import logging +from distutils.util import strtobool +import os + +from . 
import hosts +from .cliutil import priority +from .lib.remoto import process + + +LOG = logging.getLogger(__name__) + + +def ceph_is_installed(conn): + """ + Check if the ceph packages are installed by looking for the + presence of the ceph command. + """ + stdout, stderr, return_code = process.check( + conn, + ['which', 'ceph'], + ) + return not return_code + + +def install(args): + version = getattr(args, args.version_kind) + version_str = args.version_kind + + if version: + version_str += ' version {version}'.format(version=version) + LOG.debug( + 'Installing %s on cluster %s hosts %s', + version_str, + args.cluster, + ' '.join(args.host), + ) + for hostname in args.host: + LOG.debug('Detecting platform for host %s ...', hostname) + distro = hosts.get(hostname, username=args.username) + LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename) + rlogger = logging.getLogger(hostname) + rlogger.info('installing ceph on %s' % hostname) + repo_url = os.environ.get('CEPH_DEPLOY_REPO_URL') or args.repo_url + gpg_url = os.environ.get('CEPH_DEPLOY_GPG_URL') or args.gpg_url + if repo_url: + rlogger.info('using custom repository location: %s', repo_url) + distro.firewall_install( + distro, + repo_url, + gpg_url, + args.adjust_repos + ) + else: + distro.install( + distro, + args.version_kind, + version, + args.adjust_repos + ) + # Check the ceph version we just installed + hosts.common.ceph_version(distro.conn) + distro.conn.exit() + + +def uninstall(args): + LOG.debug( + 'Uninstalling on cluster %s hosts %s', + args.cluster, + ' '.join(args.host), + ) + + for hostname in args.host: + LOG.debug('Detecting platform for host %s ...', hostname) + + distro = hosts.get(hostname, username=args.username) + LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename) + rlogger = logging.getLogger(hostname) + rlogger.info('uninstalling ceph on %s' % hostname) + distro.uninstall(distro.conn) + distro.conn.exit() + + +def purge(args): + 
LOG.debug( + 'Purging from cluster %s hosts %s', + args.cluster, + ' '.join(args.host), + ) + + for hostname in args.host: + LOG.debug('Detecting platform for host %s ...', hostname) + + distro = hosts.get(hostname, username=args.username) + LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename) + rlogger = logging.getLogger(hostname) + rlogger.info('purging host ... %s' % hostname) + distro.uninstall(distro.conn, purge=True) + distro.conn.exit() + + +def purge_data(args): + LOG.debug( + 'Purging data from cluster %s hosts %s', + args.cluster, + ' '.join(args.host), + ) + + installed_hosts = [] + for hostname in args.host: + distro = hosts.get(hostname, username=args.username) + if ceph_is_installed(distro.conn): + installed_hosts.append(hostname) + distro.conn.exit() + + if installed_hosts: + print "ceph is still installed on: ", installed_hosts + answer = raw_input("Continue (y/n)") + if not strtobool(answer): + return + + for hostname in args.host: + distro = hosts.get(hostname, username=args.username) + LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename) + rlogger = logging.getLogger(hostname) + rlogger.info('purging data on %s' % hostname) + + process.run( + distro.conn, + [ + 'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph', + ] + ) + if distro.conn.remote_module.path_exists('/var/lib/ceph'): + process.run( + distro.conn, + [ + 'find', '/var/lib/ceph', + '-mindepth', '1', + '-maxdepth', '2', + '-type', 'd', + '-exec', 'umount', '{}', ';', + ] + ) + process.run( + distro.conn, + [ + 'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph', + ] + ) + + process.run( + distro.conn, + [ + 'rm', '-rf', '--one-file-system', '--', '/etc/ceph/*', + ] + ) + + distro.conn.exit() + + +class StoreVersion(argparse.Action): + """ + Like ``"store"`` but also remember which one of the exclusive + options was set. + + There are three kinds of versions: stable, testing and dev. 
+ This sets ``version_kind`` to be the right one of the above. + + This kludge essentially lets us differentiate explicitly set + values from defaults. + """ + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, values) + namespace.version_kind = self.dest + + +@priority(20) +def make(parser): + """ + Install Ceph packages on remote hosts. + """ + + version = parser.add_mutually_exclusive_group() + + version.add_argument( + '--stable', + nargs='?', + action=StoreVersion, + choices=[ + 'bobtail', + 'cuttlefish', + 'dumpling', + 'emperor', + ], + metavar='CODENAME', + help='install a release known as CODENAME (done by default) (default: %(default)s)', + ) + + version.add_argument( + '--testing', + nargs=0, + action=StoreVersion, + help='install the latest development release', + ) + + version.add_argument( + '--dev', + nargs='?', + action=StoreVersion, + const='master', + metavar='BRANCH_OR_TAG', + help='install a bleeding edge build from Git branch or tag (default: %(default)s)', + ) + + version.add_argument( + '--adjust-repos', + dest='adjust_repos', + action='store_true', + help='install packages modifying source repos', + ) + + version.add_argument( + '--no-adjust-repos', + dest='adjust_repos', + action='store_false', + help='install packages without modifying source repos', + ) + + version.set_defaults( + func=install, + stable='emperor', + dev='master', + version_kind='stable', + adjust_repos=True, + ) + + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to install on', + ) + + version.add_argument( + '--repo-url', + nargs='?', + dest='repo_url', + help='specify a repo URL that mirrors/contains ceph packages', + ) + + version.add_argument( + '--gpg-url', + nargs='?', + dest='gpg_url', + help='specify a GPG key URL to be used with custom repos (defaults to ceph.com)' + ) + + parser.set_defaults( + func=install, + ) + + +@priority(80) +def make_uninstall(parser): + """ + Remove Ceph 
packages from remote hosts. + """ + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to uninstall Ceph from', + ) + parser.set_defaults( + func=uninstall, + ) + + +@priority(80) +def make_purge(parser): + """ + Remove Ceph packages from remote hosts and purge all data. + """ + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to purge Ceph from', + ) + parser.set_defaults( + func=purge, + ) + + +@priority(80) +def make_purge_data(parser): + """ + Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph + """ + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to purge Ceph data from', + ) + parser.set_defaults( + func=purge_data, + ) diff --git a/ceph_deploy/lib/__init__.py b/ceph_deploy/lib/__init__.py new file mode 100644 index 0000000..c741c0b --- /dev/null +++ b/ceph_deploy/lib/__init__.py @@ -0,0 +1,10 @@ +""" +This module is meant for vendorizing Python libraries. Most libraries will need +to have some ``sys.path`` alterations done unless they are doing relative +imports. + +Do **not** add anything to this module that does not represent a vendorized +library. +""" + +import remoto diff --git a/ceph_deploy/lsb.py b/ceph_deploy/lsb.py new file mode 100644 index 0000000..b7f3e73 --- /dev/null +++ b/ceph_deploy/lsb.py @@ -0,0 +1,121 @@ +import logging +from . import exc + + +logger = logging.getLogger(__name__) + + +def check_lsb_release(): + """ + Verify if lsb_release command is available + """ + import subprocess + + args = [ 'which', 'lsb_release', ] + process = subprocess.Popen( + args=args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + lsb_release_path, _ = process.communicate() + ret = process.wait() + if ret != 0: + raise RuntimeError('The lsb_release command was not found on remote host. 
Please install the lsb-release package.') + + +def lsb_fallback(conn): + """ + This fallback will attempt to detect the distro, release and codename for + a given remote host when lsb fails. It uses the + ``platform.linux_distribution`` module that should be fairly robust and + would prevent us from adding repositories and installing a package just to + detect a platform. + """ + distro, release, codename = conn.modules.platform.linux_distribution() + return ( + str(distro).rstrip(), + str(release).rstrip(), + str(codename).rstrip() + ) + + +def lsb_release(): + """ + Get LSB release information from lsb_release. + + Returns truple with distro, release and codename. Otherwise + the function raises an error (subprocess.CalledProcessError or + RuntimeError). + """ + import subprocess + + args = [ 'lsb_release', '-s', '-i' ] + process = subprocess.Popen( + args=args, + stdout=subprocess.PIPE, + ) + distro, _ = process.communicate() + ret = process.wait() + if ret != 0: + raise subprocess.CalledProcessError(ret, args, output=distro) + if distro == '': + raise RuntimeError('lsb_release gave invalid output for distro') + + args = [ 'lsb_release', '-s', '-r', ] + process = subprocess.Popen( + args=args, + stdout=subprocess.PIPE, + ) + release, _ = process.communicate() + ret = process.wait() + if ret != 0: + raise subprocess.CalledProcessError(ret, args, output=release) + if release == '': + raise RuntimeError('lsb_release gave invalid output for release') + + args = [ 'lsb_release', '-s', '-c', ] + process = subprocess.Popen( + args=args, + stdout=subprocess.PIPE, + ) + codename, _ = process.communicate() + ret = process.wait() + if ret != 0: + raise subprocess.CalledProcessError(ret, args, output=codename) + if codename == '': + raise RuntimeError('lsb_release gave invalid output for codename') + + return (str(distro).rstrip(), str(release).rstrip(), str(codename).rstrip()) + + +def get_lsb_release(sudo): + """ + Get LSB release information from lsb_release. 
+ + Check if lsb_release is installed on the remote host and issue + a message if not. + + Returns truple with distro, release and codename. Otherwise + the function raises an error (subprocess.CalledProcessError or + RuntimeError). + """ + try: + check_lsb_release_r = sudo.compile(check_lsb_release) + status = check_lsb_release_r() + except RuntimeError as e: + logger.warning('lsb_release was not found - inferring OS details') + return lsb_fallback(sudo) + + lsb_release_r = sudo.compile(lsb_release) + return lsb_release_r() + + +def choose_init(distro, codename): + """ + Select a init system for a given distribution. + + Returns the name of a init system (upstart, sysvinit ...). + """ + if distro == 'Ubuntu': + return 'upstart' + return 'sysvinit' diff --git a/ceph_deploy/mds.py b/ceph_deploy/mds.py new file mode 100644 index 0000000..bae4b13 --- /dev/null +++ b/ceph_deploy/mds.py @@ -0,0 +1,205 @@ +from cStringIO import StringIO +import errno +import logging +import os + +from . import conf +from . import exc +from . import hosts +from .lib.remoto import process +from .cliutil import priority + + +LOG = logging.getLogger(__name__) + + +def get_bootstrap_mds_key(cluster): + """ + Read the bootstrap-mds key for `cluster`. 
+ """ + path = '{cluster}.bootstrap-mds.keyring'.format(cluster=cluster) + try: + with file(path, 'rb') as f: + return f.read() + except IOError: + raise RuntimeError('bootstrap-mds keyring not found; run \'gatherkeys\'') + + +def create_mds(conn, name, cluster, init): + + path = '/var/lib/ceph/mds/{cluster}-{name}'.format( + cluster=cluster, + name=name + ) + + conn.remote_module.safe_mkdir(path) + + bootstrap_keyring = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format( + cluster=cluster + ) + + keypath = os.path.join(path, 'keyring') + + stdout, stderr, returncode = process.check( + conn, + [ + 'ceph', + '--cluster', cluster, + '--name', 'client.bootstrap-mds', + '--keyring', bootstrap_keyring, + 'auth', 'get-or-create', 'mds.{name}'.format(name=name), + 'osd', 'allow rwx', + 'mds', 'allow', + 'mon', 'allow profile mds', + '-o', + os.path.join(keypath), + ] + ) + if returncode > 0 and returncode != errno.EACCES: + for line in stderr: + conn.logger.error(line) + for line in stdout: + # yes stdout as err because this is an error + conn.logger.error(line) + conn.logger.error('exit code from command was: %s' % returncode) + raise RuntimeError('could not create mds') + + process.check( + conn, + [ + 'ceph', + '--cluster', cluster, + '--name', 'client.bootstrap-mds', + '--keyring', bootstrap_keyring, + 'auth', 'get-or-create', 'mds.{name}'.format(name=name), + 'osd', 'allow *', + 'mds', 'allow', + 'mon', 'allow rwx', + '-o', + os.path.join(keypath), + ] + ) + + conn.remote_module.touch_file(os.path.join(path, 'done')) + conn.remote_module.touch_file(os.path.join(path, init)) + + if init == 'upstart': + process.run( + conn, + [ + 'initctl', + 'emit', + 'ceph-mds', + 'cluster={cluster}'.format(cluster=cluster), + 'id={name}'.format(name=name), + ], + timeout=7 + ) + elif init == 'sysvinit': + process.run( + conn, + [ + 'service', + 'ceph', + 'start', + 'mds.{name}'.format(name=name), + ], + timeout=7 + ) + + +def mds_create(args): + cfg = conf.load(args) + 
LOG.debug( + 'Deploying mds, cluster %s hosts %s', + args.cluster, + ' '.join(':'.join(x or '' for x in t) for t in args.mds), + ) + + if not args.mds: + raise exc.NeedHostError() + + key = get_bootstrap_mds_key(cluster=args.cluster) + + bootstrapped = set() + errors = 0 + for hostname, name in args.mds: + try: + distro = hosts.get(hostname, username=args.username) + rlogger = distro.conn.logger + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + LOG.debug('remote host will use %s', distro.init) + + if hostname not in bootstrapped: + bootstrapped.add(hostname) + LOG.debug('deploying mds bootstrap to %s', hostname) + conf_data = StringIO() + cfg.write(conf_data) + distro.conn.remote_module.write_conf( + args.cluster, + conf_data.getvalue(), + args.overwrite_conf, + ) + + path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format( + cluster=args.cluster, + ) + + if not distro.conn.remote_module.path_exists(path): + rlogger.warning('mds keyring does not exist yet, creating one') + distro.conn.remote_module.write_keyring(path, key) + + create_mds(distro.conn, name, args.cluster, distro.init) + distro.conn.exit() + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to create %d MDSs' % errors) + + +def mds(args): + if args.subcommand == 'create': + mds_create(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + + +def colon_separated(s): + host = s + name = s + if s.count(':') == 1: + (host, name) = s.split(':') + return (host, name) + + +@priority(30) +def make(parser): + """ + Deploy ceph MDS on remote hosts. 
+ """ + parser.add_argument( + 'subcommand', + metavar='SUBCOMMAND', + choices=[ + 'create', + 'destroy', + ], + help='create or destroy', + ) + parser.add_argument( + 'mds', + metavar='HOST[:NAME]', + nargs='*', + type=colon_separated, + help='host (and optionally the daemon name) to deploy on', + ) + parser.set_defaults( + func=mds, + ) diff --git a/ceph_deploy/memoize.py b/ceph_deploy/memoize.py new file mode 100644 index 0000000..fd344a0 --- /dev/null +++ b/ceph_deploy/memoize.py @@ -0,0 +1,26 @@ +import functools + + +class NotFound(object): + """ + Sentinel object to say call was not memoized. + + Supposed to be faster than throwing exceptions on cache miss. + """ + def __str__(self): + return self.__class__.__name__ + +NotFound = NotFound() + + +def memoize(f): + cache = {} + + @functools.wraps(f) + def wrapper(*args, **kwargs): + key = (args, tuple(sorted(kwargs.iteritems()))) + val = cache.get(key, NotFound) + if val is NotFound: + val = cache[key] = f(*args, **kwargs) + return val + return wrapper diff --git a/ceph_deploy/misc.py b/ceph_deploy/misc.py new file mode 100644 index 0000000..1620e1f --- /dev/null +++ b/ceph_deploy/misc.py @@ -0,0 +1,22 @@ + +def mon_hosts(mons): + """ + Iterate through list of MON hosts, return tuples of (name, host). + """ + for m in mons: + if m.count(':'): + (name, host) = m.split(':') + else: + name = m + host = m + if name.count('.') > 0: + name = name.split('.')[0] + yield (name, host) + +def remote_shortname(socket): + """ + Obtains remote hostname of the socket and cuts off the domain part + of its FQDN. + """ + return socket.gethostname().split('.', 1)[0] + diff --git a/ceph_deploy/mon.py b/ceph_deploy/mon.py new file mode 100644 index 0000000..0c22868 --- /dev/null +++ b/ceph_deploy/mon.py @@ -0,0 +1,402 @@ +import argparse +import json +import logging +import re +import os +from textwrap import dedent +import time + +from . 
def catch_mon_errors(conn, logger, hostname, cfg, args):
    """
    Make sure we are able to catch up common mishaps with monitors
    and use that state of a monitor to determine what is missing
    and warn appropriately about it.
    """
    monmap = mon_status_check(conn, logger, hostname, args).get('monmap', {})
    mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
    public_addr = cfg.safe_get('global', 'public_addr')
    public_network = cfg.safe_get('global', 'public_network')
    mon_in_monmap = [
        mon.get('name')
        for mon in monmap.get('mons', [{}])
        if mon.get('name') == hostname
    ]
    # BUG FIX: `mon_initial_members` is the raw config string, so the old
    # `hostname in mon_initial_members` was a substring match ('node1'
    # would match 'node10'); split it into actual host names first.
    if mon_initial_members:
        initial_members = re.split(r'[,\s]+', mon_initial_members)
    else:
        initial_members = []
    if hostname not in initial_members:
        logger.warning('%s is not defined in `mon initial members`', hostname)
    if not mon_in_monmap:
        logger.warning('monitor %s does not exist in monmap', hostname)
    if not public_addr and not public_network:
        logger.warning('neither `public_addr` nor `public_network` keys are defined for monitors')
        logger.warning('monitors may not be able to form quorum')
+ """ + mon = 'mon.%s' % hostname + + try: + out = mon_status_check(conn, logger, hostname, args) + if not out: + logger.warning('monitor: %s, might not be running yet' % mon) + return False + + if not silent: + logger.debug('*'*80) + logger.debug('status for monitor: %s' % mon) + for line in json.dumps(out, indent=2, sort_keys=True).split('\n'): + logger.debug(line) + logger.debug('*'*80) + if out['rank'] >= 0: + logger.info('monitor: %s is running' % mon) + return True + logger.info('monitor: %s is not running' % mon) + return False + except RuntimeError: + logger.info('monitor: %s is not running' % mon) + return False + + +def mon_create(args): + + cfg = conf.load(args) + if not args.mon: + mon_initial_members = cfg.safe_get('global', 'mon_initial_members') + args.mon = re.split(r'[,\s]+', mon_initial_members) + + if not args.mon: + raise exc.NeedHostError() + + try: + with file('{cluster}.mon.keyring'.format(cluster=args.cluster), + 'rb') as f: + monitor_keyring = f.read() + except IOError: + raise RuntimeError('mon keyring not found; run \'new\' to create a new cluster') + + LOG.debug( + 'Deploying mon, cluster %s hosts %s', + args.cluster, + ' '.join(args.mon), + ) + + errors = 0 + for (name, host) in mon_hosts(args.mon): + try: + # TODO add_bootstrap_peer_hint + LOG.debug('detecting platform for host %s ...', name) + distro = hosts.get(host, username=args.username) + LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename) + rlogger = logging.getLogger(name) + + # ensure remote hostname is good to go + hostname_is_compatible(distro.conn, rlogger, name) + rlogger.debug('deploying mon to %s', name) + distro.mon.create(distro, args, monitor_keyring) + + # tell me the status of the deployed mon + time.sleep(2) # give some room to start + mon_status(distro.conn, rlogger, name, args) + catch_mon_errors(distro.conn, rlogger, name, cfg, args) + distro.conn.exit() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise 
exc.GenericError('Failed to create %d monitors' % errors) + + +def hostname_is_compatible(conn, logger, provided_hostname): + """ + Make sure that the host that we are connecting to has the same value as the + `hostname` in the remote host, otherwise mons can fail not reaching quorum. + """ + logger.debug('determining if provided host has same hostname in remote') + remote_hostname = conn.remote_module.shortname() + if remote_hostname == provided_hostname: + return + logger.warning('*'*80) + logger.warning('provided hostname must match remote hostname') + logger.warning('provided hostname: %s' % provided_hostname) + logger.warning('remote hostname: %s' % remote_hostname) + logger.warning('monitors may not reach quorum and create-keys will not complete') + logger.warning('*'*80) + + +def destroy_mon(conn, cluster, hostname): + import datetime + import time + retries = 5 + + path = paths.mon.path(cluster, hostname) + + if conn.remote_module.path_exists(path): + # remove from cluster + process.run( + conn, + [ + 'ceph', + '--cluster={cluster}'.format(cluster=cluster), + '-n', 'mon.', + '-k', '{path}/keyring'.format(path=path), + 'mon', + 'remove', + hostname, + ], + timeout=7, + ) + + # stop + if conn.remote_module.path_exists(os.path.join(path, 'upstart')): + status_args = [ + 'initctl', + 'status', + 'ceph-mon', + 'cluster={cluster}'.format(cluster=cluster), + 'id={hostname}'.format(hostname=hostname), + ] + + elif conn.remote_module.path_exists(os.path.join(path, 'sysvinit')): + status_args = [ + 'service', + 'ceph', + 'status', + 'mon.{hostname}'.format(hostname=hostname), + ] + + while retries: + conn.logger.info('polling the daemon to verify it stopped') + if is_running(conn, status_args): + time.sleep(5) + retries -= 1 + if retries <= 0: + raise RuntimeError('ceph-mon deamon did not stop') + else: + break + + # archive old monitor directory + fn = '{cluster}-{hostname}-{stamp}'.format( + hostname=hostname, + cluster=cluster, + 
stamp=datetime.datetime.utcnow().strftime("%Y-%m-%dZ%H:%M:%S"), + ) + + process.run( + conn, + [ + 'mkdir', + '-p', + '/var/lib/ceph/mon-removed', + ], + ) + + conn.remote_module.make_mon_removed_dir(path, fn) + + +def mon_destroy(args): + errors = 0 + for (name, host) in mon_hosts(args.mon): + try: + LOG.debug('Removing mon from %s', name) + + distro = hosts.get(host, username=args.username) + hostname = distro.conn.remote_module.shortname() + + destroy_mon( + distro.conn, + args.cluster, + hostname, + ) + distro.conn.exit() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to destroy %d monitors' % errors) + + +def mon_create_initial(args): + cfg = conf.load(args) + cfg_initial_members = cfg.safe_get('global', 'mon_initial_members') + if cfg_initial_members is None: + raise RuntimeError('No `mon initial members` defined in config') + mon_initial_members = re.split(r'[,\s]+', cfg_initial_members) + + # create them normally through mon_create + mon_create(args) + + # make the sets to be able to compare late + mon_in_quorum = set([]) + mon_members = set([host for host in mon_initial_members]) + + for host in mon_initial_members: + mon_name = 'mon.%s' % host + LOG.info('processing monitor %s', mon_name) + sleeps = [20, 20, 15, 10, 10, 5] + tries = 5 + rlogger = logging.getLogger(host) + rconn = get_connection(host, username=args.username, logger=rlogger) + while tries: + status = mon_status_check(rconn, rlogger, host, args) + has_reached_quorum = status.get('state', '') in ['peon', 'leader'] + if not has_reached_quorum: + LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries)) + tries -= 1 + sleep_seconds = sleeps.pop() + LOG.warning('waiting %s seconds before retrying', sleep_seconds) + time.sleep(sleep_seconds) # Magic number + else: + mon_in_quorum.add(host) + LOG.info('%s monitor has reached quorum!', mon_name) + break + rconn.exit() + + if mon_in_quorum == mon_members: + 
LOG.info('all initial monitors are running and have formed quorum') + LOG.info('Running gatherkeys...') + gatherkeys.gatherkeys(args) + else: + LOG.error('Some monitors have still not reached quorum:') + for host in mon_members - mon_in_quorum: + LOG.error('%s', host) + + +def mon(args): + if args.subcommand == 'create': + mon_create(args) + elif args.subcommand == 'destroy': + mon_destroy(args) + elif args.subcommand == 'create-initial': + mon_create_initial(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + + +@priority(30) +def make(parser): + """ + Deploy ceph monitor on remote hosts. + """ + sub_command_help = dedent(""" + Subcommands: + + create-initial + Will deploy for monitors defined in `mon initial members`, wait until + they form quorum and then gatherkeys, reporting the monitor status along + the process. If monitors don't form quorum the command will eventually + time out. + + create + Deploy monitors by specifying them like: + + ceph-deploy mon create node1 node2 node3 + + If no hosts are passed it will default to use the `mon initial members` + defined in the configuration. + + destroy + Completely remove monitors on a remote host. Requires hostname(s) as + arguments. + """) + parser.formatter_class = argparse.RawDescriptionHelpFormatter + parser.description = sub_command_help + + parser.add_argument( + 'subcommand', + choices=[ + 'create', + 'create-initial', + 'destroy', + ], + ) + parser.add_argument( + 'mon', + nargs='*', + ) + parser.set_defaults( + func=mon, + ) + +# +# Helpers +# + + +def is_running(conn, args): + """ + Run a command to check the status of a mon, return a boolean. + + We heavily depend on the format of the output, if that ever changes + we need to modify this. 
+ Check daemon status for 3 times + output of the status should be similar to:: + + mon.mira094: running {"version":"0.61.5"} + + or when it fails:: + + mon.mira094: dead {"version":"0.61.5"} + mon.mira094: not running {"version":"0.61.5"} + """ + stdout, stderr, _ = process.check( + conn, + args + ) + result_string = ' '.join(stdout) + for run_check in [': running', ' start/running']: + if run_check in result_string: + return True + return False diff --git a/ceph_deploy/new.py b/ceph_deploy/new.py new file mode 100644 index 0000000..5a19bec --- /dev/null +++ b/ceph_deploy/new.py @@ -0,0 +1,193 @@ +import errno +import logging +import os +import uuid +import struct +import time +import base64 +import socket + +from . import exc +from .cliutil import priority +from .conf import CephConf +from . import hosts +from .util import arg_validators, ssh +from .misc import mon_hosts +from .lib.remoto import process +from .connection import get_local_connection + + +LOG = logging.getLogger(__name__) + + +def generate_auth_key(): + key = os.urandom(16) + header = struct.pack( + '>> mon.path('mycluster', 'hostname') + /var/lib/ceph/mon/mycluster-myhostname + """ + return "%s%s" % (base(cluster), hostname) + + +def done(cluster, hostname): + """ + Example usage:: + + >>> mon.done('mycluster', 'hostname') + /var/lib/ceph/mon/mycluster-myhostname/done + """ + return join(path(cluster, hostname), 'done') + + +def init(cluster, hostname, init): + """ + Example usage:: + + >>> mon.init('mycluster', 'hostname', 'init') + /var/lib/ceph/mon/mycluster-myhostname/init + """ + return join(path(cluster, hostname), init) + + +def keyring(cluster, hostname): + """ + Example usage:: + + >>> mon.keyring('mycluster', 'myhostname') + /var/lib/ceph/tmp/mycluster-myhostname.mon.keyring + """ + keyring_file = '%s-%s.mon.keyring' % (cluster, hostname) + return join(constants.tmp_path, keyring_file) diff --git a/ceph_deploy/util/pkg_managers.py b/ceph_deploy/util/pkg_managers.py new file mode 100644 
index 0000000..45ca1f8 --- /dev/null +++ b/ceph_deploy/util/pkg_managers.py @@ -0,0 +1,108 @@ +from ceph_deploy.lib.remoto import process + + +def apt(conn, package, *a, **kw): + cmd = [ + 'env', + 'DEBIAN_FRONTEND=noninteractive', + 'apt-get', + '-q', + 'install', + '--assume-yes', + package, + ] + return process.run( + conn, + cmd, + *a, + **kw + ) + + +def apt_remove(conn, packages, *a, **kw): + purge = kw.pop('purge', False) + cmd = [ + 'apt-get', + '-q', + 'remove', + '-f', + '-y', + '--force-yes', + ] + if purge: + cmd.append('--purge') + cmd.append('--') + cmd.extend(packages) + + return process.run( + conn, + cmd, + *a, + **kw + ) + + +def apt_update(conn): + cmd = [ + 'apt-get', + '-q', + 'update', + ] + return process.run( + conn, + cmd, + ) + + +def yum(conn, package, *a, **kw): + cmd = [ + 'yum', + '-y', + '-q', + 'install', + package, + ] + return process.run( + conn, + cmd, + *a, + **kw + ) + + +def yum_remove(conn, packages, *a, **kw): + cmd = [ + 'yum', + '-y', + '-q', + 'remove', + ] + if isinstance(packages, str): + cmd.append(packages) + else: + cmd.extend(packages) + return process.run( + conn, + cmd, + *a, + **kw + ) + + +def rpm(conn, rpm_args=None, *a, **kw): + """ + A minimal front end for ``rpm`. Extra flags can be passed in via + ``rpm_args`` as an iterable. + """ + rpm_args = rpm_args or [] + cmd = [ + 'rpm', + '-Uvh', + ] + cmd.extend(rpm_args) + return process.run( + conn, + cmd, + *a, + **kw + ) diff --git a/ceph_deploy/util/ssh.py b/ceph_deploy/util/ssh.py new file mode 100644 index 0000000..3d38998 --- /dev/null +++ b/ceph_deploy/util/ssh.py @@ -0,0 +1,32 @@ +import logging +from ceph_deploy.lib.remoto import process +from ceph_deploy.lib.remoto.connection import needs_ssh +from ceph_deploy.connection import get_local_connection + + +def can_connect_passwordless(hostname): + """ + Ensure that current host can SSH remotely to the remote + host using the ``BatchMode`` option to prevent a password prompt. 
# cluster names: a leading letter followed by letters and digits only
ALPHANUMERIC_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*$')


def alphanumeric(s):
    """
    Enforces string to be alphanumeric with leading alpha.

    Meant for use as an argparse ``type=`` callable, hence the
    ArgumentTypeError on invalid input.
    """
    if ALPHANUMERIC_RE.match(s) is None:
        raise argparse.ArgumentTypeError(
            'argument must start with a letter and contain only letters and numbers',
        )
    return s
+ceph-deploy (1.0-1) stable; urgency=low + + * New upstream release + + -- Gary Lowell Fri, 24 May 2013 11:57:40 +0800 + +ceph-deploy (0.0.1-1) unstable; urgency=low + + * Initial release. + + -- Gary Lowell Mon, 10 Mar 2013 18:38:40 +0800 diff --git a/debian/compat b/debian/compat new file mode 100644 index 0000000..7f8f011 --- /dev/null +++ b/debian/compat @@ -0,0 +1 @@ +7 diff --git a/debian/control b/debian/control new file mode 100644 index 0000000..d435e92 --- /dev/null +++ b/debian/control @@ -0,0 +1,25 @@ +Source: ceph-deploy +Maintainer: Sage Weil +Uploaders: Sage Weil +Section: admin +Priority: optional +Build-Depends: debhelper (>= 7), python-setuptools, git +X-Python-Version: >= 2.4 +Standards-Version: 3.9.2 +Homepage: http://ceph.com/ +Vcs-Git: git://github.com/ceph/ceph-deploy.git +Vcs-Browser: https://github.com/ceph/ceph-deploy + +Package: ceph-deploy +Architecture: all +Depends: python, + python-argparse, + python-setuptools, + ${misc:Depends}, + ${python:Depends} +Description: Ceph-deploy is an easy to use configuration tool + for the Ceph distributed storage system. + . + This package includes the programs and libraries to support + simple ceph cluster deployment. + diff --git a/debian/copyright b/debian/copyright new file mode 100644 index 0000000..93bc530 --- /dev/null +++ b/debian/copyright @@ -0,0 +1,3 @@ +Files: * +Copyright: (c) 2004-2012 by Sage Weil +License: LGPL2.1 (see /usr/share/common-licenses/LGPL-2.1) diff --git a/debian/rules b/debian/rules new file mode 100755 index 0000000..b46b956 --- /dev/null +++ b/debian/rules @@ -0,0 +1,12 @@ +#!/usr/bin/make -f + +# Uncomment this to turn on verbose mode. 
+export DH_VERBOSE=1 +export DEB_PYTHON_INSTALL_ARGS_ALL += --install-lib=/usr/share/ceph-deploy + +%: + dh $@ --buildsystem python_distutils --with python2 + +override_dh_clean: + rm -rf ceph_deploy/lib/remoto + dh_clean diff --git a/debian/source/format b/debian/source/format new file mode 100644 index 0000000..d3827e7 --- /dev/null +++ b/debian/source/format @@ -0,0 +1 @@ +1.0 diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..f8e0867 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,177 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
+ +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ceph-deploy.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ceph-deploy.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/ceph-deploy" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ceph-deploy" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." 
+ +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." 
+ +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/docs/source/_static/.empty b/docs/source/_static/.empty new file mode 100644 index 0000000..e69de29 diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Book.eot b/docs/source/_themes/ceph/static/font/ApexSans-Book.eot new file mode 100644 index 0000000..332c8cb Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Book.eot differ diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Book.svg b/docs/source/_themes/ceph/static/font/ApexSans-Book.svg new file mode 100644 index 0000000..8af9af2 --- /dev/null +++ b/docs/source/_themes/ceph/static/font/ApexSans-Book.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Book.ttf b/docs/source/_themes/ceph/static/font/ApexSans-Book.ttf new file mode 100644 index 0000000..42a0084 Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Book.ttf differ diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Book.woff b/docs/source/_themes/ceph/static/font/ApexSans-Book.woff new file mode 100644 index 0000000..681a70e Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Book.woff differ diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Medium.eot b/docs/source/_themes/ceph/static/font/ApexSans-Medium.eot new file mode 100644 index 0000000..e06fd21 Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Medium.eot differ diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Medium.svg b/docs/source/_themes/ceph/static/font/ApexSans-Medium.svg new file mode 100644 index 0000000..6c624ec --- /dev/null +++ 
b/docs/source/_themes/ceph/static/font/ApexSans-Medium.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Medium.ttf b/docs/source/_themes/ceph/static/font/ApexSans-Medium.ttf new file mode 100644 index 0000000..44c281e Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Medium.ttf differ diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Medium.woff b/docs/source/_themes/ceph/static/font/ApexSans-Medium.woff new file mode 100644 index 0000000..b7c8819 Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Medium.woff differ diff --git a/docs/source/_themes/ceph/static/nature.css_t b/docs/source/_themes/ceph/static/nature.css_t new file mode 100644 index 0000000..394a633 --- /dev/null +++ b/docs/source/_themes/ceph/static/nature.css_t @@ -0,0 +1,325 @@ +/* + * nature.css_t + * ~~~~~~~~~~~~ + * + * Sphinx stylesheet -- nature theme. + * + * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +@font-face { + font-family: 'ApexSansMedium'; + src: url('font/ApexSans-Medium.eot'); + src: url('font/ApexSans-Medium.eot?#iefix') format('embedded-opentype'), + url('font/ApexSans-Medium.woff') format('woff'), + url('font/ApexSans-Medium.ttf') format('truetype'), + url('font/ApexSans-Medium.svg#FontAwesome') format('svg'); + font-weight: normal; + font-style: normal; +} + +@font-face { + font-family: 'ApexSansBook'; + src: url('font/ApexSans-Book.eot'); + src: url('font/ApexSans-Book.eot?#iefix') format('embedded-opentype'), + url('font/ApexSans-Book.woff') format('woff'), + url('font/ApexSans-Book.ttf') format('truetype'), + url('font/ApexSans-Book.svg#FontAwesome') format('svg'); + font-weight: normal; + font-style: normal; +} + +body { + font: 14px/1.4 Helvetica, Arial, sans-serif; + background-color: #E6E8E8; + color: #37424A; + margin: 0; + padding: 0; + border-top: 5px solid #F05C56; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 330px; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.document { + background-color: #ffffff; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 0 30px 30px 30px; +} + +div.footer { + color: #222B31; + width: 100%; + padding: 13px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #444; + text-decoration: underline; +} + +div.related { + background-color: #80D2DC; + line-height: 32px; + color: #37424A; + // text-shadow: 0px 1px 0 #444; + font-size: 100%; + border-top: #9C4850 5px solid; +} + +div.related a { + color: #37424A; + text-decoration: none; +} + +div.related a:hover { + color: #fff; + // text-decoration: underline; +} + +div.sphinxsidebar { + // font-size: 100%; + line-height: 1.5em; + width: 330px; +} + +div.sphinxsidebarwrapper{ + padding: 20px 0; + background-color: #efefef; +} + +div.sphinxsidebar 
h3, +div.sphinxsidebar h4 { + font-family: ApexSansMedium; + color: #e6e8e8; + font-size: 1.2em; + font-weight: normal; + margin: 0; + padding: 5px 10px; + background-color: #5e6a71; + // text-shadow: 1px 1px 0 white; + text-transform: uppercase; +} + +div.sphinxsidebar h4{ + font-size: 1.1em; +} + +div.sphinxsidebar h3 a { + color: #e6e8e8; +} + + +div.sphinxsidebar p { + color: #888; + padding: 5px 20px; +} + +div.sphinxsidebar p.topless { +} + +div.sphinxsidebar ul { + margin: 10px 5px 10px 20px; + padding: 0; + color: #000; +} + +div.sphinxsidebar a { + color: #444; +} + +div.sphinxsidebar input { + border: 1px solid #ccc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar input[type=text]{ + margin-left: 20px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #F05C56; + text-decoration: none; +} + +a:hover { + color: #F05C56; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + // font-family: ApexSansMedium; + // background-color: #80D2DC; + // font-weight: normal; + // color: #37424a; + margin: 30px 0px 10px 0px; + padding: 5px 0 5px 0px; + // text-shadow: 0px 1px 0 white; + text-transform: uppercase; +} + +div.body h1 { font: 20px/2.0 ApexSansBook; color: #37424A; border-top: 20px solid white; margin-top: 0; } +div.body h2 { font: 18px/1.8 ApexSansMedium; background-color: #5E6A71; color: #E6E8E8; padding: 5px 10px; } +div.body h3 { font: 16px/1.6 ApexSansMedium; color: #37424A; } +div.body h4 { font: 14px/1.4 Helvetica, Arial, sans-serif; color: #37424A; } +div.body h5 { font: 12px/1.2 Helvetica, Arial, sans-serif; color: #37424A; } +div.body h6 { font-size: 100%; color: #37424A; } + +// div.body h2 { font-size: 150%; background-color: #E6E8E8; color: #37424A; } +// div.body h3 { font-size: 120%; background-color: #E6E8E8; color: #37424A; } +// div.body h4 { font-size: 110%; background-color: #E6E8E8; color: #37424A; } 
+// div.body h5 { font-size: 100%; background-color: #E6E8E8; color: #37424A; } +// div.body h6 { font-size: 100%; background-color: #E6E8E8; color: #37424A; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li { + line-height: 1.5em; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.highlight{ + background-color: white; +} + +div.note { + background-color: #e6e8e8; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #efefef; +} + +div.warning { + background-color: #F05C56; + border: 1px solid #9C4850; + color: #fff; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 10px; + background-color: White; + color: #222; + line-height: 1.2em; + border: 1px solid #5e6a71; + font-size: 1.1em; + margin: 1.5em; + -webkit-box-shadow: 1px 1px 1px #e6e8e8; + -moz-box-shadow: 1px 1px 1px #e6e8e8; +} + +tt { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ + font-size: 15px; + font-family: monospace; +} + +.viewcode-back { + font-family: Arial, sans-serif; +} + +div.viewcode-block:target { + background-color: #f4debf; + border-top: 1px solid #ac9; + border-bottom: 1px solid #ac9; +} + +table.docutils { + margin: 1.5em; +} + +div.sidebar { + border: 1px solid #5E6A71; + background-color: #E6E8E8; +} + +div.admonition.tip { + background-color: #80D2DC; + border: 1px solid #55AEBA; +} + +div.admonition.important { + background-color: #F05C56; + border: 1px solid #9C4850; + color: #fff; +} + +div.tip tt.literal { + background-color: #55aeba; + color: #fff; +} + +div.important tt.literal { + background-color: #9C4850; + color: #fff; +} + +h2 .literal { + color: #fff; + background-color: #37424a; +} + +dl.glossary dt { + 
font-size: 1.0em; + padding-top:20px; + +} \ No newline at end of file diff --git a/docs/source/_themes/ceph/theme.conf b/docs/source/_themes/ceph/theme.conf new file mode 100644 index 0000000..1cc4004 --- /dev/null +++ b/docs/source/_themes/ceph/theme.conf @@ -0,0 +1,4 @@ +[theme] +inherit = basic +stylesheet = nature.css +pygments_style = tango diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst new file mode 100644 index 0000000..fd10a2f --- /dev/null +++ b/docs/source/changelog.rst @@ -0,0 +1,112 @@ +1.3.2 +----- +* ``ceph-deploy new`` will now attempt to copy SSH keys if necessary unless it + it disabled. +* Default to Emperor version of ceph when installing. + +1.3.1 +----- +* Use ``shutil.move`` to overwrite files from temporary ones (Thanks Mark + Kirkwood) +* Fix failure to ``wget`` GPG keys on Debian and Debian-based distros when + installing + +1.3 +--- +* Major refactoring for all the remote connections in ceph-deploy. With global + and granular timeouts. +* Raise the log level for missing keyrings +* Allow ``--username`` to be used for connecting over SSH +* Increase verbosity when MDS fails, include the exit code +* Do not remove ``/etc/ceph``, just the contents +* Use ``rcceph`` instead of service for SUSE +* Fix lack of ``--cluster`` usage on monitor error checks +* ensure we correctly detect Debian releases + +1.2.7 +----- +* Ensure local calls to ceph-deploy do not attempt to ssh. +* ``mon create-initial`` command to deploy all defined mons, wait for them to + form quorum and finally to gatherkeys. +* Improve help menu for mon commands. 
+* Add ``--fs-type`` option to ``disk`` and ``osd`` commands (Thanks Benoit + Knecht) +* Make sure we are using ``--cluster`` for remote configs when starting ceph +* Fix broken ``mon destroy`` calls using the new hostname resolution helper +* Add a helper to catch common monitor errors (reporting the status of a mon) +* Normalize all configuration options in ceph-deploy (Thanks Andrew Woodward) +* Use a ``cuttlefish`` compatible ``mon_status`` command +* Make ``osd activate`` use the new remote connection libraries for improved + readability. +* Make ``disk zap`` also use the new remote connection libraries. +* Handle any connection errors that may came up when attempting to get into + remote hosts. + +1.2.6 +----- +* Fixes a problem witha closed connection for Debian distros when creating + a mon. + +1.2.5 +----- +* Fix yet another hanging problem when starting monitors. Closing the + connection now before we even start them. + +1.2.4 +----- +* Improve ``osd help`` menu with path information +* Really discourage the use of ``ceph-deploy new [IP]`` +* Fix hanging remote requests +* Add ``mon status`` output when creating monitors +* Fix Debian install issue (wrong parameter order) (Thanks Sayid Munawar) +* ``osd`` commands will be more verbose when deploying them +* Issue a warning when provided hosts do not match ``hostname -s`` remotely +* Create two flags for altering/not-altering source repos at install time: + ``--adjust-repos`` and ``--no-adjust-repos`` +* Do not do any ``sudo`` commands if user is root +* Use ``mon status`` for every ``mon`` deployment and detect problems with + monitors. +* Allow to specify ``host:fqdn/ip`` for all mon commands (Thanks Dmitry + Borodaenko) +* Be consistent for hostname detection (Thanks Dmitry Borodaenko) +* Fix hanging problem on remote hosts + +1.2.3 +----- +* Fix non-working ``disk list`` +* ``check_call`` utility fixes ``$PATH`` issues. 
+* Use proper exit codes from the ``main()`` CLI function +* Do not error when attempting to add the EPEL repos. +* Do not complain when using IP:HOST pairs +* Report nicely when ``HOST:DISK`` is not used when zapping. + +1.2.2 +----- +* Do not force usage of lsb_release, fallback to + ``platform.linux_distribution()`` +* Ease installation in CentOS/Scientific by adding the EPEL repo + before attempting to install Ceph. +* Graceful handling of pushy connection issues due to host + address resolution +* Honor the usage of ``--cluster`` when calling osd prepare. + +1.2.1 +----- +* Print the help when no arguments are passed +* Add a ``--version`` flag +* Show the version in the help menu +* Catch ``DeployError`` exceptions nicely with the logger +* Fix blocked command when calling ``mon create`` +* default to ``dumpling`` for installs +* halt execution on remote exceptions + + +1.2 +--- +* Better logging output +* Remote logging for individual actions for ``install`` and ``mon create`` +* Install ``ca-certificates`` on all Debian-based distros +* Honor the usage of ``--cluster`` +* Do not ``rm -rf`` monitor logs when destroying +* Error out when ``ceph-deploy new [IP]`` is used +* Log the ceph version when installing diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..e81012b --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- +# +# ceph-deploy documentation build configuration file, created by +# sphinx-quickstart on Mon Oct 21 09:32:42 2013. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. 
If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.append(os.path.abspath('_themes')) +sys.path.insert(0, os.path.abspath('..')) +import ceph_deploy + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.intersphinx', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'contents' + +# General information about the project. +project = u'ceph-deploy' +copyright = u'2013, Inktank' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = ceph_deploy.__version__ +# The full version, including alpha/beta/rc tags. +release = ceph_deploy.__version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. 
+#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'ceph' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = ['_themes'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'ceph-deploydoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). 
+#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'ceph-deploy.tex', u'ceph-deploy Documentation', + u'Inktank', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'ceph-deploy', u'ceph-deploy Documentation', + [u'Inktank'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'ceph-deploy', u'ceph-deploy Documentation', + u'Inktank', 'ceph-deploy', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. 
+#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + + +# XXX Uncomment when we are ready to link to ceph docs +# Example configuration for intersphinx: refer to the Python standard library. +#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/docs/source/contents.rst b/docs/source/contents.rst new file mode 100644 index 0000000..38cd61d --- /dev/null +++ b/docs/source/contents.rst @@ -0,0 +1,8 @@ +Content Index +============= + +.. toctree:: + :maxdepth: 2 + + index.rst + changelog.rst diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..e4583c1 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,359 @@ +======================================================== + ceph-deploy -- Deploy Ceph with minimal infrastructure +======================================================== + +``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to +the servers, ``sudo``, and some Python. It runs fully on your +workstation, requiring no servers, databases, or anything like that. + +If you set up and tear down Ceph clusters a lot, and want minimal +extra bureaucracy, this is for you. + +.. _what this tool is not: + +What this tool is not +--------------------- +It is not a generic deployment system, it is only for Ceph, and is designed +for users who want to quickly get Ceph running with sensible initial settings +without the overhead of installing Chef, Puppet or Juju. + +It does not handle client configuration beyond pushing the Ceph config file +and users who want fine-control over security settings, partitions or directory +locations should use a tool such as Chef or Puppet. + + +Installation +============ +Depending on what type of usage you are going to have with ``ceph-deploy`` you +might want to look into the different ways to install it. For automation, you +might want to ``bootstrap`` directly. 
Regular users of ``ceph-deploy`` would
+probably install from the OS packages or from the Python Package Index.
+
+Python Package Index
+--------------------
+If you are familiar with Python install tools (like ``pip`` and
+``easy_install``) you can easily install ``ceph-deploy`` like::
+
+    pip install ceph-deploy
+
+or::
+
+    easy_install ceph-deploy
+
+
+It should grab all the dependencies for you and install into the current user's
+environment.
+
+We highly recommend using ``virtualenv`` and installing dependencies in
+a contained way.
+
+
+DEB
+---
+The DEB repo can be found at http://ceph.com/packages/ceph-extras/debian/
+
+But they can also be found for ``ceph`` releases in the ``ceph`` repos like::
+
+    ceph.com/debian-{release}
+    ceph.com/debian-testing
+
+RPM
+---
+The RPM repos can be found at http://ceph.com/packages/ceph-extras/rpm/
+
+Make sure you add the proper one for your distribution.
+
+But they can also be found for ``ceph`` releases in the ``ceph`` repos like::
+
+    ceph.com/rpm-{release}
+    ceph.com/rpm-testing
+
+
+bootstrapping
+-------------
+To get the source tree ready for use, run this once::
+
+    ./bootstrap
+
+You can symlink the ``ceph-deploy`` script from this directory somewhere
+convenient (like ``~/bin``), or add the current directory to ``PATH``,
+or just always type the full path to ``ceph-deploy``.
+
+
+SSH and Remote Connections
+==========================
+``ceph-deploy`` will attempt to connect via SSH to hosts when the hostnames do
+not match the current host's hostname. For example, if you are connecting to
+host ``node1`` it will attempt an SSH connection as long as the current host's
+hostname is *not* ``node1``.
+
+ceph-deploy at a minimum requires that the machine from which the script is
+being run can ssh as root without password into each Ceph node.
+ +To enable this generate a new ssh keypair for the root user with no passphrase +and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in:: + + /root/.ssh/authorized_keys + +and ensure that the following lines are in the sshd config:: + + PermitRootLogin yes + PermitEmptyPasswords yes + +The machine running ceph-deploy does not need to have the Ceph packages +installed unless it needs to admin the cluster directly using the ``ceph`` +command line tool. + + +usernames +--------- +When not specified the connection will be done with the same username as the +one executing ``ceph-deploy``. This is useful if the same username is shared in +all the nodes but can be cumbersome if that is not the case. + +A way to avoid this is to define the correct usernames to connect with in the +SSH config, but you can also use the ``--username`` flag as well:: + + ceph-deploy --username ceph install node1 + +``ceph-deploy`` then in turn would use ``ceph@node1`` to connect to that host. + +This would be the same expectation for any action that warrants a connection to +a remote host. + + +SSH Keys +-------- +Ideally, all nodes will be pre-configured to have their passwordless access +from the machine executing ``ceph-deploy`` but you can also take advantage of +automatic detection of this when calling the ``new`` subcommand. + +Once called, it will try to establish an SSH connection to the hosts passed +into the ``new`` subcommand, and determine if it can (or cannot) connect +without a password prompt. + +If it can't proceed, it will try to copy *existing* keys to the remote host, if +those do not exist, then passwordless ``rsa`` keys will be generated for the +current user and those will get used. + +This feature can be overridden in the ``new`` subcommand like:: + + ceph-deploy new --no-ssh-copykey + +.. versionadded:: 1.3.2 + + +Managing an existing cluster +============================ + +You can use ceph-deploy to provision nodes for an existing cluster. 
+To grab a copy of the cluster configuration file (normally +``ceph.conf``):: + + ceph-deploy config pull HOST + +You will usually also want to gather the encryption keys used for that +cluster:: + + ceph-deploy gatherkeys MONHOST + +At this point you can skip the steps below that create a new cluster +(you already have one) and optionally skip installation and/or monitor +creation, depending on what you are trying to accomplish. + + +Creating a new cluster +====================== + +Creating a new configuration +---------------------------- + +To create a new configuration file and secret key, decide what hosts +will run ``ceph-mon``, and run:: + + ceph-deploy new MON [MON..] + +listing the hostnames of the monitors. Each ``MON`` can be + + * a simple hostname. It must be DNS resolvable without the fully + qualified domain name. + * a fully qualified domain name. The hostname is assumed to be the + leading component up to the first ``.``. + * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified + domain name or IP address. For example, ``foo``, + ``foo.example.com``, ``foo:something.example.com``, and + ``foo:1.2.3.4`` are all valid. Note, however, that the hostname + should match that configured on the host ``foo``. + +The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your +current directory. + + +Edit initial cluster configuration +---------------------------------- + +You want to review the generated ``ceph.conf`` file and make sure that +the ``mon_host`` setting contains the IP addresses you would like the +monitors to bind to. These are the IPs that clients will initially +contact to authenticate to the cluster, and they need to be reachable +both by external client-facing hosts and internal cluster daemons. + +Installing packages +=================== + +To install the Ceph software on the servers, run:: + + ceph-deploy install HOST [HOST..] + +This installs the current default *stable* release. 
You can choose a +different release track with command line options, for example to use +a release candidate:: + + ceph-deploy install --testing HOST + +Or to test a development branch:: + + ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..] + + +Proxy or Firewall Installs +-------------------------- +If attempting to install behind a firewall or through a proxy you can +use the ``--no-adjust-repos`` that will tell ceph-deploy to skip any changes +to the distro's repository in order to install the packages and it will go +straight to package installation. + +That will allow an environment without internet access to point to *its own +repositories*. This means that those repositories will need to be properly +setup (and mirrored with all the necessary dependencies) before attempting an +install. + +Another alternative is to set the `wget` env variables to point to the right +hosts, for example:: + + http_proxy=http://host:port + ftp_proxy=http://host:port + https_proxy=http://host:port + + + +Deploying monitors +================== + +To actually deploy ``ceph-mon`` to the hosts you chose, run:: + + ceph-deploy mon create HOST [HOST..] + +Without explicit hosts listed, hosts in ``mon_initial_members`` in the +config file are deployed. That is, the hosts you passed to +``ceph-deploy new`` are the default value here. + +Gather keys +=========== + +To gather authenticate keys (for administering the cluster and +bootstrapping new nodes) to the local directory, run:: + + ceph-deploy gatherkeys HOST [HOST...] + +where ``HOST`` is one of the monitor hosts. + +Once these keys are in the local directory, you can provision new OSDs etc. + + +Deploying OSDs +============== + +To prepare a node for running OSDs, run:: + + ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...] + +After that, the hosts will be running OSDs for the given data disks. 
+If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be +created and GPT labels will be used to mark and automatically activate +OSD volumes. If an existing partition is specified, the partition +table will not be modified. If you want to destroy the existing +partition table on DISK first, you can include the ``--zap-disk`` +option. + +If there is already a prepared disk or directory that is ready to become an +OSD, you can also do:: + + ceph-deploy osd activate HOST:DIR[:JOURNAL] [...] + +This is useful when you are managing the mounting of volumes yourself. + + +Admin hosts +=========== + +To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring`` +keyring so that it can administer the cluster, run:: + + ceph-deploy admin HOST [HOST ...] + +Forget keys +=========== + +The ``new`` and ``gatherkeys`` put some Ceph authentication keys in keyrings in +the local directory. If you are worried about them being there for security +reasons, run:: + + ceph-deploy forgetkeys + +and they will be removed. If you need them again later to deploy additional +nodes, simply re-run:: + + ceph-deploy gatherkeys HOST [HOST...] + +and they will be retrieved from an existing monitor node. + +Multiple clusters +================= + +All of the above commands take a ``--cluster=NAME`` option, allowing +you to manage multiple clusters conveniently from one workstation. +For example:: + + ceph-deploy --cluster=us-west new + vi us-west.conf + ceph-deploy --cluster=us-west mon + +FAQ +=== + +Before anything +--------------- +Make sure you have the latest version of ``ceph-deploy``. It is actively +developed and releases are coming weekly (on average). The most recent versions +of ``ceph-deploy`` will have a ``--version`` flag you can use, otherwise check +with your package manager and update if there is anything new. + +Why is feature X not implemented? 
+--------------------------------- +Usually, features are added when/if it is sensible for someone that wants to +get started with ceph and said feature would make sense in that context. If +you believe this is the case and you've read "`what this tool is not`_" and +still think feature ``X`` should exist in ceph-deploy, open a feature request +in the ceph tracker: http://tracker.ceph.com/projects/devops/issues + +A command gave me an error, what is going on? +--------------------------------------------- +Most of the commands for ``ceph-deploy`` are meant to be run remotely in a host +that you have configured when creating the initial config. If a given command +is not working as expected try to run the command that failed in the remote +host and assert the behavior there. + +If the behavior in the remote host is the same, then it is probably not +something wrong with ``ceph-deploy`` per-se. Make sure you capture the output +of both the ``ceph-deploy`` output and the output of the command in the remote +host. + +Issues with monitors +-------------------- +If your monitors are not starting, make sure that the ``{hostname}`` you used +when you ran ``ceph-deploy mon create {hostname}`` match the actual ``hostname -s`` +in the remote host. + +Newer versions of ``ceph-deploy`` should warn you if the results are different +but that might prevent the monitors from reaching quorum. diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..dbc0d19 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,3 @@ +pytest >=2.1.3 +tox >=1.2 +mock >=1.0b1 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ + diff --git a/scripts/build-debian.sh b/scripts/build-debian.sh new file mode 100755 index 0000000..ce205ca --- /dev/null +++ b/scripts/build-debian.sh @@ -0,0 +1,84 @@ +#! 
/bin/sh + +# Tag tree and update version number in change log and +# in setup.py before building. + +REPO=debian-repo +COMPONENT=main +KEYID=${KEYID:-03C3951A} # default is autobuild keyid +DEB_DIST="sid wheezy squeeze quantal precise oneiric natty raring" +DEB_BUILD=$(lsb_release -s -c) +RELEASE=0 + +if [ X"$1" = X"--release" ] ; then + echo "Release Build" + RELEASE=1 +fi + +if [ ! -d debian ] ; then + echo "Are we in the right directory" + exit 1 +fi + +if gpg --list-keys 2>/dev/null | grep -q ${KEYID} ; then + echo "Signing packages and repo with ${KEYID}" +else + echo "Package signing key (${KEYID}) not found" + echo "Have you set \$GNUPGHOME ? " + exit 3 +fi + +# Clean up any leftover builds +rm -f ../ceph-deploy*.dsc ../ceph-deploy*.changes ../ceph-deploy*.deb ../ceph-deploy.tgz +rm -rf ./debian-repo + +# Apply backport tag if release build +if [ $RELEASE -eq 1 ] ; then + DEB_VERSION=$(dpkg-parsechangelog | sed -rne 's,^Version: (.*),\1, p') + BP_VERSION=${DEB_VERSION}${BPTAG} + DEBEMAIL="gary.lowell@inktank.com" dch -D $DIST --force-distribution -b -v "$BP_VERSION" "$comment" + dpkg-source -b . +fi + +# Build Package +echo "Building for dist: $DEB_BUILD" +dpkg-buildpackage -k$KEYID +if [ $? -ne 0 ] ; then + echo "Build failed" + exit 2 +fi + +# Build Repo +PKG=../ceph-deploy*.changes +mkdir -p $REPO/conf +if [ -e $REPO/conf/distributions ] ; then + rm -f $REPO/conf/distributions +fi + +for DIST in $DEB_DIST ; do + cat <> $REPO/conf/distributions +Codename: $DIST +Suite: stable +Components: $COMPONENT +Architectures: amd64 armhf i386 source +Origin: Inktank +Description: Ceph distributed file system +DebIndices: Packages Release . 
.gz .bz2 +DscIndices: Sources Release .gz .bz2 +Contents: .gz .bz2 +SignWith: $KEYID + +EOF +done + +echo "Adding package to repo, dist: $DEB_BUILD ($PKG)" +reprepro --ask-passphrase -b $REPO -C $COMPONENT --ignore=undefinedtarget --ignore=wrongdistribution include $DEB_BUILD $PKG + +#for DIST in $DEB_DIST +#do +# [ "$DIST" = "$DEB_BUILD" ] && continue +# echo "Copying package to dist: $DIST" +# reprepro -b $REPO --ignore=undefinedtarget --ignore=wrongdistribution copy $DIST $DEB_BUILD ceph-deploy +#done + +echo "Done" diff --git a/scripts/build-rpm.sh b/scripts/build-rpm.sh new file mode 100755 index 0000000..9b330e4 --- /dev/null +++ b/scripts/build-rpm.sh @@ -0,0 +1,59 @@ +#! /bin/sh + +# Tag tree and update version number in change log and +# in setup.py before building. + +REPO=rpm-repo +KEYID=${KEYID:-03C3951A} # Default is autobuild-key +BUILDAREA=./rpmbuild +DIST=el6 +RPM_BUILD=$(lsb_release -s -c) + +if [ ! -e setup.py ] ; then + echo "Are we in the right directory" + exit 1 +fi + +if gpg --list-keys 2>/dev/null | grep -q ${KEYID} ; then + echo "Signing packages and repo with ${KEYID}" +else + echo "Package signing key (${KEYID}) not found" + echo "Have you set \$GNUPGHOME ? " + exit 3 +fi + +if ! 
CREATEREPO=`which createrepo` ; then + echo "Please install the createrepo package" + exit 4 +fi + +# Create Tarball +python setup.py sdist --formats=bztar + +# Build RPM +mkdir -p rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} +BUILDAREA=`readlink -fn ${BUILDAREA}` ### rpm wants absolute path +cp ceph-deploy.spec ${BUILDAREA}/SPECS +cp dist/*.tar.bz2 ${BUILDAREA}/SOURCES +echo "buildarea is: ${BUILDAREA}" +rpmbuild -ba --define "_topdir ${BUILDAREA}" --define "_unpackaged_files_terminate_build 0" ${BUILDAREA}/SPECS/ceph-deploy.spec + +# create repo +DEST=${REPO}/${DIST} +mkdir -p ${REPO}/${DIST} +cp -r ${BUILDAREA}/*RPMS ${DEST} + +# Sign all the RPMs for this release +rpm_list=`find ${REPO} -name "*.rpm" -print` +rpm --addsign --define "_gpg_name ${KEYID}" $rpm_list + +# Construct repodata +for dir in ${DEST}/SRPMS ${DEST}/RPMS/* +do + if [ -d $dir ] ; then + createrepo $dir + gpg --detach-sign --armor -u ${KEYID} $dir/repodata/repomd.xml + fi +done + +exit 0 diff --git a/scripts/ceph-deploy b/scripts/ceph-deploy new file mode 100755 index 0000000..cc8dd62 --- /dev/null +++ b/scripts/ceph-deploy @@ -0,0 +1,21 @@ +#!/usr/bin/env python +import os +import platform +import sys +""" +ceph-deploy - admin tool for ceph +""" + +if os.path.exists('/usr/share/pyshared/ceph_deploy'): + sys.path.insert(0,'/usr/share/pyshared/ceph_deploy') +elif os.path.exists('/usr/share/ceph-deploy'): + sys.path.insert(0,'/usr/share/ceph-deploy') +elif os.path.exists('/usr/share/pyshared/ceph-deploy'): + sys.path.insert(0,'/usr/share/pyshared/ceph-deploy') +elif os.path.exists('/usr/lib/python2.6/site-packages/ceph_deploy'): + sys.path.insert(0,'/usr/lib/python2.6/site-packages/ceph_deploy') + +from ceph_deploy.cli import main + +if __name__ == '__main__': + sys.exit(main()) diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..d9ec107 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[pytest] +norecursedirs = .* _* virtualenv diff --git a/setup.py b/setup.py new file 
mode 100644 index 0000000..cbf2c16 --- /dev/null +++ b/setup.py @@ -0,0 +1,71 @@ +from setuptools import setup, find_packages +import os +import sys +import ceph_deploy +from vendor import vendorize + + +def read(fname): + path = os.path.join(os.path.dirname(__file__), fname) + f = open(path) + return f.read() + +install_requires = [] +pyversion = sys.version_info[:2] +if pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1): + install_requires.append('argparse') + +# +# Add libraries that are not part of install_requires +# +vendorize([ + ('remoto', '0.0.11'), +]) + + +setup( + name='ceph-deploy', + version=ceph_deploy.__version__, + packages=find_packages(), + + author='Inktank', + author_email='ceph-devel@vger.kernel.org', + description='Deploy Ceph with minimal infrastructure', + long_description=read('README.rst'), + license='MIT', + keywords='ceph deploy', + url="https://github.com/ceph/ceph-deploy", + + install_requires=[ + 'setuptools', + ] + install_requires, + + tests_require=[ + 'pytest >=2.1.3', + 'mock >=1.0b1', + ], + + entry_points={ + + 'console_scripts': [ + 'ceph-deploy = ceph_deploy.cli:main', + ], + + 'ceph_deploy.cli': [ + 'new = ceph_deploy.new:make', + 'install = ceph_deploy.install:make', + 'uninstall = ceph_deploy.install:make_uninstall', + 'purge = ceph_deploy.install:make_purge', + 'purgedata = ceph_deploy.install:make_purge_data', + 'mon = ceph_deploy.mon:make', + 'gatherkeys = ceph_deploy.gatherkeys:make', + 'osd = ceph_deploy.osd:make', + 'disk = ceph_deploy.osd:make_disk', + 'mds = ceph_deploy.mds:make', + 'forgetkeys = ceph_deploy.forgetkeys:make', + 'config = ceph_deploy.config:make', + 'admin = ceph_deploy.admin:make', + ], + + }, + ) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..49028bc --- /dev/null +++ b/tox.ini @@ -0,0 +1,15 @@ +[tox] +envlist = py26, py27 + +[testenv] +deps= + pytest + mock +commands=py.test -v {posargs:ceph_deploy/tests} + +[testenv:docs] +basepython=python +changedir=docs/source 
+deps=sphinx +commands= + sphinx-build -W -b html -d {envtmpdir}/doctrees . {envtmpdir}/html diff --git a/vendor.py b/vendor.py new file mode 100644 index 0000000..7ce7f05 --- /dev/null +++ b/vendor.py @@ -0,0 +1,79 @@ +import subprocess +import os +from os import path +import traceback + + +error_msg = """ +This library depends on sources fetched when packaging that failed to be +retrieved. + +This means that it will *not* work as expected. Errors encountered: +""" + + +def run(cmd): + print '[vendoring] Running command: %s' % ' '.join(cmd) + try: + result = subprocess.Popen( + cmd, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE + ) + except Exception as error: + print_error([], traceback.format_exc(error).split('\n')) + raise SystemExit(1) + + if result.wait(): + print_error(result.stdout.readlines(), result.stderr.readlines()) + + +def print_error(stdout, stderr): + print '*'*80 + print error_msg + for line in stdout: + print line + for line in stderr: + print line + print '*'*80 + + +def vendor_library(name, version): + this_dir = path.dirname(path.abspath(__file__)) + vendor_dest = path.join(this_dir, 'ceph_deploy/lib/%s' % name) + vendor_src = path.join(this_dir, name) + vendor_module = path.join(vendor_src, name) + current_dir = os.getcwd() + + if path.exists(vendor_src): + run(['rm', '-rf', vendor_src]) + + if path.exists(vendor_dest): + module = __import__('ceph_deploy.lib.remoto', globals(), locals(), ['__version__']) + if module.__version__ != version: + run(['rm', '-rf', vendor_dest]) + + if not path.exists(vendor_dest): + run(['git', 'clone', 'git://ceph.com/%s' % name]) + os.chdir(vendor_src) + run(['git', 'checkout', version]) + run(['mv', vendor_module, vendor_dest]) + os.chdir(current_dir) + + +def vendorize(vendor_requirements): + """ + This is the main entry point for vendorizing requirements. It expects + a list of tuples that should contain the name of the library and the + version. 
+ + For example, a library ``foo`` with version ``0.0.1`` would look like:: + + vendor_requirements = [ + ('foo', '0.0.1'), + ] + """ + + for library in vendor_requirements: + name, version = library + vendor_library(name, version)