From: Alfredo Deza Date: Thu, 25 Apr 2019 19:11:10 +0000 (-0400) Subject: tox: add both py35 and py36 to test whichever is available X-Git-Tag: v2.1.0~19^2~9 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=f9115fbb3cb879775c9211e5e1a90db6a0813f69;p=ceph-deploy.git tox: add both py35 and py36 to test whichever is available Signed-off-by: Alfredo Deza --- f9115fbb3cb879775c9211e5e1a90db6a0813f69 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..efd1a25 --- /dev/null +++ b/.gitignore @@ -0,0 +1,22 @@ +*~ +.#* +## the next line needs to start with a backslash to avoid looking like +## a comment +\#*# +.*.swp + +*.pyc +*.pyo +*.egg-info +/build +/dist +build + +/virtualenv +/.tox + +/ceph-deploy +/*.conf + +*/lib/vendor/remoto +remoto diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000..60cead5 --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,48 @@ +Contributing to ceph-deploy +=========================== +Before any contributions, a reference ticket *must* exist. The community issue +tracker is hosted at tracker.ceph.com + +To open a new issue, requests can go to: + +http://tracker.ceph.com/projects/ceph-deploy/issues/new + + +commits +------- +Once a ticket exists, commits should be prefaced by the ticket ID. This makes +it easier for maintainers to keep track of why a given line changed, mapping +directly to work done on a ticket. + +For tickets coming from tracker.ceph.com, we expect the following format:: + + [RM-0000] this is a commit message for tracker.ceph.com + +``RM`` stands for Redmine which is the software running tracker.ceph.com. +Similarly, if a ticket was created in bugzilla.redhat.com, we expect the +following format:: + + [BZ-0000] this is a commit message for bugzilla.redhat.com + + +To automate this process, you can create a branch with the tracker identifier +and id (replace "0000" with the ticket number):: + + git checkout -b RM-0000 + +And then use the follow prepare-commit-msg: +https://gist.github.com/alfredodeza/6d62d99a95c9a7975fbe + +Copy that file to ``$GITREPOSITORY/.git/hooks/prepare-commit-msg`` +and mark it executable. + +Your commit messages should then be automatically prefixed with the branch name +based off of the issue tracker. + +tests and documentation +----------------------- +Wherever it is feasible, tests must exist and documentation must be added or +improved depending on the change. + +The build process not only runs tests but ensures that docs can be built from +the proposed changes as well. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..26624cf --- /dev/null +++ b/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012 Inktank Storage, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..370e3d9 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +include *.rst +include LICENSE +include scripts/ceph-deploy +include vendor.py +include tox.ini diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..85aec15 --- /dev/null +++ b/README.rst @@ -0,0 +1,373 @@ +======================================================== + ceph-deploy -- Deploy Ceph with minimal infrastructure +======================================================== + +``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to +the servers, ``sudo``, and some Python. It runs fully on your +workstation, requiring no servers, databases, or anything like that. + +If you set up and tear down Ceph clusters a lot, and want minimal +extra bureaucracy, this is for you. + +This ``README`` provides a brief overview of ceph-deploy, for thorough +documentation please go to http://ceph.com/ceph-deploy/docs + +.. _what this tool is not: + +What this tool is not +--------------------- +It is not a generic deployment system, it is only for Ceph, and is designed +for users who want to quickly get Ceph running with sensible initial settings +without the overhead of installing Chef, Puppet or Juju. + +It does not handle client configuration beyond pushing the Ceph config file +and users who want fine-control over security settings, partitions or directory +locations should use a tool such as Chef or Puppet. + + +Installation +============ +Depending on what type of usage you are going to have with ``ceph-deploy`` you +might want to look into the different ways to install it. For automation, you +might want to ``bootstrap`` directly. Regular users of ``ceph-deploy`` would +probably install from the OS packages or from the Python Package Index. + +Python Package Index +-------------------- +If you are familiar with Python install tools (like ``pip`` and +``easy_install``) you can easily install ``ceph-deploy`` like:: + + pip install ceph-deploy + +or:: + + easy_install ceph-deploy + + +It should grab all the dependencies for you and install into the current user's +environment. + +We highly recommend using ``virtualenv`` and installing dependencies in +a contained way. + + +DEB +--- +All new releases of ``ceph-deploy`` are pushed to all ``ceph`` DEB release +repos. + +The DEB release repos are found at:: + + http://ceph.com/debian-{release} + http://ceph.com/debian-testing + +This means, for example, that installing ``ceph-deploy`` from +http://ceph.com/debian-giant will install the same version as from +http://ceph.com/debian-firefly or http://ceph.com/debian-testing. + +RPM +--- +All new releases of ``ceph-deploy`` are pushed to all ``ceph`` RPM release +repos. + +The RPM release repos are found at:: + + http://ceph.com/rpm-{release} + http://ceph.com/rpm-testing + +Make sure you add the proper one for your distribution (i.e. el7 vs rhel7). + +This means, for example, that installing ``ceph-deploy`` from +http://ceph.com/rpm-giant will install the same version as from +http://ceph.com/rpm-firefly or http://ceph.com/rpm-testing. + +bootstrapping +------------- +To get the source tree ready for use, run this once:: + + ./bootstrap + +You can symlink the ``ceph-deploy`` script in this somewhere +convenient (like ``~/bin``), or add the current directory to ``PATH``, +or just always type the full path to ``ceph-deploy``. + + +SSH and Remote Connections +========================== +``ceph-deploy`` will attempt to connect via SSH to hosts when the hostnames do +not match the current host's hostname. For example, if you are connecting to +host ``node1`` it will attempt an SSH connection as long as the current host's +hostname is *not* ``node1``. + +ceph-deploy at a minimum requires that the machine from which the script is +being run can ssh as root without password into each Ceph node. + +To enable this generate a new ssh keypair for the root user with no passphrase +and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in:: + + /root/.ssh/authorized_keys + +and ensure that the following lines are in the sshd config:: + + PermitRootLogin without-password + PubkeyAuthentication yes + +The machine running ceph-deploy does not need to have the Ceph packages +installed unless it needs to admin the cluster directly using the ``ceph`` +command line tool. + + +usernames +--------- +When not specified the connection will be done with the same username as the +one executing ``ceph-deploy``. This is useful if the same username is shared in +all the nodes but can be cumbersome if that is not the case. + +A way to avoid this is to define the correct usernames to connect with in the +SSH config, but you can also use the ``--username`` flag as well:: + + ceph-deploy --username ceph install node1 + +``ceph-deploy`` then in turn would use ``ceph@node1`` to connect to that host. + +This would be the same expectation for any action that warrants a connection to +a remote host. + + +Managing an existing cluster +============================ + +You can use ceph-deploy to provision nodes for an existing cluster. +To grab a copy of the cluster configuration file (normally +``ceph.conf``):: + + ceph-deploy config pull HOST + +You will usually also want to gather the encryption keys used for that +cluster:: + + ceph-deploy gatherkeys MONHOST + +At this point you can skip the steps below that create a new cluster +(you already have one) and optionally skip installation and/or monitor +creation, depending on what you are trying to accomplish. + + +Creating a new cluster +====================== + +Creating a new configuration +---------------------------- + +To create a new configuration file and secret key, decide what hosts +will run ``ceph-mon``, and run:: + + ceph-deploy new MON [MON..] + +listing the hostnames of the monitors. Each ``MON`` can be + + * a simple hostname. It must be DNS resolvable without the fully + qualified domain name. + * a fully qualified domain name. The hostname is assumed to be the + leading component up to the first ``.``. + * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified + domain name or IP address. For example, ``foo``, + ``foo.example.com``, ``foo:something.example.com``, and + ``foo:1.2.3.4`` are all valid. Note, however, that the hostname + should match that configured on the host ``foo``. + +The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your +current directory. + + +Edit initial cluster configuration +---------------------------------- + +You want to review the generated ``ceph.conf`` file and make sure that +the ``mon_host`` setting contains the IP addresses you would like the +monitors to bind to. These are the IPs that clients will initially +contact to authenticate to the cluster, and they need to be reachable +both by external client-facing hosts and internal cluster daemons. + +Installing packages +=================== + +To install the Ceph software on the servers, run:: + + ceph-deploy install HOST [HOST..] + +This installs the current default *stable* release. You can choose a +different release track with command line options, for example to use +a release candidate:: + + ceph-deploy install --testing HOST + +Or to test a development branch:: + + ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..] + + +Proxy or Firewall Installs +-------------------------- +If attempting to install behind a firewall or through a proxy you can +use the ``--no-adjust-repos`` that will tell ceph-deploy to skip any changes +to the distro's repository in order to install the packages and it will go +straight to package installation. + +That will allow an environment without internet access to point to *its own +repositories*. This means that those repositories will need to be properly +setup (and mirrored with all the necessary dependencies) before attempting an +install. + +Another alternative is to set the ``wget`` env variables to point to the right +hosts, for example, put following lines into ``/root/.wgetrc`` on each node +(since ceph-deploy runs wget as root):: + + http_proxy=http://host:port + ftp_proxy=http://host:port + https_proxy=http://host:port + + + +Deploying monitors +================== + +To actually deploy ``ceph-mon`` to the hosts you chose, run:: + + ceph-deploy mon create HOST [HOST..] + +Without explicit hosts listed, hosts in ``mon_initial_members`` in the +config file are deployed. That is, the hosts you passed to +``ceph-deploy new`` are the default value here. + +Gather keys +=========== + +To gather authenticate keys (for administering the cluster and +bootstrapping new nodes) to the local directory, run:: + + ceph-deploy gatherkeys HOST [HOST...] + +where ``HOST`` is one of the monitor hosts. + +Once these keys are in the local directory, you can provision new OSDs etc. + + +Deploying OSDs +============== + +To prepare a node for running OSDs, run:: + + ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...] + +After that, the hosts will be running OSDs for the given data disks. +If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be +created and GPT labels will be used to mark and automatically activate +OSD volumes. If an existing partition is specified, the partition +table will not be modified. If you want to destroy the existing +partition table on DISK first, you can include the ``--zap-disk`` +option. + +If there is already a prepared disk or directory that is ready to become an +OSD, you can also do:: + + ceph-deploy osd activate HOST:DIR[:JOURNAL] [...] + +This is useful when you are managing the mounting of volumes yourself. + + +Admin hosts +=========== + +To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring`` +keyring so that it can administer the cluster, run:: + + ceph-deploy admin HOST [HOST ...] + +Forget keys +=========== + +The ``new`` and ``gatherkeys`` put some Ceph authentication keys in keyrings in +the local directory. If you are worried about them being there for security +reasons, run:: + + ceph-deploy forgetkeys + +and they will be removed. If you need them again later to deploy additional +nodes, simply re-run:: + + ceph-deploy gatherkeys HOST [HOST...] + +and they will be retrieved from an existing monitor node. + +Multiple clusters +================= + +All of the above commands take a ``--cluster=NAME`` option, allowing +you to manage multiple clusters conveniently from one workstation. +For example:: + + ceph-deploy --cluster=us-west new + vi us-west.conf + ceph-deploy --cluster=us-west mon + +FAQ +=== + +Before anything +--------------- +Make sure you have the latest version of ``ceph-deploy``. It is actively +developed and releases are coming weekly (on average). The most recent versions +of ``ceph-deploy`` will have a ``--version`` flag you can use, otherwise check +with your package manager and update if there is anything new. + +Why is feature X not implemented? +--------------------------------- +Usually, features are added when/if it is sensible for someone that wants to +get started with ceph and said feature would make sense in that context. If +you believe this is the case and you've read "`what this tool is not`_" and +still think feature ``X`` should exist in ceph-deploy, open a feature request +in the ceph tracker: http://tracker.ceph.com/projects/ceph-deploy/issues + +A command gave me an error, what is going on? +--------------------------------------------- +Most of the commands for ``ceph-deploy`` are meant to be run remotely in a host +that you have configured when creating the initial config. If a given command +is not working as expected try to run the command that failed in the remote +host and assert the behavior there. + +If the behavior in the remote host is the same, then it is probably not +something wrong with ``ceph-deploy`` per-se. Make sure you capture the output +of both the ``ceph-deploy`` output and the output of the command in the remote +host. + +Issues with monitors +-------------------- +If your monitors are not starting, make sure that the ``{hostname}`` you used +when you ran ``ceph-deploy mon create {hostname}`` match the actual ``hostname -s`` +in the remote host. + +Newer versions of ``ceph-deploy`` should warn you if the results are different +but that might prevent the monitors from reaching quorum. + +Developing ceph-deploy +====================== +Now that you have cracked your teeth on Ceph, you might find that you want to +contribute to ceph-deploy. + +Resources +--------- +Bug tracking: http://tracker.ceph.com/projects/ceph-deploy/issues + +Mailing list and IRC info is the same as ceph http://ceph.com/resources/mailing-list-irc/ + +Submitting Patches +------------------ +Please add test cases to cover any code you add. You can test your changes +by running ``tox`` (You will also need ``mock`` and ``pytest`` ) from inside +the git clone + +When creating a commit message please use ``git commit -s`` or otherwise add +``Signed-off-by: Your Name `` to your commit message. + +Patches can then be submitted by a pull request on GitHub. diff --git a/bootstrap b/bootstrap new file mode 100755 index 0000000..2580fbc --- /dev/null +++ b/bootstrap @@ -0,0 +1,103 @@ +#!/bin/sh +set -e + +# Use `./bootstrap 3` for Python 3 +python_executable="python$1" + +if ! [ -d virtualenv ]; then + if command -v lsb_release >/dev/null 2>&1; then + if [ "$1" = "2" ]; then + python_package="python" + else + python_package="python$1" + fi + + case "$(lsb_release --id --short)" in + Ubuntu|Debian) + for package in "$python_package" python-virtualenv; do + if [ "$(dpkg --status -- "$package" 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then + # add a space after old values + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages, please install them:" 1>&2 + echo " sudo apt-get install $missing" + exit 1 + fi + ;; + + Arch) + for package in "$python_package" python-virtualenv; do + if ! pacman -Qs -- "$package" >/dev/null 2>&1; then + # add a space after old values + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages, please install them:" 1>&2 + echo " pacman -Sy $missing" + exit 1 + fi + ;; + esac + + case "$(lsb_release --id --short | awk '{print $1}')" in + openSUSE|SUSE) + for package in "$python_package" python-virtualenv; do + if [ "$(rpm -qa "$package" 2>/dev/null)" == "" ]; then + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages, please install them:" 1>&2 + echo " sudo zypper install $missing" + exit 1 + fi + ;; + esac + + fi + + if [ -f /etc/redhat-release ]; then + case "$(cat /etc/redhat-release | awk '{print $1}')" in + CentOS) + for package in python-virtualenv; do + if [ "$(rpm -qa "$package" 2>/dev/null)" == "" ]; then + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required packages, please install them:" 1>&2 + echo " sudo yum install $missing" + exit 1 + fi + + if [ "${1:-2}" -ge 3 ]; then + if ! command -v "$python_executable" >/dev/null 2>&1; then + echo "$0: missing Python ($python_executable), please install it" + exit 1 + fi + + # Make a temporary virtualenv to get a fresh version of virtualenv + # and use it to make a Python 3 virtualenv, + # because CentOS 7 has buggy old virtualenv (v1.10.1) + # https://github.com/pypa/virtualenv/issues/463 + + virtualenv virtualenv_tmp + virtualenv_tmp/bin/pip install --upgrade setuptools + virtualenv_tmp/bin/pip install --upgrade virtualenv + virtualenv_tmp/bin/virtualenv -p "$python_executable" virtualenv + rm -rf virtualenv_tmp + else + virtualenv virtualenv + fi + ;; + esac + fi +fi + +test -d virtualenv || virtualenv -p "$python_executable" virtualenv +./virtualenv/bin/pip install --upgrade setuptools +./virtualenv/bin/python setup.py develop +test -e ceph-deploy || ln -s virtualenv/bin/ceph-deploy . diff --git a/ceph-deploy.spec b/ceph-deploy.spec new file mode 100644 index 0000000..8218266 --- /dev/null +++ b/ceph-deploy.spec @@ -0,0 +1,71 @@ +# +# spec file for package ceph-deploy +# + +%if ! (0%{?fedora} > 12 || 0%{?rhel} > 5) +%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")} +%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")} +%endif + +################################################################################# +# common +################################################################################# +Name: ceph-deploy +Version: 2.0.1 +Release: 0 +Summary: Admin and deploy tool for Ceph +License: MIT +Group: System/Filesystems +URL: http://ceph.com/ +Source0: %{name}-%{version}.tar.bz2 +BuildRoot: %{_tmppath}/%{name}-%{version}-build +BuildRequires: python-devel +BuildRequires: python-setuptools +BuildRequires: python-virtualenv +BuildRequires: python-mock +BuildRequires: python-tox +%if 0%{?suse_version} +BuildRequires: python-pytest +%else +BuildRequires: pytest +%endif +BuildRequires: git +Requires: python-argparse +Requires: python-remoto +%if 0%{?suse_version} && 0%{?suse_version} <= 1110 +%{!?python_sitelib: %global python_sitelib %(python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} +%else +BuildArch: noarch +%endif + +################################################################################# +# specific +################################################################################# +%if 0%{defined suse_version} +%py_requires +%endif + +%description +An easy to use admin tool for deploy ceph storage clusters. + +%prep +#%%setup -q -n %%{name} +%setup -q + +%build +#python setup.py build + +%install +python setup.py install --prefix=%{_prefix} --root=%{buildroot} +install -m 0755 -D scripts/ceph-deploy $RPM_BUILD_ROOT/usr/bin + +%clean +[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT" + +%files +%defattr(-,root,root) +%doc LICENSE README.rst +%{_bindir}/ceph-deploy +%{python_sitelib}/* + +%changelog diff --git a/ceph_deploy/__init__.py b/ceph_deploy/__init__.py new file mode 100644 index 0000000..5f4a28a --- /dev/null +++ b/ceph_deploy/__init__.py @@ -0,0 +1,3 @@ + +__version__ = '2.0.2' + diff --git a/ceph_deploy/admin.py b/ceph_deploy/admin.py new file mode 100644 index 0000000..7212a1b --- /dev/null +++ b/ceph_deploy/admin.py @@ -0,0 +1,61 @@ +import logging +from ceph_deploy import exc +from ceph_deploy import conf +from ceph_deploy.cliutil import priority +from ceph_deploy import hosts + +LOG = logging.getLogger(__name__) + + +def admin(args): + conf_data = conf.ceph.load_raw(args) + + try: + with open('%s.client.admin.keyring' % args.cluster, 'rb') as f: + keyring = f.read() + except: + raise RuntimeError('%s.client.admin.keyring not found' % + args.cluster) + + errors = 0 + for hostname in args.client: + LOG.debug('Pushing admin keys and conf to %s', hostname) + try: + distro = hosts.get(hostname, username=args.username) + + distro.conn.remote_module.write_conf( + args.cluster, + conf_data, + args.overwrite_conf, + ) + + distro.conn.remote_module.write_file( + '/etc/ceph/%s.client.admin.keyring' % args.cluster, + keyring, + 0o600, + ) + + distro.conn.exit() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to configure %d admin hosts' % errors) + + +@priority(70) +def make(parser): + """ + Push configuration and client.admin key to a remote host. + """ + parser.add_argument( + 'client', + metavar='HOST', + nargs='+', + help='host to configure for Ceph administration', + ) + parser.set_defaults( + func=admin, + ) diff --git a/ceph_deploy/cli.py b/ceph_deploy/cli.py new file mode 100644 index 0000000..ccd6df8 --- /dev/null +++ b/ceph_deploy/cli.py @@ -0,0 +1,184 @@ +import pkg_resources +import argparse +import logging +import textwrap +import os +import sys + +import ceph_deploy +from ceph_deploy import exc +from ceph_deploy.util import log +from ceph_deploy.util.decorators import catches + +LOG = logging.getLogger(__name__) + + +__header__ = textwrap.dedent(""" + -^- + / \\ + |O o| ceph-deploy v%s + ).-.( + '/|||\` + | '|` | + '|` + +Full documentation can be found at: http://ceph.com/ceph-deploy/docs +""" % ceph_deploy.__version__) + + +def log_flags(args, logger=None): + logger = logger or LOG + logger.info('ceph-deploy options:') + + for k, v in args.__dict__.items(): + if k.startswith('_'): + continue + logger.info(' %-30s: %s' % (k, v)) + + +def get_parser(): + epilog_text = "See 'ceph-deploy --help' for help on a specific command" + parser = argparse.ArgumentParser( + prog='ceph-deploy', + formatter_class=argparse.RawDescriptionHelpFormatter, + description='Easy Ceph deployment\n\n%s' % __header__, + epilog=epilog_text + ) + verbosity = parser.add_mutually_exclusive_group(required=False) + verbosity.add_argument( + '-v', '--verbose', + action='store_true', dest='verbose', default=False, + help='be more verbose', + ) + verbosity.add_argument( + '-q', '--quiet', + action='store_true', dest='quiet', + help='be less verbose', + ) + parser.add_argument( + '--version', + action='version', + version='%s' % ceph_deploy.__version__, + help='the current installed version of ceph-deploy', + ) + parser.add_argument( + '--username', + help='the username to connect to the remote host', + ) + parser.add_argument( + '--overwrite-conf', + action='store_true', + help='overwrite an existing conf file on remote host (if present)', + ) + parser.add_argument( + '--ceph-conf', + dest='ceph_conf', + help='use (or reuse) a given ceph.conf file', + ) + sub = parser.add_subparsers( + title='commands', + metavar='COMMAND', + help='description', + ) + sub.required = True + entry_points = [ + (ep.name, ep.load()) + for ep in pkg_resources.iter_entry_points('ceph_deploy.cli') + ] + entry_points.sort( + key=lambda name_fn: getattr(name_fn[1], 'priority', 100), + ) + for (name, fn) in entry_points: + p = sub.add_parser( + name, + description=fn.__doc__, + help=fn.__doc__, + ) + if not os.environ.get('CEPH_DEPLOY_TEST'): + p.set_defaults(cd_conf=ceph_deploy.conf.cephdeploy.load()) + + # flag if the default release is being used + p.set_defaults(default_release=False) + fn(p) + p.required = True + parser.set_defaults( + cluster='ceph', + ) + + return parser + + +@catches((KeyboardInterrupt, RuntimeError, exc.DeployError,), handle_all=True) +def _main(args=None, namespace=None): + # Set console logging first with some defaults, to prevent having exceptions + # before hitting logging configuration. The defaults can/will get overridden + # later. + + # Console Logger + sh = logging.StreamHandler() + sh.setFormatter(log.color_format()) + sh.setLevel(logging.WARNING) + + # because we're in a module already, __name__ is not the ancestor of + # the rest of the package; use the root as the logger for everyone + root_logger = logging.getLogger() + + # allow all levels at root_logger, handlers control individual levels + root_logger.setLevel(logging.DEBUG) + root_logger.addHandler(sh) + + parser = get_parser() + if len(sys.argv) < 2: + parser.print_help() + sys.exit() + else: + args = parser.parse_args(args=args, namespace=namespace) + + console_loglevel = logging.DEBUG # start at DEBUG for now + if args.quiet: + console_loglevel = logging.WARNING + if args.verbose: + console_loglevel = logging.DEBUG + + # Console Logger + sh.setLevel(console_loglevel) + + # File Logger + fh = logging.FileHandler('ceph-deploy-{cluster}.log'.format(cluster=args.cluster)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(logging.Formatter(log.FILE_FORMAT)) + + root_logger.addHandler(fh) + + # Reads from the config file and sets values for the global + # flags and the given sub-command + # the one flag that will never work regardless of the config settings is + # logging because we cannot set it before hand since the logging config is + # not ready yet. This is the earliest we can do. + args = ceph_deploy.conf.cephdeploy.set_overrides(args) + + LOG.info("Invoked (%s): %s" % ( + ceph_deploy.__version__, + ' '.join(sys.argv)) + ) + log_flags(args) + + return args.func(args) + + +def main(args=None, namespace=None): + try: + _main(args=args, namespace=namespace) + finally: + # This block is crucial to avoid having issues with + # Python spitting non-sense thread exceptions. We have already + # handled what we could, so close stderr and stdout. + if not os.environ.get('CEPH_DEPLOY_TEST'): + try: + sys.stdout.close() + except: + pass + try: + sys.stderr.close() + except: + pass diff --git a/ceph_deploy/cliutil.py b/ceph_deploy/cliutil.py new file mode 100644 index 0000000..d273f31 --- /dev/null +++ b/ceph_deploy/cliutil.py @@ -0,0 +1,8 @@ +def priority(num): + """ + Decorator to add a `priority` attribute to the function. + """ + def add_priority(fn): + fn.priority = num + return fn + return add_priority diff --git a/ceph_deploy/conf/__init__.py b/ceph_deploy/conf/__init__.py new file mode 100644 index 0000000..8599a6a --- /dev/null +++ b/ceph_deploy/conf/__init__.py @@ -0,0 +1,2 @@ +from . import ceph # noqa +from . import cephdeploy # noqa diff --git a/ceph_deploy/conf/ceph.py b/ceph_deploy/conf/ceph.py new file mode 100644 index 0000000..e14ad6d --- /dev/null +++ b/ceph_deploy/conf/ceph.py @@ -0,0 +1,108 @@ +try: + import configparser +except ImportError: + import ConfigParser as configparser +import contextlib +import sys + +from ceph_deploy import exc + + +class _TrimIndentFile(object): + def __init__(self, fp): + self.fp = fp + + def readline(self): + line = self.fp.readline() + return line.lstrip(' \t') + + def __iter__(self): + return iter(self.readline, '') + +class CephConf(configparser.RawConfigParser): + def __init__(self, *args, **kwargs): + if sys.version_info >= (3, 2): + kwargs.setdefault('strict', False) + # super() cannot be used with an old-style class + configparser.RawConfigParser.__init__(self, *args, **kwargs) + + def optionxform(self, s): + s = s.replace('_', ' ') + s = '_'.join(s.split()) + return s + + def safe_get(self, section, key): + """ + Attempt to get a configuration value from a certain section + in a ``cfg`` object but returning None if not found. Avoids the need + to be doing try/except {ConfigParser Exceptions} every time. + """ + try: + #Use full parent function so we can replace it in the class + # if desired + return configparser.RawConfigParser.get(self, section, key) + except (configparser.NoSectionError, + configparser.NoOptionError): + return None + + +def parse(fp): + cfg = CephConf() + ifp = _TrimIndentFile(fp) + cfg.readfp(ifp) + return cfg + + +def load(args): + """ + :param args: Will be used to infer the proper configuration name, or + if args.ceph_conf is passed in, that will take precedence + """ + path = args.ceph_conf or '{cluster}.conf'.format(cluster=args.cluster) + + try: + f = open(path) + except IOError as e: + raise exc.ConfigError( + "%s; has `ceph-deploy new` been run in this directory?" % e + ) + else: + with contextlib.closing(f): + return parse(f) + + +def load_raw(args): + """ + Read the actual file *as is* without parsing/modifiying it + so that it can be written maintaining its same properties. + + :param args: Will be used to infer the proper configuration name + :paran path: alternatively, use a path for any configuration file loading + """ + path = args.ceph_conf or '{cluster}.conf'.format(cluster=args.cluster) + try: + with open(path) as ceph_conf: + return ceph_conf.read() + except (IOError, OSError) as e: + raise exc.ConfigError( + "%s; has `ceph-deploy new` been run in this directory?" % e + ) + + +def write_conf(cluster, conf, overwrite): + """ write cluster configuration to /etc/ceph/{cluster}.conf """ + import os + + path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster) + tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid()) + + if os.path.exists(path): + with open(path) as f: + old = f.read() + if old != conf and not overwrite: + raise RuntimeError('config file %s exists with different content; use --overwrite-conf to overwrite' % path) + with open(tmp, 'w') as f: + f.write(conf) + f.flush() + os.fsync(f) + os.rename(tmp, path) diff --git a/ceph_deploy/conf/cephdeploy.py b/ceph_deploy/conf/cephdeploy.py new file mode 100644 index 0000000..15854fd --- /dev/null +++ b/ceph_deploy/conf/cephdeploy.py @@ -0,0 +1,218 @@ +try: + import configparser +except ImportError: + import ConfigParser as configparser +import logging +import os +from os import path +import re + +from ceph_deploy.util.paths import gpg + +logger = logging.getLogger('ceph_deploy.conf') + +cd_conf_template = """ +# +# ceph-deploy configuration file +# + +[ceph-deploy-global] +# Overrides for some of ceph-deploy's global flags, like verbosity or cluster +# name + +[ceph-deploy-install] +# Overrides for some of ceph-deploy's install flags, like version of ceph to +# install + + +# +# Repositories section +# + +# yum repos: +# [myrepo] +# baseurl = http://gitbuilder.ceph.com/ceph-rpm-centos7-x86_64-basic/ref/hammer +# gpgurl = https://download.ceph.com/keys/autobuild.asc +# default = True +# extra-repos = cephrepo # will install the cephrepo file too +# +# [cephrepo] +# name=ceph repo noarch packages +# baseurl=http://download.ceph.com/rpm-hammer/el6/noarch +# enabled=1 +# gpgcheck=1 +# type=rpm-md +# gpgkey=https://download.ceph.com/keys/release.asc + +# apt repos: +# [myrepo] +# baseurl = http://gitbuilder.ceph.com/ceph-deb-trusty-x86_64-basic/ref/hammer +# gpgurl = https://download.ceph.com/keys/autobuild.asc +# default = True +# extra-repos = cephrepo # will install the cephrepo file too +# +# [cephrepo] +# baseurl=http://download.ceph.com/debian-hammer +# gpgkey=https://download.ceph.com/keys/release.asc +""".format(gpgurl=gpg.url('release')) + + +def location(): + """ + Find and return the location of the ceph-deploy configuration file. If this + file does not exist, create one in a default location. + """ + return _locate_or_create() + + +def load(): + parser = Conf() + parser.read(location()) + return parser + + +def _locate_or_create(): + home_config = path.expanduser('~/.cephdeploy.conf') + # With order of importance + locations = [ + path.join(os.getcwd(), 'cephdeploy.conf'), + home_config, + ] + + for location in locations: + if path.exists(location): + logger.debug('found configuration file at: %s' % location) + return location + logger.info('could not find configuration file, will create one in $HOME') + create_stub(home_config) + return home_config + + +def create_stub(_path=None): + _path = _path or path.expanduser('~/.cephdeploy.conf') + logger.debug('creating new configuration file: %s' % _path) + with open(_path, 'w') as cd_conf: + cd_conf.write(cd_conf_template) + + +def set_overrides(args, _conf=None): + """ + Read the configuration file and look for ceph-deploy sections + to set flags/defaults from the values found. This will alter the + ``args`` object that is created by argparse. + """ + # Get the subcommand name to avoid overwritting values from other + # subcommands that are not going to be used + subcommand = args.func.__name__ + command_section = 'ceph-deploy-%s' % subcommand + conf = _conf or load() + + for section_name in conf.sections(): + if section_name in ['ceph-deploy-global', command_section]: + override_subcommand( + section_name, + conf.items(section_name), + args + ) + return args + + +def override_subcommand(section_name, section_items, args): + """ + Given a specific section in the configuration file that maps to + a subcommand (except for the global section) read all the keys that are + actual argument flags and slap the values for that one subcommand. + + Return the altered ``args`` object at the end. + """ + # XXX We are not coercing here any int-like values, so if ArgParse + # does that in the CLI we are totally non-compliant with that expectation + # but we will try and infer a few boolean values + + # acceptable boolean states for flags + _boolean_states = {'yes': True, 'true': True, 'on': True, + 'no': False, 'false': False, 'off': False} + + for k, v, in section_items: + # get the lower case value of `v`, fallback to the booleanized + # (original) value of `v` + try: + normalized_value = v.lower() + except AttributeError: + # probably not a string object that has .lower + normalized_value = v + value = _boolean_states.get(normalized_value, v) + setattr(args, k, value) + return args + + +class Conf(configparser.SafeConfigParser): + """ + Subclasses from SafeConfigParser to give a few helpers for the ceph-deploy + configuration. Specifically, it addresses the need to work with custom + sections that signal the usage of custom repositories. + """ + + reserved_sections = ['ceph-deploy-global', 'ceph-deploy-install'] + + def get_safe(self, section, key, default=None): + """ + Attempt to get a configuration value from a certain section + in a ``cfg`` object but returning None if not found. Avoids the need + to be doing try/except {ConfigParser Exceptions} every time. + """ + try: + return self.get(section, key) + except (configparser.NoSectionError, configparser.NoOptionError): + return default + + def get_repos(self): + """ + Return all the repo sections from the config, excluding the ceph-deploy + reserved sections. + """ + return [ + section for section in self.sections() + if section not in self.reserved_sections + ] + + @property + def has_repos(self): + """ + boolean to reflect having (or not) any repository sections + """ + for section in self.sections(): + if section not in self.reserved_sections: + return True + return False + + def get_list(self, section, key): + """ + Assumes that the value for a given key is going to be a list + separated by commas. It gets rid of trailing comments. + If just one item is present it returns a list with a single item, if no + key is found an empty list is returned. + """ + value = self.get_safe(section, key, []) + if value == []: + return value + + # strip comments + value = re.split(r'\s+#', value)[0] + + # split on commas + value = value.split(',') + + # strip spaces + return [x.strip() for x in value] + + def get_default_repo(self): + """ + Go through all the repositories defined in the config file and search + for a truthy value for the ``default`` key. If there isn't any return + None. + """ + for repo in self.get_repos(): + if self.get_safe(repo, 'default') and self.getboolean(repo, 'default'): + return repo + return False diff --git a/ceph_deploy/config.py b/ceph_deploy/config.py new file mode 100644 index 0000000..1f85ed7 --- /dev/null +++ b/ceph_deploy/config.py @@ -0,0 +1,111 @@ +import logging +import os.path + +from ceph_deploy import exc +from ceph_deploy import conf +from ceph_deploy.cliutil import priority +from ceph_deploy import hosts + +LOG = logging.getLogger(__name__) + + +def config_push(args): + conf_data = conf.ceph.load_raw(args) + + errors = 0 + for hostname in args.client: + LOG.debug('Pushing config to %s', hostname) + try: + distro = hosts.get(hostname, username=args.username) + + distro.conn.remote_module.write_conf( + args.cluster, + conf_data, + args.overwrite_conf, + ) + + distro.conn.exit() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to config %d hosts' % errors) + + +def config_pull(args): + + topath = '{cluster}.conf'.format(cluster=args.cluster) + frompath = '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster) + + errors = 0 + for hostname in args.client: + try: + LOG.debug('Checking %s for %s', hostname, frompath) + distro = hosts.get(hostname, username=args.username) + conf_file_contents = distro.conn.remote_module.get_file(frompath) + + if conf_file_contents is not None: + LOG.debug('Got %s from %s', frompath, hostname) + if os.path.exists(topath): + with open(topath, 'rb') as f: + existing = f.read() + if existing != conf_file_contents and not args.overwrite_conf: + LOG.error('local config file %s exists with different content; use --overwrite-conf to overwrite' % topath) + raise + + with open(topath, 'wb') as f: + f.write(conf_file_contents) + return + distro.conn.exit() + LOG.debug('Empty or missing %s on %s', frompath, hostname) + except: + LOG.error('Unable to pull %s from %s', frompath, hostname) + finally: + errors += 1 + + raise exc.GenericError('Failed to fetch config from %d hosts' % errors) + + +def config(args): + if args.subcommand == 'push': + config_push(args) + elif args.subcommand == 'pull': + config_pull(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + + +@priority(70) +def make(parser): + """ + Copy ceph.conf to/from remote host(s) + """ + config_parser = parser.add_subparsers(dest='subcommand') + config_parser.required = True + + config_push = config_parser.add_parser( + 'push', + help='push Ceph config file to one or more remote hosts' + ) + config_push.add_argument( + 'client', + metavar='HOST', + nargs='+', + help='host(s) to push the config file to', + ) + + config_pull = config_parser.add_parser( + 'pull', + help='pull Ceph config file from one or more remote hosts' + ) + config_pull.add_argument( + 'client', + metavar='HOST', + nargs='+', + help='host(s) to pull the config file from', + ) + parser.set_defaults( + func=config, + ) diff --git a/ceph_deploy/connection.py b/ceph_deploy/connection.py new file mode 100644 index 0000000..fd71983 --- /dev/null +++ b/ceph_deploy/connection.py @@ -0,0 +1,44 @@ +import socket +from ceph_deploy.lib import remoto + + +def get_connection(hostname, username, logger, threads=5, use_sudo=None, detect_sudo=True): + """ + A very simple helper, meant to return a connection + that will know about the need to use sudo. + """ + if username: + hostname = "%s@%s" % (username, hostname) + try: + conn = remoto.Connection( + hostname, + logger=logger, + threads=threads, + detect_sudo=detect_sudo, + ) + + # Set a timeout value in seconds to disconnect and move on + # if no data is sent back. + conn.global_timeout = 300 + logger.debug("connected to host: %s " % hostname) + return conn + + except Exception as error: + msg = "connecting to host: %s " % hostname + errors = "resulted in errors: %s %s" % (error.__class__.__name__, error) + raise RuntimeError(msg + errors) + + +def get_local_connection(logger, use_sudo=False): + """ + Helper for local connections that are sometimes needed to operate + on local hosts + """ + return get_connection( + socket.gethostname(), # cannot rely on 'localhost' here + None, + logger=logger, + threads=1, + use_sudo=use_sudo, + detect_sudo=False + ) diff --git a/ceph_deploy/exc.py b/ceph_deploy/exc.py new file mode 100644 index 0000000..064fb9b --- /dev/null +++ b/ceph_deploy/exc.py @@ -0,0 +1,127 @@ +class DeployError(Exception): + """ + Unknown deploy error + """ + + def __str__(self): + doc = self.__doc__.strip() + return ': '.join([doc] + [str(a) for a in self.args]) + + +class UnableToResolveError(DeployError): + """ + Unable to resolve host + """ + + +class ClusterExistsError(DeployError): + """ + Cluster config exists already + """ + + +class ConfigError(DeployError): + """ + Cannot load config + """ + + +class NeedHostError(DeployError): + """ + No hosts specified to deploy to. + """ + + +class NeedMonError(DeployError): + """ + Cannot find nodes with ceph-mon. + """ + + +class NeedDiskError(DeployError): + """ + Must supply disk/path argument + """ + + +class UnsupportedPlatform(DeployError): + """ + Platform is not supported + """ + def __init__(self, distro, codename, release): + self.distro = distro + self.codename = codename + self.release = release + + def __str__(self): + return '{doc}: {distro} {codename} {release}'.format( + doc=self.__doc__.strip(), + distro=self.distro, + codename=self.codename, + release=self.release, + ) + + +class ExecutableNotFound(DeployError): + """ + Could not locate executable + """ + def __init__(self, executable, host): + self.executable = executable + self.host = host + + def __str__(self): + return "{doc} '{executable}' make sure it is installed and available on {host}".format( + doc=self.__doc__.strip(), + executable=self.executable, + host=self.host, + ) + + +class MissingPackageError(DeployError): + """ + A required package or command is missing + """ + def __init__(self, message): + self.message = message + + def __str__(self): + return self.message + + +class GenericError(DeployError): + def __init__(self, message): + self.message = message + + def __str__(self): + return self.message + + +class ClusterNameError(DeployError): + """ + Problem encountered with custom cluster name + """ + def __init__(self, message): + self.message = message + + def __str__(self): + return self.message + + +class KeyNotFoundError(DeployError): + """ + Could not find keyring file + """ + def __init__(self, keyring, hosts): + self.keyring = keyring + self.hosts = hosts + + def __str__(self): + return '{doc}: {keys}'.format( + doc=self.__doc__.strip(), + keys=', '.join( + [self.keyring.format(hostname=host) + + " on host {hostname}".format(hostname=host) + for host in self.hosts] + ) + ) diff --git a/ceph_deploy/forgetkeys.py b/ceph_deploy/forgetkeys.py new file mode 100644 index 0000000..faa376e --- /dev/null +++ b/ceph_deploy/forgetkeys.py @@ -0,0 +1,37 @@ +import logging +import errno + +from .cliutil import priority + + +LOG = logging.getLogger(__name__) + + +def forgetkeys(args): + import os + for f in [ + 'mon', + 'client.admin', + 'bootstrap-osd', + 'bootstrap-mds', + 'bootstrap-rgw', + ]: + try: + os.unlink('{cluster}.{what}.keyring'.format( + cluster=args.cluster, + what=f, + )) + except OSError as e: + if e.errno == errno.ENOENT: + pass + else: + raise + +@priority(100) +def make(parser): + """ + Remove authentication keys from the local directory. + """ + parser.set_defaults( + func=forgetkeys, + ) diff --git a/ceph_deploy/gatherkeys.py b/ceph_deploy/gatherkeys.py new file mode 100644 index 0000000..ebd4158 --- /dev/null +++ b/ceph_deploy/gatherkeys.py @@ -0,0 +1,284 @@ +import errno +import os.path +import logging +import json +import tempfile +import shutil +import time + +from ceph_deploy import hosts +from ceph_deploy.cliutil import priority +from ceph_deploy.lib import remoto +import ceph_deploy.util.paths.mon + +LOG = logging.getLogger(__name__) + + +def _keyring_equivalent(keyring_one, keyring_two): + """ + Check two keyrings are identical + """ + def keyring_extract_key(file_path): + """ + Cephx keyring files may or may not have white space before some lines. + They may have some values in quotes, so a safe way to compare is to + extract the key. + """ + with open(file_path) as f: + for line in f: + content = line.strip() + if len(content) == 0: + continue + split_line = content.split('=') + if split_line[0].strip() == 'key': + return "=".join(split_line[1:]).strip() + raise RuntimeError("File '%s' is not a keyring" % file_path) + key_one = keyring_extract_key(keyring_one) + key_two = keyring_extract_key(keyring_two) + return key_one == key_two + + +def keytype_path_to(args, keytype): + """ + Get the local filename for a keyring type + """ + if keytype == "admin": + return '{cluster}.client.admin.keyring'.format( + cluster=args.cluster) + if keytype == "mon": + return '{cluster}.mon.keyring'.format( + cluster=args.cluster) + return '{cluster}.bootstrap-{what}.keyring'.format( + cluster=args.cluster, + what=keytype) + + +def keytype_identity(keytype): + """ + Get the keyring identity from keyring type. + + This is used in authentication with keyrings and generating keyrings. + """ + ident_dict = { + 'admin' : 'client.admin', + 'mds' : 'client.bootstrap-mds', + 'mgr' : 'client.bootstrap-mgr', + 'osd' : 'client.bootstrap-osd', + 'rgw' : 'client.bootstrap-rgw', + 'mon' : 'mon.' + } + return ident_dict.get(keytype, None) + + +def keytype_capabilities(keytype): + """ + Get the capabilities of a keyring from keyring type. + """ + cap_dict = { + 'admin' : [ + 'osd', 'allow *', + 'mds', 'allow *', + 'mon', 'allow *', + 'mgr', 'allow *' + ], + 'mds' : [ + 'mon', 'allow profile bootstrap-mds' + ], + 'mgr' : [ + 'mon', 'allow profile bootstrap-mgr' + ], + 'osd' : [ + 'mon', 'allow profile bootstrap-osd' + ], + 'rgw': [ + 'mon', 'allow profile bootstrap-rgw' + ] + } + return cap_dict.get(keytype, None) + + +def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir): + """ + Get or create the keyring from the mon using the mon keyring by keytype and + copy to dest_dir + """ + args_prefix = [ + '/usr/bin/ceph', + '--connect-timeout=25', + '--cluster={cluster}'.format( + cluster=args.cluster), + '--name', 'mon.', + '--keyring={keypath}'.format( + keypath=keypath), + ] + + identity = keytype_identity(keytype) + if identity is None: + raise RuntimeError('Could not find identity for keytype:%s' % keytype) + capabilites = keytype_capabilities(keytype) + if capabilites is None: + raise RuntimeError('Could not find capabilites for keytype:%s' % keytype) + + # First try getting the key if it already exists, to handle the case where + # it exists but doesn't match the caps we would pass into get-or-create. + # This is the same behvaior as in newer ceph-create-keys + out, err, code = remoto.process.check( + distro.conn, + args_prefix + ['auth', 'get', identity] + ) + if code == errno.ENOENT: + out, err, code = remoto.process.check( + distro.conn, + args_prefix + ['auth', 'get-or-create', identity] + capabilites + ) + if code != 0: + rlogger.error( + '"ceph auth get-or-create for keytype %s returned %s', + keytype, code + ) + for line in err: + rlogger.debug(line) + return False + keyring_name_local = keytype_path_to(args, keytype) + keyring_path_local = os.path.join(dest_dir, keyring_name_local) + with open(keyring_path_local, 'wb') as f: + for line in out: + f.write(line + b'\n') + return True + + +def gatherkeys_with_mon(args, host, dest_dir): + """ + Connect to mon and gather keys if mon is in quorum. + """ + distro = hosts.get(host, username=args.username) + remote_hostname = distro.conn.remote_module.shortname() + dir_keytype_mon = ceph_deploy.util.paths.mon.path(args.cluster, remote_hostname) + path_keytype_mon = "%s/keyring" % (dir_keytype_mon) + mon_key = distro.conn.remote_module.get_file(path_keytype_mon) + if mon_key is None: + LOG.warning("No mon key found in host: %s", host) + return False + mon_name_local = keytype_path_to(args, "mon") + mon_path_local = os.path.join(dest_dir, mon_name_local) + with open(mon_path_local, 'wb') as f: + f.write(mon_key) + rlogger = logging.getLogger(host) + path_asok = ceph_deploy.util.paths.mon.asok(args.cluster, remote_hostname) + out, err, code = remoto.process.check( + distro.conn, + [ + "/usr/bin/ceph", + "--connect-timeout=25", + "--cluster={cluster}".format( + cluster=args.cluster), + "--admin-daemon={asok}".format( + asok=path_asok), + "mon_status" + ] + ) + if code != 0: + rlogger.error('"ceph mon_status %s" returned %s', host, code) + for line in err: + rlogger.debug(line) + return False + try: + mon_status = json.loads(b''.join(out).decode('utf-8')) + except ValueError: + rlogger.error('"ceph mon_status %s" output was not json', host) + for line in out: + rlogger.error(line) + return False + mon_number = None + mon_map = mon_status.get('monmap') + if mon_map is None: + rlogger.error("could not find mon map for mons on '%s'", host) + return False + mon_quorum = mon_status.get('quorum') + if mon_quorum is None: + rlogger.error("could not find quorum for mons on '%s'" , host) + return False + mon_map_mons = mon_map.get('mons') + if mon_map_mons is None: + rlogger.error("could not find mons in monmap on '%s'", host) + return False + for mon in mon_map_mons: + if mon.get('name') == remote_hostname: + mon_number = mon.get('rank') + break + if mon_number is None: + rlogger.error("could not find '%s' in monmap", remote_hostname) + return False + if not mon_number in mon_quorum: + rlogger.error("Not yet quorum for '%s'", host) + return False + for keytype in ["admin", "mds", "mgr", "osd", "rgw"]: + if not gatherkeys_missing(args, distro, rlogger, path_keytype_mon, keytype, dest_dir): + # We will return failure if we fail to gather any key + rlogger.error("Failed to return '%s' key from host %s", keytype, host) + return False + return True + + +def gatherkeys(args): + """ + Gather keys from any mon and store in current working directory. + + Backs up keys from previous installs and stores new keys. + """ + oldmask = os.umask(0o77) + try: + try: + tmpd = tempfile.mkdtemp() + LOG.info("Storing keys in temp directory %s", tmpd) + sucess = False + for host in args.mon: + sucess = gatherkeys_with_mon(args, host, tmpd) + if sucess: + break + if not sucess: + LOG.error("Failed to connect to host:%s" ,', '.join(args.mon)) + raise RuntimeError('Failed to connect any mon') + had_error = False + date_string = time.strftime("%Y%m%d%H%M%S") + for keytype in ["admin", "mds", "mgr", "mon", "osd", "rgw"]: + filename = keytype_path_to(args, keytype) + tmp_path = os.path.join(tmpd, filename) + if not os.path.exists(tmp_path): + LOG.error("No key retrived for '%s'" , keytype) + had_error = True + continue + if not os.path.exists(filename): + LOG.info("Storing %s" % (filename)) + shutil.move(tmp_path, filename) + continue + if _keyring_equivalent(tmp_path, filename): + LOG.info("keyring '%s' already exists" , filename) + continue + backup_keyring = "%s-%s" % (filename, date_string) + LOG.info("Replacing '%s' and backing up old key as '%s'", filename, backup_keyring) + shutil.copy(filename, backup_keyring) + shutil.move(tmp_path, filename) + if had_error: + raise RuntimeError('Failed to get all key types') + finally: + LOG.info("Destroy temp directory %s" %(tmpd)) + shutil.rmtree(tmpd) + finally: + os.umask(oldmask) + + +@priority(40) +def make(parser): + """ + Gather authentication keys for provisioning new nodes. + """ + parser.add_argument( + 'mon', + metavar='HOST', + nargs='+', + help='monitor host to pull keys from', + ) + parser.set_defaults( + func=gatherkeys, + ) diff --git a/ceph_deploy/hosts/__init__.py b/ceph_deploy/hosts/__init__.py new file mode 100644 index 0000000..6bb5209 --- /dev/null +++ b/ceph_deploy/hosts/__init__.py @@ -0,0 +1,149 @@ +""" +We deal (mostly) with remote hosts. To avoid special casing each different +commands (e.g. using `yum` as opposed to `apt`) we can make a one time call to +that remote host and set all the special cases for running commands depending +on the type of distribution/version we are dealing with. +""" +import logging +from ceph_deploy import exc +from ceph_deploy.util import versions +from ceph_deploy.hosts import debian, centos, fedora, suse, remotes, rhel, arch, alt +from ceph_deploy.connection import get_connection + +logger = logging.getLogger() + + +def get(hostname, + username=None, + fallback=None, + detect_sudo=True, + use_rhceph=False, + callbacks=None): + """ + Retrieve the module that matches the distribution of a ``hostname``. This + function will connect to that host and retrieve the distribution + information, then return the appropriate module and slap a few attributes + to that module defining the information it found from the hostname. + + For example, if host ``node1.example.com`` is an Ubuntu server, the + ``debian`` module would be returned and the following would be set:: + + module.name = 'ubuntu' + module.release = '12.04' + module.codename = 'precise' + + :param hostname: A hostname that is reachable/resolvable over the network + :param fallback: Optional fallback to use if no supported distro is found + :param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or + the community distro. Changes what host module is + returned for RHEL. + :params callbacks: A list of callables that accept one argument (the actual + module that contains the connection) that will be + called, in order at the end of the instantiation of the + module. + """ + conn = get_connection( + hostname, + username=username, + logger=logging.getLogger(hostname), + detect_sudo=detect_sudo + ) + try: + conn.import_module(remotes) + except IOError as error: + if 'already closed' in getattr(error, 'message', ''): + raise RuntimeError('remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname) + distro_name, release, codename = conn.remote_module.platform_information() + if not codename or not _get_distro(distro_name): + raise exc.UnsupportedPlatform( + distro=distro_name, + codename=codename, + release=release) + + machine_type = conn.remote_module.machine_type() + module = _get_distro(distro_name, use_rhceph=use_rhceph) + module.name = distro_name + module.normalized_name = _normalized_distro_name(distro_name) + module.normalized_release = _normalized_release(release) + module.distro = module.normalized_name + module.is_el = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific', 'oracle', 'virtuozzo'] + module.is_rpm = module.normalized_name in ['redhat', 'centos', + 'fedora', 'scientific', 'suse', 'oracle', 'virtuozzo', 'alt'] + module.is_deb = module.normalized_name in ['debian', 'ubuntu'] + module.is_pkgtarxz = module.normalized_name in ['arch'] + module.release = release + module.codename = codename + module.conn = conn + module.machine_type = machine_type + module.init = module.choose_init(module) + module.packager = module.get_packager(module) + # execute each callback if any + if callbacks: + for c in callbacks: + c(module) + return module + + +def _get_distro(distro, fallback=None, use_rhceph=False): + if not distro: + return + + distro = _normalized_distro_name(distro) + distributions = { + 'debian': debian, + 'ubuntu': debian, + 'centos': centos, + 'scientific': centos, + 'oracle': centos, + 'redhat': centos, + 'fedora': fedora, + 'suse': suse, + 'virtuozzo': centos, + 'arch': arch, + 'alt': alt + } + + if distro == 'redhat' and use_rhceph: + return rhel + else: + return distributions.get(distro) or _get_distro(fallback) + + +def _normalized_distro_name(distro): + distro = distro.lower() + if distro.startswith(('redhat', 'red hat')): + return 'redhat' + elif distro.startswith(('scientific', 'scientific linux')): + return 'scientific' + elif distro.startswith('oracle'): + return 'oracle' + elif distro.startswith(('suse', 'opensuse', 'sles')): + return 'suse' + elif distro.startswith('centos'): + return 'centos' + elif distro.startswith('linuxmint'): + return 'ubuntu' + elif distro.startswith('virtuozzo'): + return 'virtuozzo' + elif distro.startswith('arch'): + return 'arch' + elif distro.startswith(('alt', 'altlinux', 'basealt', 'alt linux')): + return 'alt' + return distro + + +def _normalized_release(release): + """ + A normalizer function to make sense of distro + release versions. + + Returns an object with: major, minor, patch, and garbage + + These attributes can be accessed as ints with prefixed "int" + attribute names, for example: + + normalized_version.int_major + """ + # TODO: at some point deprecate this function so that we just + # use this class directly (and update every test that calls it + return versions.NormalizedVersion(release) diff --git a/ceph_deploy/hosts/alt/__init__.py b/ceph_deploy/hosts/alt/__init__.py new file mode 100644 index 0000000..a4e53f3 --- /dev/null +++ b/ceph_deploy/hosts/alt/__init__.py @@ -0,0 +1,30 @@ +from . import mon # noqa +from .install import install # noqa +from .uninstall import uninstall # noqa +from ceph_deploy.util import pkg_managers +from ceph_deploy.util.system import is_systemd + +# Allow to set some information about this distro +# + +distro = None +release = None +codename = None + + +def choose_init(module): + """ + Select a init system + + Returns the name of a init system (systemd, sysvinit ...). + """ + + + if is_systemd(module.conn): + return 'systemd' + + return 'sysvinit' + + +def get_packager(module): + return pkg_managers.AptRpm(module) diff --git a/ceph_deploy/hosts/alt/install.py b/ceph_deploy/hosts/alt/install.py new file mode 100644 index 0000000..e795b1b --- /dev/null +++ b/ceph_deploy/hosts/alt/install.py @@ -0,0 +1,43 @@ +from ceph_deploy.hosts.centos.install import repo_install, mirror_install # noqa +from ceph_deploy.hosts.common import map_components +from ceph_deploy.util.system import enable_service, start_service + + +NON_SPLIT_PACKAGES = [ + 'ceph-osd', + 'ceph-mds', + 'ceph-mon', + 'ceph-mgr', +] + +SYSTEMD_UNITS = [ + 'ceph.target', + 'ceph-mds.target', + 'ceph-mon.target', + 'ceph-mgr.target', + 'ceph-osd.target', +] +SYSTEMD_UNITS_SKIP_START = [ + 'ceph-mgr.target', + 'ceph-mon.target', +] +SYSTEMD_UNITS_SKIP_ENABLE = [ +] + + +def install(distro, version_kind, version, adjust_repos, **kw): + packages = map_components( + NON_SPLIT_PACKAGES, + kw.pop('components', []) + ) + + if packages: + distro.packager.clean() + distro.packager.install(packages) + + # Start and enable services + for unit in SYSTEMD_UNITS: + if unit not in SYSTEMD_UNITS_SKIP_START: + start_service(distro.conn, unit) + if unit not in SYSTEMD_UNITS_SKIP_ENABLE: + enable_service(distro.conn, unit) diff --git a/ceph_deploy/hosts/alt/mon/__init__.py b/ceph_deploy/hosts/alt/mon/__init__.py new file mode 100644 index 0000000..f266fb0 --- /dev/null +++ b/ceph_deploy/hosts/alt/mon/__init__.py @@ -0,0 +1,2 @@ +from ceph_deploy.hosts.common import mon_add as add # noqa +from ceph_deploy.hosts.common import mon_create as create # noqa diff --git a/ceph_deploy/hosts/alt/uninstall.py b/ceph_deploy/hosts/alt/uninstall.py new file mode 100644 index 0000000..00c808a --- /dev/null +++ b/ceph_deploy/hosts/alt/uninstall.py @@ -0,0 +1,11 @@ +def uninstall(distro, purge=False): + packages = [ + 'ceph-common', + 'ceph-base', + 'ceph-radosgw', + 'python-module-cephfs', + 'python-module-rados', + 'python-module-rbd', + 'python-module-rgw', + ] + distro.packager.remove(packages) diff --git a/ceph_deploy/hosts/arch/__init__.py b/ceph_deploy/hosts/arch/__init__.py new file mode 100644 index 0000000..2e8c0ab --- /dev/null +++ b/ceph_deploy/hosts/arch/__init__.py @@ -0,0 +1,26 @@ +from . import mon # noqa +from ceph_deploy.hosts.centos.install import repo_install # noqa +from .install import install, mirror_install # noqa +from .uninstall import uninstall # noqa +from ceph_deploy.util import pkg_managers + +# Allow to set some information about this distro +# + +distro = None +release = None +codename = None + + +def choose_init(module): + """ + Select a init system + + Returns the name of a init system (upstart, sysvinit ...). + """ + + return 'systemd' + + +def get_packager(module): + return pkg_managers.Pacman(module) diff --git a/ceph_deploy/hosts/arch/install.py b/ceph_deploy/hosts/arch/install.py new file mode 100644 index 0000000..7625298 --- /dev/null +++ b/ceph_deploy/hosts/arch/install.py @@ -0,0 +1,49 @@ +from ceph_deploy.hosts.centos.install import repo_install, mirror_install # noqa +from ceph_deploy.hosts.common import map_components +from ceph_deploy.util.system import enable_service, start_service + + +NON_SPLIT_PACKAGES = [ + 'ceph-osd', + 'ceph-radosgw', + 'ceph-mds', + 'ceph-mon', + 'ceph-mgr', + 'ceph-common', + 'ceph-test' +] + +SYSTEMD_UNITS = [ + 'ceph.target', + 'ceph-radosgw.target', + 'ceph-rbd-mirror.target', + 'ceph-fuse.target', + 'ceph-mds.target', + 'ceph-mon.target', + 'ceph-mgr.target', + 'ceph-osd.target', +] +SYSTEMD_UNITS_SKIP_START = [ + 'ceph-mgr.target', + 'ceph-mon.target', +] +SYSTEMD_UNITS_SKIP_ENABLE = [ +] + + +def install(distro, version_kind, version, adjust_repos, **kw): + packages = map_components( + NON_SPLIT_PACKAGES, + kw.pop('components', []) + ) + + distro.packager.install( + packages + ) + + # Start and enable services + for unit in SYSTEMD_UNITS: + if unit not in SYSTEMD_UNITS_SKIP_START: + start_service(distro.conn, unit) + if unit not in SYSTEMD_UNITS_SKIP_ENABLE: + enable_service(distro.conn, unit) diff --git a/ceph_deploy/hosts/arch/mon/__init__.py b/ceph_deploy/hosts/arch/mon/__init__.py new file mode 100644 index 0000000..f266fb0 --- /dev/null +++ b/ceph_deploy/hosts/arch/mon/__init__.py @@ -0,0 +1,2 @@ +from ceph_deploy.hosts.common import mon_add as add # noqa +from ceph_deploy.hosts.common import mon_create as create # noqa diff --git a/ceph_deploy/hosts/arch/uninstall.py b/ceph_deploy/hosts/arch/uninstall.py new file mode 100644 index 0000000..0787392 --- /dev/null +++ b/ceph_deploy/hosts/arch/uninstall.py @@ -0,0 +1,50 @@ +import logging + +from ceph_deploy.util.system import disable_service, stop_service +from ceph_deploy.lib import remoto + + +SYSTEMD_UNITS = [ + 'ceph-mds.target', + 'ceph-mon.target', + 'ceph-osd.target', + 'ceph-radosgw.target', + 'ceph-fuse.target', + 'ceph-mgr.target', + 'ceph-rbd-mirror.target', + 'ceph.target', +] + + +def uninstall(distro, purge=False): + packages = [ + 'ceph', + ] + + hostname = distro.conn.hostname + LOG = logging.getLogger(hostname) + + # I need to stop and disable services prior package removal + LOG.info('stopping and disabling services on {}'.format(hostname)) + for unit in SYSTEMD_UNITS: + stop_service(distro.conn, unit) + disable_service(distro.conn, unit) + + # remoto.process.run( + # distro.conn, + # [ + # 'systemctl', + # 'daemon-reload', + # ] + # ) + + LOG.info('uninstalling packages on {}'.format(hostname)) + distro.packager.remove(packages) + + remoto.process.run( + distro.conn, + [ + 'systemctl', + 'reset-failed', + ] + ) diff --git a/ceph_deploy/hosts/centos/__init__.py b/ceph_deploy/hosts/centos/__init__.py new file mode 100644 index 0000000..daeae4c --- /dev/null +++ b/ceph_deploy/hosts/centos/__init__.py @@ -0,0 +1,34 @@ +from . import mon # noqa +from .install import install, mirror_install, repo_install, repository_url_part, rpm_dist # noqa +from .uninstall import uninstall # noqa +from ceph_deploy.util import pkg_managers +from ceph_deploy.util.system import is_systemd + +# Allow to set some information about this distro +# + +distro = None +release = None +codename = None + + +def choose_init(module): + """ + Select a init system + + Returns the name of a init system (upstart, sysvinit ...). + """ + + if module.normalized_release.int_major < 7: + return 'sysvinit' + + if is_systemd(module.conn): + return 'systemd' + + if not module.conn.remote_module.path_exists("/usr/lib/systemd/system/ceph.target"): + return 'sysvinit' + + return 'systemd' + +def get_packager(module): + return pkg_managers.Yum(module) diff --git a/ceph_deploy/hosts/centos/install.py b/ceph_deploy/hosts/centos/install.py new file mode 100644 index 0000000..d622635 --- /dev/null +++ b/ceph_deploy/hosts/centos/install.py @@ -0,0 +1,219 @@ +import logging +from ceph_deploy.util import templates +from ceph_deploy.lib import remoto +from ceph_deploy.hosts.common import map_components +from ceph_deploy.util.paths import gpg +from ceph_deploy.util import net + + +LOG = logging.getLogger(__name__) +NON_SPLIT_PACKAGES = ['ceph-osd', 'ceph-mon', 'ceph-mds'] + + +def rpm_dist(distro): + if distro.normalized_name in ['redhat', 'centos', 'scientific', 'oracle', 'virtuozzo'] and distro.normalized_release.int_major >= 6: + return 'el' + distro.normalized_release.major + return 'el6' + + +def repository_url_part(distro): + """ + Historically everything CentOS, RHEL, and Scientific has been mapped to + `el6` urls, but as we are adding repositories for `rhel`, the URLs should + map correctly to, say, `rhel6` or `rhel7`. + + This function looks into the `distro` object and determines the right url + part for the given distro, falling back to `el6` when all else fails. + + Specifically to work around the issue of CentOS vs RHEL:: + + >>> import platform + >>> platform.linux_distribution() + ('Red Hat Enterprise Linux Server', '7.0', 'Maipo') + + """ + if distro.normalized_release.int_major >= 6: + if distro.normalized_name == 'redhat': + return 'rhel' + distro.normalized_release.major + if distro.normalized_name in ['centos', 'scientific', 'oracle', 'virtuozzo']: + return 'el' + distro.normalized_release.major + + return 'el6' + + +def install(distro, version_kind, version, adjust_repos, **kw): + packages = map_components( + NON_SPLIT_PACKAGES, + kw.pop('components', []) + ) + + gpgcheck = kw.pop('gpgcheck', 1) + logger = distro.conn.logger + machine = distro.machine_type + repo_part = repository_url_part(distro) + dist = rpm_dist(distro) + + distro.packager.clean() + + # Get EPEL installed before we continue: + if adjust_repos: + distro.packager.install('epel-release') + distro.packager.install('yum-plugin-priorities') + distro.conn.remote_module.enable_yum_priority_obsoletes() + logger.warning('check_obsoletes has been enabled for Yum priorities plugin') + if version_kind in ['stable', 'testing']: + key = 'release' + else: + key = 'autobuild' + + if adjust_repos: + if version_kind in ['stable', 'testing']: + distro.packager.add_repo_gpg_key(gpg.url(key)) + + if version_kind == 'stable': + url = 'https://download.ceph.com/rpm-{version}/{repo}/'.format( + version=version, + repo=repo_part, + ) + elif version_kind == 'testing': + url = 'https://download.ceph.com/rpm-testing/{repo}/'.format(repo=repo_part) + + # remove any old ceph-release package from prevoius release + remoto.process.run( + distro.conn, + [ + 'yum', + 'remove', + '-y', + 'ceph-release' + ], + ) + remoto.process.run( + distro.conn, + [ + 'yum', + 'install', + '-y', + '{url}noarch/ceph-release-1-0.{dist}.noarch.rpm'.format(url=url, dist=dist), + ], + ) + + elif version_kind in ['dev', 'dev_commit']: + logger.info('skipping install of ceph-release package') + logger.info('repo file will be created manually') + shaman_url = 'https://shaman.ceph.com/api/repos/ceph/{version}/{sha1}/{distro}/{distro_version}/repo/?arch={arch}'.format( + distro=distro.normalized_name, + distro_version=distro.normalized_release.major, + version=kw['args'].dev, + sha1=kw['args'].dev_commit or 'latest', + arch=machine + ) + LOG.debug('fetching repo information from: %s' % shaman_url) + content = net.get_chacra_repo(shaman_url) + mirror_install( + distro, + '', # empty repo_url + None, # no need to use gpg here, repos are unsigned + adjust_repos=True, + extra_installs=False, + gpgcheck=gpgcheck, + repo_content=content + ) + + else: + raise Exception('unrecognized version_kind %s' % version_kind) + + # set the right priority + logger.warning('ensuring that /etc/yum.repos.d/ceph.repo contains a high priority') + distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source']) + logger.warning('altered ceph.repo priorities to contain: priority=1') + + if packages: + distro.packager.install(packages) + + +def mirror_install(distro, repo_url, gpg_url, adjust_repos, extra_installs=True, **kw): + packages = map_components( + NON_SPLIT_PACKAGES, + kw.pop('components', []) + ) + repo_url = repo_url.strip('/') # Remove trailing slashes + gpgcheck = kw.pop('gpgcheck', 1) + + distro.packager.clean() + + if adjust_repos: + if gpg_url: + distro.packager.add_repo_gpg_key(gpg_url) + + ceph_repo_content = templates.ceph_repo.format( + repo_url=repo_url, + gpg_url=gpg_url, + gpgcheck=gpgcheck, + ) + content = kw.get('repo_content', ceph_repo_content) + + distro.conn.remote_module.write_yum_repo(content) + # set the right priority + if distro.packager.name == 'yum': + distro.packager.install('yum-plugin-priorities') + distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source']) + distro.conn.logger.warning('altered ceph.repo priorities to contain: priority=1') + + + if extra_installs and packages: + distro.packager.install(packages) + + +def repo_install(distro, reponame, baseurl, gpgkey, **kw): + packages = map_components( + NON_SPLIT_PACKAGES, + kw.pop('components', []) + ) + logger = distro.conn.logger + # Get some defaults + name = kw.pop('name', '%s repo' % reponame) + enabled = kw.pop('enabled', 1) + gpgcheck = kw.pop('gpgcheck', 1) + install_ceph = kw.pop('install_ceph', False) + proxy = kw.pop('proxy', '') # will get ignored if empty + _type = 'repo-md' + baseurl = baseurl.strip('/') # Remove trailing slashes + + distro.packager.clean() + + if gpgkey: + distro.packager.add_repo_gpg_key(gpgkey) + + repo_content = templates.custom_repo( + reponame=reponame, + name=name, + baseurl=baseurl, + enabled=enabled, + gpgcheck=gpgcheck, + _type=_type, + gpgkey=gpgkey, + proxy=proxy, + **kw + ) + + distro.conn.remote_module.write_yum_repo( + repo_content, + "%s.repo" % reponame + ) + + repo_path = '/etc/yum.repos.d/{reponame}.repo'.format(reponame=reponame) + + # set the right priority + if kw.get('priority'): + if distro.packager.name == 'yum': + distro.packager.install('yum-plugin-priorities') + + distro.conn.remote_module.set_repo_priority([reponame], repo_path) + logger.warning('altered {reponame}.repo priorities to contain: priority=1'.format( + reponame=reponame) + ) + + # Some custom repos do not need to install ceph + if install_ceph and packages: + distro.packager.install(packages) diff --git a/ceph_deploy/hosts/centos/mon/__init__.py b/ceph_deploy/hosts/centos/mon/__init__.py new file mode 100644 index 0000000..f266fb0 --- /dev/null +++ b/ceph_deploy/hosts/centos/mon/__init__.py @@ -0,0 +1,2 @@ +from ceph_deploy.hosts.common import mon_add as add # noqa +from ceph_deploy.hosts.common import mon_create as create # noqa diff --git a/ceph_deploy/hosts/centos/uninstall.py b/ceph_deploy/hosts/centos/uninstall.py new file mode 100644 index 0000000..758c34c --- /dev/null +++ b/ceph_deploy/hosts/centos/uninstall.py @@ -0,0 +1,10 @@ +def uninstall(distro, purge=False): + packages = [ + 'ceph', + 'ceph-release', + 'ceph-common', + 'ceph-radosgw', + ] + + distro.packager.remove(packages) + distro.packager.clean() diff --git a/ceph_deploy/hosts/common.py b/ceph_deploy/hosts/common.py new file mode 100644 index 0000000..70345dc --- /dev/null +++ b/ceph_deploy/hosts/common.py @@ -0,0 +1,248 @@ +from ceph_deploy.util import paths +from ceph_deploy import conf +from ceph_deploy.lib import remoto +from ceph_deploy.util import constants +from ceph_deploy.util import system + + +def ceph_version(conn): + """ + Log the remote ceph-version by calling `ceph --version` + """ + return remoto.process.run(conn, ['ceph', '--version']) + + +def mon_create(distro, args, monitor_keyring): + hostname = distro.conn.remote_module.shortname() + logger = distro.conn.logger + logger.debug('remote hostname: %s' % hostname) + path = paths.mon.path(args.cluster, hostname) + uid = distro.conn.remote_module.path_getuid(constants.base_path) + gid = distro.conn.remote_module.path_getgid(constants.base_path) + done_path = paths.mon.done(args.cluster, hostname) + init_path = paths.mon.init(args.cluster, hostname, distro.init) + + conf_data = conf.ceph.load_raw(args) + + # write the configuration file + distro.conn.remote_module.write_conf( + args.cluster, + conf_data, + args.overwrite_conf, + ) + + # if the mon path does not exist, create it + distro.conn.remote_module.create_mon_path(path, uid, gid) + + logger.debug('checking for done path: %s' % done_path) + if not distro.conn.remote_module.path_exists(done_path): + logger.debug('done path does not exist: %s' % done_path) + if not distro.conn.remote_module.path_exists(paths.mon.constants.tmp_path): + logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path) + distro.conn.remote_module.makedir(paths.mon.constants.tmp_path) + keyring = paths.mon.keyring(args.cluster, hostname) + + logger.info('creating keyring file: %s' % keyring) + distro.conn.remote_module.write_monitor_keyring( + keyring, + monitor_keyring, + uid, gid, + ) + + user_args = [] + if uid != 0: + user_args = user_args + [ '--setuser', str(uid) ] + if gid != 0: + user_args = user_args + [ '--setgroup', str(gid) ] + + remoto.process.run( + distro.conn, + [ + 'ceph-mon', + '--cluster', args.cluster, + '--mkfs', + '-i', hostname, + '--keyring', keyring, + ] + user_args + ) + + logger.info('unlinking keyring file %s' % keyring) + distro.conn.remote_module.unlink(keyring) + + # create the done file + distro.conn.remote_module.create_done_path(done_path, uid, gid) + + # create init path + distro.conn.remote_module.create_init_path(init_path, uid, gid) + + # start mon service + start_mon_service(distro, args.cluster, hostname) + + +def mon_add(distro, args, monitor_keyring): + hostname = distro.conn.remote_module.shortname() + logger = distro.conn.logger + path = paths.mon.path(args.cluster, hostname) + uid = distro.conn.remote_module.path_getuid(constants.base_path) + gid = distro.conn.remote_module.path_getgid(constants.base_path) + monmap_path = paths.mon.monmap(args.cluster, hostname) + done_path = paths.mon.done(args.cluster, hostname) + init_path = paths.mon.init(args.cluster, hostname, distro.init) + + conf_data = conf.ceph.load_raw(args) + + # write the configuration file + distro.conn.remote_module.write_conf( + args.cluster, + conf_data, + args.overwrite_conf, + ) + + # if the mon path does not exist, create it + distro.conn.remote_module.create_mon_path(path, uid, gid) + + logger.debug('checking for done path: %s' % done_path) + if not distro.conn.remote_module.path_exists(done_path): + logger.debug('done path does not exist: %s' % done_path) + if not distro.conn.remote_module.path_exists(paths.mon.constants.tmp_path): + logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path) + distro.conn.remote_module.makedir(paths.mon.constants.tmp_path) + keyring = paths.mon.keyring(args.cluster, hostname) + + logger.info('creating keyring file: %s' % keyring) + distro.conn.remote_module.write_monitor_keyring( + keyring, + monitor_keyring, + uid, gid, + ) + + # get the monmap + remoto.process.run( + distro.conn, + [ + 'ceph', + '--cluster', args.cluster, + 'mon', + 'getmap', + '-o', + monmap_path, + ], + ) + + # now use it to prepare the monitor's data dir + user_args = [] + if uid != 0: + user_args = user_args + [ '--setuser', str(uid) ] + if gid != 0: + user_args = user_args + [ '--setgroup', str(gid) ] + + remoto.process.run( + distro.conn, + [ + 'ceph-mon', + '--cluster', args.cluster, + '--mkfs', + '-i', hostname, + '--monmap', + monmap_path, + '--keyring', keyring, + ] + user_args + ) + + logger.info('unlinking keyring file %s' % keyring) + distro.conn.remote_module.unlink(keyring) + + # create the done file + distro.conn.remote_module.create_done_path(done_path, uid, gid) + + # create init path + distro.conn.remote_module.create_init_path(init_path, uid, gid) + + # start mon service + start_mon_service(distro, args.cluster, hostname) + + +def map_components(notsplit_packages, components): + """ + Returns a list of packages to install based on component names + + This is done by checking if a component is in notsplit_packages, + if it is, we know we need to install 'ceph' instead of the + raw component name. Essentially, this component hasn't been + 'split' from the master 'ceph' package yet. + """ + packages = set() + + for c in components: + if c in notsplit_packages: + packages.add('ceph') + else: + packages.add(c) + + return list(packages) + + +def start_mon_service(distro, cluster, hostname): + """ + start mon service depending on distro init + """ + if distro.init == 'sysvinit': + service = distro.conn.remote_module.which_service() + remoto.process.run( + distro.conn, + [ + service, + 'ceph', + '-c', + '/etc/ceph/{cluster}.conf'.format(cluster=cluster), + 'start', + 'mon.{hostname}'.format(hostname=hostname) + ], + timeout=7, + ) + system.enable_service(distro.conn) + + elif distro.init == 'upstart': + remoto.process.run( + distro.conn, + [ + 'initctl', + 'emit', + 'ceph-mon', + 'cluster={cluster}'.format(cluster=cluster), + 'id={hostname}'.format(hostname=hostname), + ], + timeout=7, + ) + + elif distro.init == 'systemd': + # enable ceph target for this host (in case it isn't already enabled) + remoto.process.run( + distro.conn, + [ + 'systemctl', + 'enable', + 'ceph.target' + ], + timeout=7, + ) + + # enable and start this mon instance + remoto.process.run( + distro.conn, + [ + 'systemctl', + 'enable', + 'ceph-mon@{hostname}'.format(hostname=hostname), + ], + timeout=7, + ) + remoto.process.run( + distro.conn, + [ + 'systemctl', + 'start', + 'ceph-mon@{hostname}'.format(hostname=hostname), + ], + timeout=7, + ) diff --git a/ceph_deploy/hosts/debian/__init__.py b/ceph_deploy/hosts/debian/__init__.py new file mode 100644 index 0000000..e3e747b --- /dev/null +++ b/ceph_deploy/hosts/debian/__init__.py @@ -0,0 +1,35 @@ +from . import mon # noqa +from .install import install, mirror_install, repo_install # noqa +from .uninstall import uninstall # noqa +from ceph_deploy.util import pkg_managers +from ceph_deploy.util.system import is_systemd, is_upstart + +# Allow to set some information about this distro +# + +distro = None +release = None +codename = None + + +def choose_init(module): + """ + Select a init system + + Returns the name of a init system (upstart, sysvinit ...). + """ + # Upstart checks first because when installing ceph, the + # `/lib/systemd/system/ceph.target` file may be created, fooling this + # detection mechanism. + if is_upstart(module.conn): + return 'upstart' + + if is_systemd(module.conn) or module.conn.remote_module.path_exists( + "/lib/systemd/system/ceph.target"): + return 'systemd' + + return 'sysvinit' + + +def get_packager(module): + return pkg_managers.Apt(module) diff --git a/ceph_deploy/hosts/debian/install.py b/ceph_deploy/hosts/debian/install.py new file mode 100644 index 0000000..1aa4431 --- /dev/null +++ b/ceph_deploy/hosts/debian/install.py @@ -0,0 +1,125 @@ +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse +import logging +from ceph_deploy.util.paths import gpg +from ceph_deploy.util import net + + +LOG = logging.getLogger(__name__) + + +def install(distro, version_kind, version, adjust_repos, **kw): + packages = kw.pop('components', []) + codename = distro.codename + machine = distro.machine_type + extra_install_flags = [] + + if version_kind in ['stable', 'testing']: + key = 'release' + else: + key = 'autobuild' + + distro.packager.clean() + distro.packager.install(['ca-certificates', 'apt-transport-https']) + + if adjust_repos: + # Wheezy does not like the download.ceph.com SSL cert + protocol = 'https' + if codename == 'wheezy': + protocol = 'http' + + if version_kind in ['dev', 'dev_commit']: + shaman_url = 'https://shaman.ceph.com/api/repos/ceph/{version}/{sha1}/{distro}/{distro_version}/repo/?arch={arch}'.format( + distro=distro.normalized_name, + distro_version=distro.codename, + version=kw['args'].dev, + sha1=kw['args'].dev_commit or 'latest', + arch=machine + ) + LOG.debug('fetching repo information from: %s' % shaman_url) + chacra_url = net.get_request(shaman_url).geturl() + content = net.get_chacra_repo(shaman_url) + # set the repo priority for the right domain + fqdn = urlparse(chacra_url).hostname + distro.conn.remote_module.set_apt_priority(fqdn) + distro.conn.remote_module.write_sources_list_content(content) + extra_install_flags = ['-o', 'Dpkg::Options::=--force-confnew', '--allow-unauthenticated'] + else: + distro.packager.add_repo_gpg_key(gpg.url(key, protocol=protocol)) + if version_kind == 'stable': + url = '{protocol}://download.ceph.com/debian-{version}/'.format( + protocol=protocol, + version=version, + ) + elif version_kind == 'testing': + url = '{protocol}://download.ceph.com/debian-testing/'.format( + protocol=protocol, + ) + else: + raise RuntimeError('Unknown version kind: %r' % version_kind) + + # set the repo priority for the right domain + fqdn = urlparse(url).hostname + distro.conn.remote_module.set_apt_priority(fqdn) + distro.conn.remote_module.write_sources_list(url, codename) + extra_install_flags = ['-o', 'Dpkg::Options::=--force-confnew'] + + distro.packager.clean() + + # TODO this does not downgrade -- should it? + if packages: + distro.packager.install( + packages, + extra_install_flags=extra_install_flags + ) + + +def mirror_install(distro, repo_url, gpg_url, adjust_repos, **kw): + packages = kw.pop('components', []) + version_kind = kw['args'].version_kind + repo_url = repo_url.strip('/') # Remove trailing slashes + + if adjust_repos: + distro.packager.add_repo_gpg_key(gpg_url) + + # set the repo priority for the right domain + fqdn = urlparse(repo_url).hostname + distro.conn.remote_module.set_apt_priority(fqdn) + + distro.conn.remote_module.write_sources_list(repo_url, distro.codename) + + extra_install_flags = ['--allow-unauthenticated'] if version_kind in 'dev' else [] + + if packages: + distro.packager.clean() + distro.packager.install( + packages, + extra_install_flags=extra_install_flags) + + +def repo_install(distro, repo_name, baseurl, gpgkey, **kw): + packages = kw.pop('components', []) + # Get some defaults + safe_filename = '%s.list' % repo_name.replace(' ', '-') + install_ceph = kw.pop('install_ceph', False) + baseurl = baseurl.strip('/') # Remove trailing slashes + + distro.packager.add_repo_gpg_key(gpgkey) + + distro.conn.remote_module.write_sources_list( + baseurl, + distro.codename, + safe_filename + ) + + # set the repo priority for the right domain + fqdn = urlparse(baseurl).hostname + distro.conn.remote_module.set_apt_priority(fqdn) + + # repo is not operable until an update + distro.packager.clean() + + if install_ceph and packages: + distro.packager.install(packages) diff --git a/ceph_deploy/hosts/debian/mon/__init__.py b/ceph_deploy/hosts/debian/mon/__init__.py new file mode 100644 index 0000000..f266fb0 --- /dev/null +++ b/ceph_deploy/hosts/debian/mon/__init__.py @@ -0,0 +1,2 @@ +from ceph_deploy.hosts.common import mon_add as add # noqa +from ceph_deploy.hosts.common import mon_create as create # noqa diff --git a/ceph_deploy/hosts/debian/uninstall.py b/ceph_deploy/hosts/debian/uninstall.py new file mode 100644 index 0000000..b3a01b2 --- /dev/null +++ b/ceph_deploy/hosts/debian/uninstall.py @@ -0,0 +1,15 @@ +def uninstall(distro, purge=False): + packages = [ + 'ceph', + 'ceph-mds', + 'ceph-common', + 'ceph-fs-common', + 'radosgw', + ] + extra_remove_flags = [] + if purge: + extra_remove_flags.append('--purge') + distro.packager.remove( + packages, + extra_remove_flags=extra_remove_flags + ) diff --git a/ceph_deploy/hosts/fedora/__init__.py b/ceph_deploy/hosts/fedora/__init__.py new file mode 100644 index 0000000..81d8aca --- /dev/null +++ b/ceph_deploy/hosts/fedora/__init__.py @@ -0,0 +1,30 @@ +from . import mon # noqa +from ceph_deploy.hosts.centos.install import repo_install # noqa +from .install import install, mirror_install # noqa +from .uninstall import uninstall # noqa +from ceph_deploy.util import pkg_managers + +# Allow to set some information about this distro +# + +distro = None +release = None +codename = None + +def choose_init(module): + """ + Select a init system + + Returns the name of a init system (upstart, sysvinit ...). + """ + + if not module.conn.remote_module.path_exists("/usr/lib/systemd/system/ceph.target"): + return 'sysvinit' + + return 'systemd' + +def get_packager(module): + if module.normalized_release.int_major >= 22: + return pkg_managers.DNF(module) + else: + return pkg_managers.Yum(module) diff --git a/ceph_deploy/hosts/fedora/install.py b/ceph_deploy/hosts/fedora/install.py new file mode 100644 index 0000000..b2806f4 --- /dev/null +++ b/ceph_deploy/hosts/fedora/install.py @@ -0,0 +1,87 @@ +from ceph_deploy.lib import remoto +from ceph_deploy.hosts.centos.install import repo_install, mirror_install # noqa +from ceph_deploy.util.paths import gpg +from ceph_deploy.hosts.common import map_components + + +NON_SPLIT_PACKAGES = ['ceph-osd', 'ceph-mon', 'ceph-mds'] + + +def install(distro, version_kind, version, adjust_repos, **kw): + packages = map_components( + NON_SPLIT_PACKAGES, + kw.pop('components', []) + ) + gpgcheck = kw.pop('gpgcheck', 1) + + logger = distro.conn.logger + release = distro.release + machine = distro.machine_type + + if version_kind in ['stable', 'testing']: + key = 'release' + else: + key = 'autobuild' + + if adjust_repos: + if distro.packager.name == 'yum': + distro.packager.install('yum-plugin-priorities') + # haven't been able to determine necessity of check_obsoletes with DNF + distro.conn.remote_module.enable_yum_priority_obsoletes() + logger.warning('check_obsoletes has been enabled for Yum priorities plugin') + + if version_kind in ['stable', 'testing']: + distro.packager.add_repo_gpg_key(gpg.url(key)) + + if version_kind == 'stable': + url = 'https://download.ceph.com/rpm-{version}/fc{release}/'.format( + version=version, + release=release, + ) + elif version_kind == 'testing': + url = 'https://download.ceph.com/rpm-testing/fc{release}'.format( + release=release, + ) + + remoto.process.run( + distro.conn, + [ + 'rpm', + '-Uvh', + '--replacepkgs', + '--force', + '--quiet', + '{url}noarch/ceph-release-1-0.fc{release}.noarch.rpm'.format( + url=url, + release=release, + ), + ] + ) + + # set the right priority + logger.warning('ensuring that /etc/yum.repos.d/ceph.repo contains a high priority') + distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source']) + logger.warning('altered ceph.repo priorities to contain: priority=1') + + elif version_kind in ['dev', 'dev_commit']: + logger.info('skipping install of ceph-release package') + logger.info('repo file will be created manually') + mirror_install( + distro, + 'http://gitbuilder.ceph.com/ceph-rpm-fc{release}-{machine}-basic/{sub}/{version}/'.format( + release=release.split(".", 1)[0], + machine=machine, + sub='ref' if version_kind == 'dev' else 'sha1', + version=version), + gpg.url(key), + adjust_repos=True, + extra_installs=False, + gpgcheck=gpgcheck, + ) + + else: + raise Exception('unrecognized version_kind %s' % version_kind) + + distro.packager.install( + packages + ) diff --git a/ceph_deploy/hosts/fedora/mon/__init__.py b/ceph_deploy/hosts/fedora/mon/__init__.py new file mode 100644 index 0000000..f266fb0 --- /dev/null +++ b/ceph_deploy/hosts/fedora/mon/__init__.py @@ -0,0 +1,2 @@ +from ceph_deploy.hosts.common import mon_add as add # noqa +from ceph_deploy.hosts.common import mon_create as create # noqa diff --git a/ceph_deploy/hosts/fedora/uninstall.py b/ceph_deploy/hosts/fedora/uninstall.py new file mode 100644 index 0000000..8d40909 --- /dev/null +++ b/ceph_deploy/hosts/fedora/uninstall.py @@ -0,0 +1,8 @@ +def uninstall(distro, purge=False): + packages = [ + 'ceph', + 'ceph-common', + 'ceph-radosgw', + ] + + distro.packager.remove(packages) diff --git a/ceph_deploy/hosts/remotes.py b/ceph_deploy/hosts/remotes.py new file mode 100644 index 0000000..72dd1f6 --- /dev/null +++ b/ceph_deploy/hosts/remotes.py @@ -0,0 +1,413 @@ +try: + import configparser +except ImportError: + import ConfigParser as configparser +import errno +import socket +import os +import shutil +import tempfile +import platform +import re + + +def platform_information(_linux_distribution=None): + """ detect platform information from remote host """ + linux_distribution = _linux_distribution or platform.linux_distribution + distro, release, codename = linux_distribution() + if not distro: + distro, release, codename = parse_os_release() + if not codename and 'debian' in distro.lower(): # this could be an empty string in Debian + debian_codenames = { + '10': 'buster', + '9': 'stretch', + '8': 'jessie', + '7': 'wheezy', + '6': 'squeeze', + } + major_version = release.split('.')[0] + codename = debian_codenames.get(major_version, '') + + # In order to support newer jessie/sid or wheezy/sid strings we test this + # if sid is buried in the minor, we should use sid anyway. + if not codename and '/' in release: + major, minor = release.split('/') + if minor == 'sid': + codename = minor + else: + codename = major + if not codename and 'oracle' in distro.lower(): # this could be an empty string in Oracle linux + codename = 'oracle' + if not codename and 'virtuozzo linux' in distro.lower(): # this could be an empty string in Virtuozzo linux + codename = 'virtuozzo' + if not codename and 'arch' in distro.lower(): # this could be an empty string in Arch linux + codename = 'arch' + + return ( + str(distro).rstrip(), + str(release).rstrip(), + str(codename).rstrip() + ) + + +def parse_os_release(release_path='/etc/os-release'): + """ Extract (distro, release, codename) from /etc/os-release if present """ + release_info = {} + if os.path.isfile(release_path): + for line in open(release_path, 'r').readlines(): + line = line.strip() + if line.startswith('#'): + continue + parts = line.split('=') + if len(parts) != 2: + continue + release_info[parts[0].strip()] = parts[1].strip("\"'\n\t ") + # In theory, we want ID/NAME, VERSION_ID and VERSION_CODENAME (with a + # possible fallback to VERSION on the latter), based on information at: + # https://www.freedesktop.org/software/systemd/man/os-release.html + # However, after reviewing several distros /etc/os-release, getting + # the codename is a bit of a mess. It's usually in parentheses in + # VERSION, with some exceptions. + distro = release_info.get('ID', '') + release = release_info.get('VERSION_ID', '') + codename = release_info.get('UBUNTU_CODENAME', release_info.get('VERSION', '')) + match = re.match(r'^[^(]+ \(([^)]+)\)', codename) + if match: + codename = match.group(1).lower() + if not codename and release_info.get('NAME', '') == 'openSUSE Tumbleweed': + codename = 'tumbleweed' + return (distro, release, codename) + +def machine_type(): + """ detect machine type """ + return platform.machine() + + +def write_sources_list(url, codename, filename='ceph.list', mode=0o644): + """add deb repo to /etc/apt/sources.list.d/""" + repo_path = os.path.join('/etc/apt/sources.list.d', filename) + content = 'deb {url} {codename} main\n'.format( + url=url, + codename=codename, + ) + write_file(repo_path, content.encode('utf-8'), mode) + + +def write_sources_list_content(content, filename='ceph.list', mode=0o644): + """add deb repo to /etc/apt/sources.list.d/ from content""" + repo_path = os.path.join('/etc/apt/sources.list.d', filename) + if not isinstance(content, str): + content = content.decode('utf-8') + write_file(repo_path, content.encode('utf-8'), mode) + + +def write_yum_repo(content, filename='ceph.repo'): + """add yum repo file in /etc/yum.repos.d/""" + repo_path = os.path.join('/etc/yum.repos.d', filename) + if not isinstance(content, str): + content = content.decode('utf-8') + write_file(repo_path, content.encode('utf-8')) + + +def set_apt_priority(fqdn, path='/etc/apt/preferences.d/ceph.pref'): + template = "Package: *\nPin: origin {fqdn}\nPin-Priority: 999\n" + content = template.format(fqdn=fqdn) + with open(path, 'w') as fout: + fout.write(content) + + +def set_repo_priority(sections, path='/etc/yum.repos.d/ceph.repo', priority='1'): + Config = configparser.ConfigParser() + Config.read(path) + Config.sections() + for section in sections: + try: + Config.set(section, 'priority', priority) + except configparser.NoSectionError: + # Emperor versions of Ceph used all lowercase sections + # so lets just try again for the section that failed, maybe + # we are able to find it if it is lower + Config.set(section.lower(), 'priority', priority) + + with open(path, 'w') as fout: + Config.write(fout) + + # And now, because ConfigParser is super duper, we need to remove the + # assignments so this looks like it was before + def remove_whitespace_from_assignments(): + separator = "=" + lines = open(path).readlines() + fp = open(path, "w") + for line in lines: + line = line.strip() + if not line.startswith("#") and separator in line: + assignment = line.split(separator, 1) + assignment = tuple(map(str.strip, assignment)) + fp.write("%s%s%s\n" % (assignment[0], separator, assignment[1])) + else: + fp.write(line + "\n") + + remove_whitespace_from_assignments() + + +def write_conf(cluster, conf, overwrite): + """ write cluster configuration to /etc/ceph/{cluster}.conf """ + path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster) + tmp_file = tempfile.NamedTemporaryFile('w', dir='/etc/ceph', delete=False) + err_msg = 'config file %s exists with different content; use --overwrite-conf to overwrite' % path + + if os.path.exists(path): + with open(path, 'r') as f: + old = f.read() + if old != conf and not overwrite: + raise RuntimeError(err_msg) + tmp_file.write(conf) + tmp_file.close() + shutil.move(tmp_file.name, path) + os.chmod(path, 0o644) + return + if os.path.exists('/etc/ceph'): + with open(path, 'w') as f: + f.write(conf) + os.chmod(path, 0o644) + else: + err_msg = '/etc/ceph/ does not exist - could not write config' + raise RuntimeError(err_msg) + + +def write_keyring(path, key, uid=-1, gid=-1): + """ create a keyring file """ + # Note that we *require* to avoid deletion of the temp file + # otherwise we risk not being able to copy the contents from + # one file system to the other, hence the `delete=False` + tmp_file = tempfile.NamedTemporaryFile('wb', delete=False) + tmp_file.write(key) + tmp_file.close() + keyring_dir = os.path.dirname(path) + if not path_exists(keyring_dir): + makedir(keyring_dir, uid, gid) + shutil.move(tmp_file.name, path) + + +def create_mon_path(path, uid=-1, gid=-1): + """create the mon path if it does not exist""" + if not os.path.exists(path): + os.makedirs(path) + os.chown(path, uid, gid); + + +def create_done_path(done_path, uid=-1, gid=-1): + """create a done file to avoid re-doing the mon deployment""" + with open(done_path, 'wb'): + pass + os.chown(done_path, uid, gid); + + +def create_init_path(init_path, uid=-1, gid=-1): + """create the init path if it does not exist""" + if not os.path.exists(init_path): + with open(init_path, 'wb'): + pass + os.chown(init_path, uid, gid); + + +def append_to_file(file_path, contents): + """append contents to file""" + with open(file_path, 'a') as f: + f.write(contents) + +def path_getuid(path): + return os.stat(path).st_uid + +def path_getgid(path): + return os.stat(path).st_gid + +def readline(path): + with open(path) as _file: + return _file.readline().strip('\n') + + +def path_exists(path): + return os.path.exists(path) + + +def get_realpath(path): + return os.path.realpath(path) + + +def listdir(path): + return os.listdir(path) + + +def makedir(path, ignored=None, uid=-1, gid=-1): + ignored = ignored or [] + try: + os.makedirs(path) + except OSError as error: + if error.errno in ignored: + pass + else: + # re-raise the original exception + raise + else: + os.chown(path, uid, gid); + + +def unlink(_file): + os.unlink(_file) + + +def write_monitor_keyring(keyring, monitor_keyring, uid=-1, gid=-1): + """create the monitor keyring file""" + write_file(keyring, monitor_keyring, 0o600, None, uid, gid) + + +def write_file(path, content, mode=0o644, directory=None, uid=-1, gid=-1): + if directory: + if path.startswith("/"): + path = path[1:] + path = os.path.join(directory, path) + if os.path.exists(path): + # Delete file in case we are changing its mode + os.unlink(path) + with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, mode), 'wb') as f: + f.write(content) + os.chown(path, uid, gid) + + +def touch_file(path): + with open(path, 'wb') as f: # noqa + pass + + +def get_file(path): + """ fetch remote file """ + try: + with open(path, 'rb') as f: + return f.read() + except IOError: + pass + + +def object_grep(term, file_object): + for line in file_object.readlines(): + if term in line: + return True + return False + + +def grep(term, file_path): + # A small grep-like function that will search for a word in a file and + # return True if it does and False if it does not. + + # Implemented initially to have a similar behavior as the init system + # detection in Ceph's init scripts:: + + # # detect systemd + # # SYSTEMD=0 + # grep -qs systemd /proc/1/comm && SYSTEMD=1 + + # .. note:: Because we intent to be operating in silent mode, we explicitly + # return ``False`` if the file does not exist. + if not os.path.isfile(file_path): + return False + + with open(file_path) as _file: + return object_grep(term, _file) + + +def shortname(): + """get remote short hostname""" + return socket.gethostname().split('.', 1)[0] + + +def which_service(): + """ locating the `service` executable... """ + # XXX This should get deprecated at some point. For now + # it just bypasses and uses the new helper. + return which('service') + + +def which(executable): + """find the location of an executable""" + locations = ( + '/usr/local/bin', + '/bin', + '/usr/bin', + '/usr/local/sbin', + '/usr/sbin', + '/sbin', + ) + + for location in locations: + executable_path = os.path.join(location, executable) + if os.path.exists(executable_path) and os.path.isfile(executable_path): + return executable_path + + +def make_mon_removed_dir(path, file_name): + """ move old monitor data """ + try: + os.makedirs('/var/lib/ceph/mon-removed') + except OSError as e: + if e.errno != errno.EEXIST: + raise + shutil.move(path, os.path.join('/var/lib/ceph/mon-removed/', file_name)) + + +def safe_mkdir(path, uid=-1, gid=-1): + """ create path if it doesn't exist """ + try: + os.mkdir(path) + except OSError as e: + if e.errno == errno.EEXIST: + pass + else: + raise + else: + os.chown(path, uid, gid) + + +def safe_makedirs(path, uid=-1, gid=-1): + """ create path recursively if it doesn't exist """ + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST: + pass + else: + raise + else: + os.chown(path, uid, gid) + + +def zeroing(dev): + """ zeroing last few blocks of device """ + # this kills the crab + # + # sgdisk will wipe out the main copy of the GPT partition + # table (sorry), but it doesn't remove the backup copies, and + # subsequent commands will continue to complain and fail when + # they see those. zeroing the last few blocks of the device + # appears to do the trick. + lba_size = 4096 + size = 33 * lba_size + return True + with open(dev, 'wb') as f: + f.seek(-size, os.SEEK_END) + f.write(size*b'\0') + + +def enable_yum_priority_obsoletes(path="/etc/yum/pluginconf.d/priorities.conf"): + """Configure Yum priorities to include obsoletes""" + config = configparser.ConfigParser() + config.read(path) + config.set('main', 'check_obsoletes', '1') + with open(path, 'w') as fout: + config.write(fout) + + +# remoto magic, needed to execute these functions remotely +if __name__ == '__channelexec__': + for item in channel: # noqa + channel.send(eval(item)) # noqa diff --git a/ceph_deploy/hosts/rhel/__init__.py b/ceph_deploy/hosts/rhel/__init__.py new file mode 100644 index 0000000..a86ad50 --- /dev/null +++ b/ceph_deploy/hosts/rhel/__init__.py @@ -0,0 +1,33 @@ +from . import mon # noqa +from .install import install, mirror_install, repo_install # noqa +from .uninstall import uninstall # noqa +from ceph_deploy.util import pkg_managers +from ceph_deploy.util.system import is_systemd + +# Allow to set some information about this distro +# + +distro = None +release = None +codename = None + +def choose_init(module): + """ + Select a init system + + Returns the name of a init system (upstart, sysvinit ...). + """ + + if module.normalized_release.int_major < 7: + return 'sysvinit' + + if not module.conn.remote_module.path_exists("/usr/lib/systemd/system/ceph.target"): + return 'sysvinit' + + if is_systemd(module.conn): + return 'systemd' + + return 'systemd' + +def get_packager(module): + return pkg_managers.Yum(module) diff --git a/ceph_deploy/hosts/rhel/install.py b/ceph_deploy/hosts/rhel/install.py new file mode 100644 index 0000000..bf44a03 --- /dev/null +++ b/ceph_deploy/hosts/rhel/install.py @@ -0,0 +1,71 @@ +from ceph_deploy.util import templates + + +def install(distro, version_kind, version, adjust_repos, **kw): + packages = kw.get('components', []) + distro.packager.clean() + distro.packager.install(packages) + + +def mirror_install(distro, repo_url, + gpg_url, adjust_repos, extra_installs=True, **kw): + packages = kw.get('components', []) + repo_url = repo_url.strip('/') # Remove trailing slashes + gpgcheck = kw.pop('gpgcheck', 1) + + distro.packager.clean() + + if adjust_repos: + distro.packager.add_repo_gpg_key(gpg_url) + + ceph_repo_content = templates.ceph_repo.format( + repo_url=repo_url, + gpg_url=gpg_url, + gpgcheck=gpgcheck, + ) + + distro.conn.remote_module.write_yum_repo(ceph_repo_content) + + if extra_installs and packages: + distro.packager.install(packages) + + +def repo_install(distro, reponame, baseurl, gpgkey, **kw): + # do we have specific components to install? + # removed them from `kw` so that we don't mess with other defaults + packages = kw.pop('components', []) + + # Get some defaults + name = kw.pop('name', '%s repo' % reponame) + enabled = kw.pop('enabled', 1) + gpgcheck = kw.pop('gpgcheck', 1) + install_ceph = kw.pop('install_ceph', False) + proxy = kw.pop('proxy', '') # will get ignored if empty + _type = 'repo-md' + baseurl = baseurl.strip('/') # Remove trailing slashes + + distro.packager.clean() + + if gpgkey: + distro.packager.add_repo_gpg_key(gpgkey) + + repo_content = templates.custom_repo( + reponame=reponame, + name=name, + baseurl=baseurl, + enabled=enabled, + gpgcheck=gpgcheck, + _type=_type, + gpgkey=gpgkey, + proxy=proxy, + **kw + ) + + distro.conn.remote_module.write_yum_repo( + repo_content, + "%s.repo" % reponame + ) + + # Some custom repos do not need to install ceph + if install_ceph and packages: + distro.packager.install(packages) diff --git a/ceph_deploy/hosts/rhel/mon/__init__.py b/ceph_deploy/hosts/rhel/mon/__init__.py new file mode 100644 index 0000000..f266fb0 --- /dev/null +++ b/ceph_deploy/hosts/rhel/mon/__init__.py @@ -0,0 +1,2 @@ +from ceph_deploy.hosts.common import mon_add as add # noqa +from ceph_deploy.hosts.common import mon_create as create # noqa diff --git a/ceph_deploy/hosts/rhel/uninstall.py b/ceph_deploy/hosts/rhel/uninstall.py new file mode 100644 index 0000000..17ae208 --- /dev/null +++ b/ceph_deploy/hosts/rhel/uninstall.py @@ -0,0 +1,11 @@ +def uninstall(distro, purge=False): + packages = [ + 'ceph', + 'ceph-common', + 'ceph-mon', + 'ceph-osd', + 'ceph-radosgw' + ] + + distro.packager.remove(packages) + distro.packager.clean() diff --git a/ceph_deploy/hosts/suse/__init__.py b/ceph_deploy/hosts/suse/__init__.py new file mode 100644 index 0000000..af66a15 --- /dev/null +++ b/ceph_deploy/hosts/suse/__init__.py @@ -0,0 +1,31 @@ +from . import mon # noqa +from .install import install, mirror_install, repo_install # noqa +from .uninstall import uninstall # noqa +import logging + +from ceph_deploy.util import pkg_managers + +# Allow to set some information about this distro +# + +log = logging.getLogger(__name__) + +distro = None +release = None +codename = None + +def choose_init(module): + """ + Select a init system + + Returns the name of a init system (upstart, sysvinit ...). + """ + init_mapping = { '11' : 'sysvinit', # SLE_11 + '12' : 'systemd', # SLE_12 + '13.1' : 'systemd', # openSUSE_13.1 + } + return init_mapping.get(release, 'systemd') + + +def get_packager(module): + return pkg_managers.Zypper(module) diff --git a/ceph_deploy/hosts/suse/install.py b/ceph_deploy/hosts/suse/install.py new file mode 100644 index 0000000..7a23f38 --- /dev/null +++ b/ceph_deploy/hosts/suse/install.py @@ -0,0 +1,98 @@ +import logging + +from ceph_deploy.util import templates +from ceph_deploy.lib import remoto +from ceph_deploy.hosts.common import map_components + +LOG = logging.getLogger(__name__) + +NON_SPLIT_PACKAGES = ['ceph-osd', 'ceph-mon', 'ceph-mds'] + + +def install(distro, version_kind, version, adjust_repos, **kw): + packages = map_components( + NON_SPLIT_PACKAGES, + kw.get('components', []) + ) + + distro.packager.clean() + if packages: + distro.packager.install(packages) + + +def mirror_install(distro, repo_url, gpg_url, adjust_repos, **kw): + packages = map_components( + NON_SPLIT_PACKAGES, + kw.get('components', []) + ) + repo_url = repo_url.strip('/') # Remove trailing slashes + gpg_url_path = gpg_url.split('file://')[-1] # Remove file if present + gpgcheck = kw.pop('gpgcheck', 1) + + if adjust_repos: + remoto.process.run( + distro.conn, + [ + 'rpm', + '--import', + gpg_url_path, + ] + ) + + ceph_repo_content = templates.zypper_repo.format( + repo_url=repo_url, + gpg_url=gpg_url, + gpgcheck=gpgcheck, + ) + distro.conn.remote_module.write_file( + '/etc/zypp/repos.d/ceph.repo', + ceph_repo_content.encode('utf-8')) + distro.packager.clean() + + if packages: + distro.packager.install(packages) + + +def repo_install(distro, reponame, baseurl, gpgkey, **kw): + packages = map_components( + NON_SPLIT_PACKAGES, + kw.pop('components', []) + ) + # Get some defaults + name = kw.get('name', '%s repo' % reponame) + enabled = kw.get('enabled', 1) + gpgcheck = kw.get('gpgcheck', 1) + install_ceph = kw.pop('install_ceph', False) + proxy = kw.get('proxy') + _type = 'repo-md' + baseurl = baseurl.strip('/') # Remove trailing slashes + + if gpgkey: + remoto.process.run( + distro.conn, + [ + 'rpm', + '--import', + gpgkey, + ] + ) + + repo_content = templates.custom_repo( + reponame=reponame, + name = name, + baseurl = baseurl, + enabled = enabled, + gpgcheck = gpgcheck, + _type = _type, + gpgkey = gpgkey, + proxy = proxy, + ) + + distro.conn.remote_module.write_file( + '/etc/zypp/repos.d/%s' % (reponame), + repo_content.encode('utf-8') + ) + + # Some custom repos do not need to install ceph + if install_ceph and packages: + distro.packager.install(packages) diff --git a/ceph_deploy/hosts/suse/mon/__init__.py b/ceph_deploy/hosts/suse/mon/__init__.py new file mode 100644 index 0000000..f266fb0 --- /dev/null +++ b/ceph_deploy/hosts/suse/mon/__init__.py @@ -0,0 +1,2 @@ +from ceph_deploy.hosts.common import mon_add as add # noqa +from ceph_deploy.hosts.common import mon_create as create # noqa diff --git a/ceph_deploy/hosts/suse/uninstall.py b/ceph_deploy/hosts/suse/uninstall.py new file mode 100644 index 0000000..163d891 --- /dev/null +++ b/ceph_deploy/hosts/suse/uninstall.py @@ -0,0 +1,10 @@ +def uninstall(distro, purge=False): + packages = [ + 'ceph', + 'ceph-common', + 'libcephfs1', + 'librados2', + 'librbd1', + 'ceph-radosgw', + ] + distro.packager.remove(packages) diff --git a/ceph_deploy/hosts/util.py b/ceph_deploy/hosts/util.py new file mode 100644 index 0000000..b943609 --- /dev/null +++ b/ceph_deploy/hosts/util.py @@ -0,0 +1,31 @@ +""" +A utility module that can host utilities that will be used by more than +one type of distro and not common to all of them +""" +from ceph_deploy.util import pkg_managers + + +def install_yum_priorities(distro, _yum=None): + """ + EPEL started packaging Ceph so we need to make sure that the ceph.repo we + install has a higher priority than the EPEL repo so that when installing + Ceph it will come from the repo file we create. + + The name of the package changed back and forth (!) since CentOS 4: + + From the CentOS wiki:: + + Note: This plugin has carried at least two differing names over time. + It is named yum-priorities on CentOS-5 but was named + yum-plugin-priorities on CentOS-4. CentOS-6 has reverted to + yum-plugin-priorities. + + :params _yum: Used for testing, so we can inject a fake yum + """ + yum = _yum or pkg_managers.yum + package_name = 'yum-plugin-priorities' + + if distro.normalized_name == 'centos': + if distro.release[0] != '6': + package_name = 'yum-priorities' + yum(distro.conn, package_name) diff --git a/ceph_deploy/install.py b/ceph_deploy/install.py new file mode 100644 index 0000000..12c5f79 --- /dev/null +++ b/ceph_deploy/install.py @@ -0,0 +1,670 @@ +import argparse +import logging +import os + +from ceph_deploy import hosts +from ceph_deploy.cliutil import priority +from ceph_deploy.lib import remoto +from ceph_deploy.util.constants import default_components +from ceph_deploy.util.paths import gpg + +LOG = logging.getLogger(__name__) + + +def sanitize_args(args): + """ + args may need a bunch of logic to set proper defaults that argparse is + not well suited for. + """ + if args.release is None: + args.release = 'nautilus' + args.default_release = True + + # XXX This whole dance is because --stable is getting deprecated + if args.stable is not None: + LOG.warning('the --stable flag is deprecated, use --release instead') + args.release = args.stable + # XXX Tango ends here. + + return args + + +def detect_components(args, distro): + """ + Since the package split, now there are various different Ceph components to + install like: + + * ceph + * ceph-mon + * ceph-mgr + * ceph-osd + * ceph-mds + + This helper function should parse the args that may contain specifics about + these flags and return the default if none are passed in (which is, install + everything) + """ + # the flag that prevents all logic here is the `--repo` flag which is used + # when no packages should be installed, just the repo files, so check for + # that here and return an empty list (which is equivalent to say 'no + # packages should be installed') + if args.repo: + return [] + + flags = { + 'install_osd': 'ceph-osd', + 'install_rgw': 'ceph-radosgw', + 'install_mds': 'ceph-mds', + 'install_mon': 'ceph-mon', + 'install_mgr': 'ceph-mgr', + 'install_common': 'ceph-common', + 'install_tests': 'ceph-test', + } + + if distro.is_rpm: + defaults = default_components.rpm + elif distro.is_pkgtarxz: + # archlinux doesn't have components! + flags = { + 'install_osd': 'ceph', + 'install_rgw': 'ceph', + 'install_mds': 'ceph', + 'install_mon': 'ceph', + 'install_mgr': 'ceph', + 'install_common': 'ceph', + 'install_tests': 'ceph', + } + defaults = default_components.pkgtarxz + else: + defaults = default_components.deb + # different naming convention for deb than rpm for radosgw + flags['install_rgw'] = 'radosgw' + + if args.install_all: + return defaults + else: + components = [] + for k, v in flags.items(): + if getattr(args, k, False): + components.append(v) + # if we have some components selected from flags then return that, + # otherwise return defaults because no flags and no `--repo` means we + # should get all of them by default + return components or defaults + + +def install(args): + args = sanitize_args(args) + + if args.repo: + return install_repo(args) + + gpgcheck = 0 if args.nogpgcheck else 1 + + if args.version_kind == 'stable': + version = args.release + else: + version = getattr(args, args.version_kind) + + version_str = args.version_kind + + if version: + version_str += ' version {version}'.format(version=version) + LOG.debug( + 'Installing %s on cluster %s hosts %s', + version_str, + args.cluster, + ' '.join(args.host), + ) + + for hostname in args.host: + LOG.debug('Detecting platform for host %s ...', hostname) + distro = hosts.get( + hostname, + username=args.username, + # XXX this should get removed once ceph packages are split for + # upstream. If default_release is True, it means that the user is + # trying to install on a RHEL machine and should expect to get RHEL + # packages. Otherwise, it will need to specify either a specific + # version, or repo, or a development branch. Other distro users + # should not see any differences. + use_rhceph=args.default_release, + ) + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + + components = detect_components(args, distro) + if distro.init == 'sysvinit' and args.cluster != 'ceph': + LOG.error('refusing to install on host: %s, with custom cluster name: %s' % ( + hostname, + args.cluster, + ) + ) + LOG.error('custom cluster names are not supported on sysvinit hosts') + continue + + rlogger = logging.getLogger(hostname) + rlogger.info('installing Ceph on %s' % hostname) + + cd_conf = getattr(args, 'cd_conf', None) + + # custom repo arguments + repo_url = os.environ.get('CEPH_DEPLOY_REPO_URL') or args.repo_url + gpg_url = os.environ.get('CEPH_DEPLOY_GPG_URL') or args.gpg_url + gpg_fallback = gpg.url('release') + + if gpg_url is None and repo_url: + LOG.warning('--gpg-url was not used, will fallback') + LOG.warning('using GPG fallback: %s', gpg_fallback) + gpg_url = gpg_fallback + + if args.local_mirror: + if args.username: + hostname = "%s@%s" % (args.username, hostname) + remoto.rsync(hostname, args.local_mirror, '/opt/ceph-deploy/repo', distro.conn.logger, sudo=True) + repo_url = 'file:///opt/ceph-deploy/repo' + gpg_url = 'file:///opt/ceph-deploy/repo/release.asc' + + if repo_url: # triggers using a custom repository + # the user used a custom repo url, this should override anything + # we can detect from the configuration, so warn about it + if cd_conf: + if cd_conf.get_default_repo(): + rlogger.warning('a default repo was found but it was \ + overridden on the CLI') + if args.release in cd_conf.get_repos(): + rlogger.warning('a custom repo was found but it was \ + overridden on the CLI') + + rlogger.info('using custom repository location: %s', repo_url) + distro.mirror_install( + distro, + repo_url, + gpg_url, + args.adjust_repos, + components=components, + gpgcheck=gpgcheck, + args=args + ) + + # Detect and install custom repos here if needed + elif should_use_custom_repo(args, cd_conf, repo_url): + LOG.info('detected valid custom repositories from config file') + custom_repo(distro, args, cd_conf, rlogger) + + else: # otherwise a normal installation + distro.install( + distro, + args.version_kind, + version, + args.adjust_repos, + components=components, + gpgcheck = gpgcheck, + args=args + ) + + # Check the ceph version we just installed + hosts.common.ceph_version(distro.conn) + distro.conn.exit() + + +def should_use_custom_repo(args, cd_conf, repo_url): + """ + A boolean to determine the logic needed to proceed with a custom repo + installation instead of cramming everything nect to the logic operator. + """ + if repo_url: + # repo_url signals a CLI override, return False immediately + return False + if cd_conf: + if cd_conf.has_repos: + has_valid_release = args.release in cd_conf.get_repos() + has_default_repo = cd_conf.get_default_repo() + if has_valid_release or has_default_repo: + return True + return False + + +def custom_repo(distro, args, cd_conf, rlogger, install_ceph=None): + """ + A custom repo install helper that will go through config checks to retrieve + repos (and any extra repos defined) and install those + + ``cd_conf`` is the object built from argparse that holds the flags and + information needed to determine what metadata from the configuration to be + used. + """ + default_repo = cd_conf.get_default_repo() + components = detect_components(args, distro) + if args.release in cd_conf.get_repos(): + LOG.info('will use repository from conf: %s' % args.release) + default_repo = args.release + elif default_repo: + LOG.info('will use default repository: %s' % default_repo) + + # At this point we know there is a cd_conf and that it has custom + # repos make sure we were able to detect and actual repo + if not default_repo: + LOG.warning('a ceph-deploy config was found with repos \ + but could not default to one') + else: + options = dict(cd_conf.items(default_repo)) + options['install_ceph'] = False if install_ceph is False else True + extra_repos = cd_conf.get_list(default_repo, 'extra-repos') + rlogger.info('adding custom repository file') + try: + distro.repo_install( + distro, + default_repo, + options.pop('baseurl'), + options.pop('gpgkey'), + components=components, + **options + ) + except KeyError as err: + raise RuntimeError('missing required key: %s in config section: %s' % (err, default_repo)) + + for xrepo in extra_repos: + rlogger.info('adding extra repo file: %s.repo' % xrepo) + options = dict(cd_conf.items(xrepo)) + try: + distro.repo_install( + distro, + xrepo, + options.pop('baseurl'), + options.pop('gpgkey'), + components=components, + **options + ) + except KeyError as err: + raise RuntimeError('missing required key: %s in config section: %s' % (err, xrepo)) + + +def install_repo(args): + """ + For a user that only wants to install the repository only (and avoid + installing Ceph and its dependencies). + """ + cd_conf = getattr(args, 'cd_conf', None) + + for hostname in args.host: + LOG.debug('Detecting platform for host %s ...', hostname) + distro = hosts.get( + hostname, + username=args.username, + # XXX this should get removed once Ceph packages are split for + # upstream. If default_release is True, it means that the user is + # trying to install on a RHEL machine and should expect to get RHEL + # packages. Otherwise, it will need to specify either a specific + # version, or repo, or a development branch. Other distro users should + # not see any differences. + use_rhceph=args.default_release, + ) + rlogger = logging.getLogger(hostname) + + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + + custom_repo(distro, args, cd_conf, rlogger, install_ceph=False) + +def remove(args, purge): + LOG.info('note that some dependencies *will not* be removed because they can cause issues with qemu-kvm') + LOG.info('like: librbd1 and librados2') + remove_action = 'Uninstalling' + if purge: + remove_action = 'Purging' + LOG.debug( + '%s on cluster %s hosts %s', + remove_action, + args.cluster, + ' '.join(args.host), + ) + + for hostname in args.host: + LOG.debug('Detecting platform for host %s ...', hostname) + + distro = hosts.get( + hostname, + username=args.username, + use_rhceph=True) + LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename) + rlogger = logging.getLogger(hostname) + rlogger.info('%s Ceph on %s' % (remove_action, hostname)) + distro.uninstall(distro, purge=purge) + distro.conn.exit() + +def uninstall(args): + remove(args, False) + +def purge(args): + remove(args, True) + +def purgedata(args): + LOG.debug( + 'Purging data from cluster %s hosts %s', + args.cluster, + ' '.join(args.host), + ) + + installed_hosts = [] + for hostname in args.host: + distro = hosts.get(hostname, username=args.username) + ceph_is_installed = distro.conn.remote_module.which('ceph') + if ceph_is_installed: + installed_hosts.append(hostname) + distro.conn.exit() + + if installed_hosts: + LOG.error("Ceph is still installed on: %s", installed_hosts) + raise RuntimeError("refusing to purge data while Ceph is still installed") + + for hostname in args.host: + distro = hosts.get(hostname, username=args.username) + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + + rlogger = logging.getLogger(hostname) + rlogger.info('purging data on %s' % hostname) + + # Try to remove the contents of /var/lib/ceph first, don't worry + # about errors here, we deal with them later on + remoto.process.check( + distro.conn, + [ + 'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph', + ] + ) + + # If we failed in the previous call, then we probably have OSDs + # still mounted, so we unmount them here + if distro.conn.remote_module.path_exists('/var/lib/ceph'): + rlogger.warning( + 'OSDs may still be mounted, trying to unmount them' + ) + remoto.process.run( + distro.conn, + [ + 'find', '/var/lib/ceph', + '-mindepth', '1', + '-maxdepth', '2', + '-type', 'd', + '-exec', 'umount', '{}', ';', + ] + ) + + # And now we try again to remove the contents, since OSDs should be + # unmounted, but this time we do check for errors + remoto.process.run( + distro.conn, + [ + 'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph', + ] + ) + + remoto.process.run( + distro.conn, + [ + 'rm', '-rf', '--one-file-system', '--', '/etc/ceph/', + ] + ) + + distro.conn.exit() + + +class StoreVersion(argparse.Action): + """ + Like ``"store"`` but also remember which one of the exclusive + options was set. + + There are three kinds of versions: stable, testing and dev. + This sets ``version_kind`` to be the right one of the above. + + This kludge essentially lets us differentiate explicitly set + values from defaults. + """ + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, values) + if self.dest == 'release': + self.dest = 'stable' + namespace.version_kind = self.dest + + +@priority(20) +def make(parser): + """ + Install Ceph packages on remote hosts. + """ + + version = parser.add_mutually_exclusive_group() + + # XXX deprecated in favor of release + version.add_argument( + '--stable', + nargs='?', + action=StoreVersion, + metavar='CODENAME', + help='[DEPRECATED] install a release known as CODENAME\ + (done by default) (default: %(default)s)', + ) + + version.add_argument( + '--release', + nargs='?', + action=StoreVersion, + metavar='CODENAME', + help='install a release known as CODENAME\ + (done by default) (default: %(default)s)', + ) + + version.add_argument( + '--testing', + nargs=0, + action=StoreVersion, + help='install the latest development release', + ) + + version.add_argument( + '--dev', + nargs='?', + action=StoreVersion, + const='master', + metavar='BRANCH_OR_TAG', + help='install a bleeding edge build from Git branch\ + or tag (default: %(default)s)', + ) + parser.add_argument( + '--dev-commit', + nargs='?', + action=StoreVersion, + metavar='COMMIT', + help='install a bleeding edge build from Git commit (defaults to master branch)', + ) + + version.set_defaults( + stable=None, # XXX deprecated in favor of release + release=None, # Set the default release in sanitize_args() + dev='master', + version_kind='stable', + ) + + parser.add_argument( + '--mon', + dest='install_mon', + action='store_true', + help='install the mon component only', + ) + + parser.add_argument( + '--mgr', + dest='install_mgr', + action='store_true', + help='install the mgr component only', + ) + + parser.add_argument( + '--mds', + dest='install_mds', + action='store_true', + help='install the mds component only', + ) + + parser.add_argument( + '--rgw', + dest='install_rgw', + action='store_true', + help='install the rgw component only', + ) + + parser.add_argument( + '--osd', + dest='install_osd', + action='store_true', + help='install the osd component only', + ) + + parser.add_argument( + '--tests', + dest='install_tests', + action='store_true', + help='install the testing components', + ) + + parser.add_argument( + '--cli', '--common', + dest='install_common', + action='store_true', + help='install the common component only', + ) + + parser.add_argument( + '--all', + dest='install_all', + action='store_true', + help='install all Ceph components (mon, osd, mds, rgw) except tests. This is the default', + ) + + repo = parser.add_mutually_exclusive_group() + + repo.add_argument( + '--adjust-repos', + dest='adjust_repos', + action='store_true', + help='install packages modifying source repos', + ) + + repo.add_argument( + '--no-adjust-repos', + dest='adjust_repos', + action='store_false', + help='install packages without modifying source repos', + ) + + repo.add_argument( + '--repo', + action='store_true', + help='install repo files only (skips package installation)', + ) + + repo.set_defaults( + adjust_repos=True, + ) + + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to install on', + ) + + parser.add_argument( + '--local-mirror', + nargs='?', + const='PATH', + default=None, + help='Fetch packages and push them to hosts for a local repo mirror', + ) + + parser.add_argument( + '--repo-url', + nargs='?', + dest='repo_url', + help='specify a repo URL that mirrors/contains Ceph packages', + ) + + parser.add_argument( + '--gpg-url', + nargs='?', + dest='gpg_url', + help='specify a GPG key URL to be used with custom repos\ + (defaults to ceph.com)' + ) + + parser.add_argument( + '--nogpgcheck', + action='store_true', + help='install packages without gpgcheck', + ) + + parser.set_defaults( + func=install, + ) + + +@priority(80) +def make_uninstall(parser): + """ + Remove Ceph packages from remote hosts. + """ + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to uninstall Ceph from', + ) + parser.set_defaults( + func=uninstall, + ) + + +@priority(80) +def make_purge(parser): + """ + Remove Ceph packages from remote hosts and purge all data. + """ + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to purge Ceph from', + ) + parser.set_defaults( + func=purge, + ) + + +@priority(80) +def make_purge_data(parser): + """ + Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph + """ + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='hosts to purge Ceph data from', + ) + parser.set_defaults( + func=purgedata, + ) diff --git a/ceph_deploy/lib/__init__.py b/ceph_deploy/lib/__init__.py new file mode 100644 index 0000000..fefb992 --- /dev/null +++ b/ceph_deploy/lib/__init__.py @@ -0,0 +1,27 @@ +""" +This module is meant for vendorizing Python libraries. Most libraries will need +to have some ``sys.path`` alterations done unless they are doing relative +imports. + +Do **not** add anything to this module that does not represent a vendorized +library. + +Vendored libraries should go into the ``vendor`` directory and imported from +there. This is so we allow libraries that are installed normally to be imported +if the vendored module is not available. + +The import dance here is done so that all other imports throught ceph-deploy +are kept the same regardless of where the module comes from. + +The expected way to import remoto would look like this:: + + from ceph_deploy.lib import remoto + +""" + +try: + # vendored + from .vendor import remoto +except ImportError: + # normally installed + import remoto # noqa diff --git a/ceph_deploy/lib/vendor/__init__.py b/ceph_deploy/lib/vendor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ceph_deploy/mds.py b/ceph_deploy/mds.py new file mode 100644 index 0000000..58f3eb8 --- /dev/null +++ b/ceph_deploy/mds.py @@ -0,0 +1,226 @@ +import logging +import os + +from ceph_deploy import conf +from ceph_deploy import exc +from ceph_deploy import hosts +from ceph_deploy.util import system +from ceph_deploy.lib import remoto +from ceph_deploy.cliutil import priority + + +LOG = logging.getLogger(__name__) + + +def get_bootstrap_mds_key(cluster): + """ + Read the bootstrap-mds key for `cluster`. + """ + path = '{cluster}.bootstrap-mds.keyring'.format(cluster=cluster) + try: + with open(path, 'rb') as f: + return f.read() + except IOError: + raise RuntimeError('bootstrap-mds keyring not found; run \'gatherkeys\'') + + +def create_mds(distro, name, cluster, init): + conn = distro.conn + + path = '/var/lib/ceph/mds/{cluster}-{name}'.format( + cluster=cluster, + name=name + ) + + conn.remote_module.safe_mkdir(path) + + bootstrap_keyring = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format( + cluster=cluster + ) + + keypath = os.path.join(path, 'keyring') + + stdout, stderr, returncode = remoto.process.check( + conn, + [ + 'ceph', + '--cluster', cluster, + '--name', 'client.bootstrap-mds', + '--keyring', bootstrap_keyring, + 'auth', 'get-or-create', 'mds.{name}'.format(name=name), + 'osd', 'allow rwx', + 'mds', 'allow', + 'mon', 'allow profile mds', + '-o', + os.path.join(keypath), + ] + ) + if returncode > 0: + for line in stderr: + conn.logger.error(line) + for line in stdout: + # yes stdout as err because this is an error + conn.logger.error(line) + conn.logger.error('exit code from command was: %s' % returncode) + raise RuntimeError('could not create mds') + + conn.remote_module.touch_file(os.path.join(path, 'done')) + conn.remote_module.touch_file(os.path.join(path, init)) + + if init == 'upstart': + remoto.process.run( + conn, + [ + 'initctl', + 'emit', + 'ceph-mds', + 'cluster={cluster}'.format(cluster=cluster), + 'id={name}'.format(name=name), + ], + timeout=7 + ) + elif init == 'sysvinit': + remoto.process.run( + conn, + [ + 'service', + 'ceph', + 'start', + 'mds.{name}'.format(name=name), + ], + timeout=7 + ) + if distro.is_el: + system.enable_service(distro.conn) + elif init == 'systemd': + remoto.process.run( + conn, + [ + 'systemctl', + 'enable', + 'ceph-mds@{name}'.format(name=name), + ], + timeout=7 + ) + remoto.process.run( + conn, + [ + 'systemctl', + 'start', + 'ceph-mds@{name}'.format(name=name), + ], + timeout=7 + ) + remoto.process.run( + conn, + [ + 'systemctl', + 'enable', + 'ceph.target', + ], + timeout=7 + ) + + + +def mds_create(args): + conf_data = conf.ceph.load_raw(args) + LOG.debug( + 'Deploying mds, cluster %s hosts %s', + args.cluster, + ' '.join(':'.join(x or '' for x in t) for t in args.mds), + ) + + key = get_bootstrap_mds_key(cluster=args.cluster) + + bootstrapped = set() + errors = 0 + failed_on_rhel = False + + for hostname, name in args.mds: + try: + distro = None + distro = hosts.get(hostname, username=args.username) + rlogger = distro.conn.logger + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + + LOG.debug('remote host will use %s', distro.init) + + if hostname not in bootstrapped: + bootstrapped.add(hostname) + LOG.debug('deploying mds bootstrap to %s', hostname) + distro.conn.remote_module.write_conf( + args.cluster, + conf_data, + args.overwrite_conf, + ) + + path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format( + cluster=args.cluster, + ) + + if not distro.conn.remote_module.path_exists(path): + rlogger.warning('mds keyring does not exist yet, creating one') + distro.conn.remote_module.write_keyring(path, key) + + create_mds(distro, name, args.cluster, distro.init) + distro.conn.exit() + except RuntimeError as e: + if distro and distro.normalized_name == 'redhat': + LOG.error('this feature may not yet available for %s %s' % (distro.name, distro.release)) + failed_on_rhel = True + LOG.error(e) + errors += 1 + + if errors: + if failed_on_rhel: + # because users only read the last few lines :( + LOG.error( + 'RHEL RHCS systems do not have the ability to deploy MDS yet' + ) + + raise exc.GenericError('Failed to create %d MDSs' % errors) + + +def mds(args): + if args.subcommand == 'create': + mds_create(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + + +def colon_separated(s): + host = s + name = s + if s.count(':') == 1: + (host, name) = s.split(':') + return (host, name) + + +@priority(30) +def make(parser): + """ + Ceph MDS daemon management + """ + mds_parser = parser.add_subparsers(dest='subcommand') + mds_parser.required = True + + mds_create = mds_parser.add_parser( + 'create', + help='Deploy Ceph MDS on remote host(s)' + ) + mds_create.add_argument( + 'mds', + metavar='HOST[:NAME]', + nargs='+', + type=colon_separated, + help='host (and optionally the daemon name) to deploy on', + ) + parser.set_defaults( + func=mds, + ) diff --git a/ceph_deploy/mgr.py b/ceph_deploy/mgr.py new file mode 100644 index 0000000..6d5ad13 --- /dev/null +++ b/ceph_deploy/mgr.py @@ -0,0 +1,226 @@ +import logging +import os + +from ceph_deploy import conf +from ceph_deploy import exc +from ceph_deploy import hosts +from ceph_deploy.util import system +from ceph_deploy.lib import remoto +from ceph_deploy.cliutil import priority + + +LOG = logging.getLogger(__name__) + + +def get_bootstrap_mgr_key(cluster): + """ + Read the bootstrap-mgr key for `cluster`. + """ + path = '{cluster}.bootstrap-mgr.keyring'.format(cluster=cluster) + try: + with open(path, 'rb') as f: + return f.read() + except IOError: + raise RuntimeError('bootstrap-mgr keyring not found; run \'gatherkeys\'') + + +def create_mgr(distro, name, cluster, init): + conn = distro.conn + + path = '/var/lib/ceph/mgr/{cluster}-{name}'.format( + cluster=cluster, + name=name + ) + + conn.remote_module.safe_makedirs(path) + + bootstrap_keyring = '/var/lib/ceph/bootstrap-mgr/{cluster}.keyring'.format( + cluster=cluster + ) + + keypath = os.path.join(path, 'keyring') + + stdout, stderr, returncode = remoto.process.check( + conn, + [ + 'ceph', + '--cluster', cluster, + '--name', 'client.bootstrap-mgr', + '--keyring', bootstrap_keyring, + 'auth', 'get-or-create', 'mgr.{name}'.format(name=name), + 'mon', 'allow profile mgr', + 'osd', 'allow *', + 'mds', 'allow *', + '-o', + os.path.join(keypath), + ] + ) + if returncode > 0: + for line in stderr: + conn.logger.error(line) + for line in stdout: + # yes stdout as err because this is an error + conn.logger.error(line) + conn.logger.error('exit code from command was: %s' % returncode) + raise RuntimeError('could not create mgr') + + conn.remote_module.touch_file(os.path.join(path, 'done')) + conn.remote_module.touch_file(os.path.join(path, init)) + + if init == 'upstart': + remoto.process.run( + conn, + [ + 'initctl', + 'emit', + 'ceph-mgr', + 'cluster={cluster}'.format(cluster=cluster), + 'id={name}'.format(name=name), + ], + timeout=7 + ) + elif init == 'sysvinit': + remoto.process.run( + conn, + [ + 'service', + 'ceph', + 'start', + 'mgr.{name}'.format(name=name), + ], + timeout=7 + ) + if distro.is_el: + system.enable_service(distro.conn) + elif init == 'systemd': + remoto.process.run( + conn, + [ + 'systemctl', + 'enable', + 'ceph-mgr@{name}'.format(name=name), + ], + timeout=7 + ) + remoto.process.run( + conn, + [ + 'systemctl', + 'start', + 'ceph-mgr@{name}'.format(name=name), + ], + timeout=7 + ) + remoto.process.run( + conn, + [ + 'systemctl', + 'enable', + 'ceph.target', + ], + timeout=7 + ) + + + +def mgr_create(args): + conf_data = conf.ceph.load_raw(args) + LOG.debug( + 'Deploying mgr, cluster %s hosts %s', + args.cluster, + ' '.join(':'.join(x or '' for x in t) for t in args.mgr), + ) + + key = get_bootstrap_mgr_key(cluster=args.cluster) + + bootstrapped = set() + errors = 0 + failed_on_rhel = False + + for hostname, name in args.mgr: + try: + distro = None + distro = hosts.get(hostname, username=args.username) + rlogger = distro.conn.logger + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + + LOG.debug('remote host will use %s', distro.init) + + if hostname not in bootstrapped: + bootstrapped.add(hostname) + LOG.debug('deploying mgr bootstrap to %s', hostname) + distro.conn.remote_module.write_conf( + args.cluster, + conf_data, + args.overwrite_conf, + ) + + path = '/var/lib/ceph/bootstrap-mgr/{cluster}.keyring'.format( + cluster=args.cluster, + ) + + if not distro.conn.remote_module.path_exists(path): + rlogger.warning('mgr keyring does not exist yet, creating one') + distro.conn.remote_module.write_keyring(path, key) + + create_mgr(distro, name, args.cluster, distro.init) + distro.conn.exit() + except RuntimeError as e: + if distro and distro.normalized_name == 'redhat': + LOG.error('this feature may not yet available for %s %s' % (distro.name, distro.release)) + failed_on_rhel = True + LOG.error(e) + errors += 1 + + if errors: + if failed_on_rhel: + # because users only read the last few lines :( + LOG.error( + 'RHEL RHCS systems do not have the ability to deploy MGR yet' + ) + + raise exc.GenericError('Failed to create %d MGRs' % errors) + + +def mgr(args): + if args.subcommand == 'create': + mgr_create(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + + +def colon_separated(s): + host = s + name = s + if s.count(':') == 1: + (host, name) = s.split(':') + return (host, name) + + +@priority(30) +def make(parser): + """ + Ceph MGR daemon management + """ + mgr_parser = parser.add_subparsers(dest='subcommand') + mgr_parser.required = True + + mgr_create = mgr_parser.add_parser( + 'create', + help='Deploy Ceph MGR on remote host(s)' + ) + mgr_create.add_argument( + 'mgr', + metavar='HOST[:NAME]', + nargs='+', + type=colon_separated, + help='host (and optionally the daemon name) to deploy on', + ) + parser.set_defaults( + func=mgr, + ) diff --git a/ceph_deploy/misc.py b/ceph_deploy/misc.py new file mode 100644 index 0000000..1620e1f --- /dev/null +++ b/ceph_deploy/misc.py @@ -0,0 +1,22 @@ + +def mon_hosts(mons): + """ + Iterate through list of MON hosts, return tuples of (name, host). + """ + for m in mons: + if m.count(':'): + (name, host) = m.split(':') + else: + name = m + host = m + if name.count('.') > 0: + name = name.split('.')[0] + yield (name, host) + +def remote_shortname(socket): + """ + Obtains remote hostname of the socket and cuts off the domain part + of its FQDN. + """ + return socket.gethostname().split('.', 1)[0] + diff --git a/ceph_deploy/mon.py b/ceph_deploy/mon.py new file mode 100644 index 0000000..12f4a72 --- /dev/null +++ b/ceph_deploy/mon.py @@ -0,0 +1,596 @@ +import json +import logging +import re +import os +import time + +from ceph_deploy import conf, exc, admin +from ceph_deploy.cliutil import priority +from ceph_deploy.util.help_formatters import ToggleRawTextHelpFormatter +from ceph_deploy.util import paths, net, files, packages, system +from ceph_deploy.lib import remoto +from ceph_deploy.new import new_mon_keyring +from ceph_deploy import hosts +from ceph_deploy.misc import mon_hosts +from ceph_deploy import gatherkeys + + +LOG = logging.getLogger(__name__) + + +def mon_status_check(conn, logger, hostname, args): + """ + A direct check for JSON output on the monitor status. + + For newer versions of Ceph (dumpling and newer) a new mon_status command + was added ( `ceph daemon mon mon_status` ) and should be revisited if the + output changes as this check depends on that availability. + + """ + asok_path = paths.mon.asok(args.cluster, hostname) + + out, err, code = remoto.process.check( + conn, + [ + 'ceph', + '--cluster={cluster}'.format(cluster=args.cluster), + '--admin-daemon', + asok_path, + 'mon_status', + ], + ) + + for line in err: + logger.error(line) + + try: + return json.loads(b''.join(out).decode('utf-8')) + except ValueError: + return {} + + +def catch_mon_errors(conn, logger, hostname, cfg, args): + """ + Make sure we are able to catch up common mishaps with monitors + and use that state of a monitor to determine what is missing + and warn apropriately about it. + """ + monmap = mon_status_check(conn, logger, hostname, args).get('monmap', {}) + mon_initial_members = get_mon_initial_members(args, _cfg=cfg) + public_addr = cfg.safe_get('global', 'public_addr') + public_network = cfg.safe_get('global', 'public_network') + mon_in_monmap = [ + mon.get('name') + for mon in monmap.get('mons', [{}]) + if mon.get('name') == hostname + ] + if mon_initial_members is None or not hostname in mon_initial_members: + logger.warning('%s is not defined in `mon initial members`', hostname) + if not mon_in_monmap: + logger.warning('monitor %s does not exist in monmap', hostname) + if not public_addr and not public_network: + logger.warning('neither `public_addr` nor `public_network` keys are defined for monitors') + logger.warning('monitors may not be able to form quorum') + + +def mon_status(conn, logger, hostname, args, silent=False): + """ + run ``ceph daemon mon.`hostname` mon_status`` on the remote end and provide + not only the output, but be able to return a boolean status of what is + going on. + ``False`` represents a monitor that is not doing OK even if it is up and + running, while ``True`` would mean the monitor is up and running correctly. + """ + mon = 'mon.%s' % hostname + + try: + out = mon_status_check(conn, logger, hostname, args) + if not out: + logger.warning('monitor: %s, might not be running yet' % mon) + return False + + if not silent: + logger.debug('*'*80) + logger.debug('status for monitor: %s' % mon) + for line in json.dumps(out, indent=2, sort_keys=True).split('\n'): + logger.debug(line) + logger.debug('*'*80) + if out['rank'] >= 0: + logger.info('monitor: %s is running' % mon) + return True + if out['rank'] == -1 and out['state']: + logger.info('monitor: %s is currently at the state of %s' % (mon, out['state'])) + return True + logger.info('monitor: %s is not running' % mon) + return False + except RuntimeError: + logger.info('monitor: %s is not running' % mon) + return False + + +def keyring_parser(path): + """ + This is a very, very, dumb parser that will look for `[entity]` sections + and return a list of those sections. It is not possible to parse this with + ConfigParser even though it is almost the same thing. + + Since this is only used to spit out warnings, it is OK to just be naive + about the parsing. + """ + sections = [] + with open(path) as keyring: + lines = keyring.readlines() + for line in lines: + line = line.strip('\n') + if line.startswith('[') and line.endswith(']'): + sections.append(line.strip('[]')) + return sections + + +def concatenate_keyrings(args): + """ + A helper to collect all keyrings into a single blob that will be + used to inject it to mons with ``--mkfs`` on remote nodes + + We require all keyring files to be concatenated to be in a directory + to end with ``.keyring``. + """ + keyring_path = os.path.abspath(args.keyrings) + LOG.info('concatenating keyrings from %s' % keyring_path) + LOG.info('to seed remote monitors') + + keyrings = [ + os.path.join(keyring_path, f) for f in os.listdir(keyring_path) + if os.path.isfile(os.path.join(keyring_path, f)) and f.endswith('.keyring') + ] + + contents = [] + seen_sections = {} + + if not keyrings: + path_from_arg = os.path.abspath(args.keyrings) + raise RuntimeError('could not find any keyrings in %s' % path_from_arg) + + for keyring in keyrings: + path = os.path.abspath(keyring) + + for section in keyring_parser(path): + if not seen_sections.get(section): + seen_sections[section] = path + LOG.info('adding entity "%s" from keyring %s' % (section, path)) + with open(path) as k: + contents.append(k.read()) + else: + LOG.warning('will not add keyring: %s' % path) + LOG.warning('entity "%s" from keyring %s is a duplicate' % (section, path)) + LOG.warning('already present in keyring: %s' % seen_sections[section]) + + return ''.join(contents) + + +def mon_add(args): + cfg = conf.ceph.load(args) + + # args.mon is a list with only one entry + mon_host = args.mon[0] + + try: + with open('{cluster}.mon.keyring'.format(cluster=args.cluster), + 'rb') as f: + monitor_keyring = f.read() + except IOError: + raise RuntimeError( + 'mon keyring not found; run \'new\' to create a new cluster' + ) + + LOG.info('ensuring configuration of new mon host: %s', mon_host) + args.client = args.mon + admin.admin(args) + LOG.debug( + 'Adding mon to cluster %s, host %s', + args.cluster, + mon_host, + ) + + mon_section = 'mon.%s' % mon_host + cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr') + + if args.address: + LOG.debug('using mon address via --address %s' % args.address) + mon_ip = args.address + elif cfg_mon_addr: + LOG.debug('using mon address via configuration: %s' % cfg_mon_addr) + mon_ip = cfg_mon_addr + else: + mon_ip = net.get_nonlocal_ip(mon_host) + LOG.debug('using mon address by resolving host: %s' % mon_ip) + + try: + LOG.debug('detecting platform for host %s ...', mon_host) + distro = hosts.get( + mon_host, + username=args.username, + callbacks=[packages.ceph_is_installed] + ) + LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename) + rlogger = logging.getLogger(mon_host) + + # ensure remote hostname is good to go + hostname_is_compatible(distro.conn, rlogger, mon_host) + rlogger.debug('adding mon to %s', mon_host) + args.address = mon_ip + distro.mon.add(distro, args, monitor_keyring) + + # tell me the status of the deployed mon + time.sleep(2) # give some room to start + catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args) + mon_status(distro.conn, rlogger, mon_host, args) + distro.conn.exit() + + except RuntimeError as e: + LOG.error(e) + raise exc.GenericError('Failed to add monitor to host: %s' % mon_host) + + +def mon_create(args): + + cfg = conf.ceph.load(args) + if not args.mon: + args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg) + + if args.keyrings: + monitor_keyring = concatenate_keyrings(args) + else: + keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster) + try: + monitor_keyring = files.read_file(keyring_path) + except IOError: + LOG.warning('keyring (%s) not found, creating a new one' % keyring_path) + new_mon_keyring(args) + monitor_keyring = files.read_file(keyring_path) + + LOG.debug( + 'Deploying mon, cluster %s hosts %s', + args.cluster, + ' '.join(args.mon), + ) + + errors = 0 + for (name, host) in mon_hosts(args.mon): + try: + # TODO add_bootstrap_peer_hint + LOG.debug('detecting platform for host %s ...', name) + distro = hosts.get( + host, + username=args.username, + callbacks=[packages.ceph_is_installed] + ) + LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename) + rlogger = logging.getLogger(name) + + # ensure remote hostname is good to go + hostname_is_compatible(distro.conn, rlogger, name) + rlogger.debug('deploying mon to %s', name) + distro.mon.create(distro, args, monitor_keyring) + + # tell me the status of the deployed mon + time.sleep(2) # give some room to start + mon_status(distro.conn, rlogger, name, args) + catch_mon_errors(distro.conn, rlogger, name, cfg, args) + distro.conn.exit() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to create %d monitors' % errors) + + +def hostname_is_compatible(conn, logger, provided_hostname): + """ + Make sure that the host that we are connecting to has the same value as the + `hostname` in the remote host, otherwise mons can fail not reaching quorum. + """ + logger.debug('determining if provided host has same hostname in remote') + remote_hostname = conn.remote_module.shortname() + if remote_hostname == provided_hostname: + return + logger.warning('*'*80) + logger.warning('provided hostname must match remote hostname') + logger.warning('provided hostname: %s' % provided_hostname) + logger.warning('remote hostname: %s' % remote_hostname) + logger.warning('monitors may not reach quorum and create-keys will not complete') + logger.warning('*'*80) + + +def destroy_mon(conn, cluster, hostname): + import datetime + import time + retries = 5 + + path = paths.mon.path(cluster, hostname) + + if conn.remote_module.path_exists(path): + # remove from cluster + remoto.process.run( + conn, + [ + 'ceph', + '--cluster={cluster}'.format(cluster=cluster), + '-n', 'mon.', + '-k', '{path}/keyring'.format(path=path), + 'mon', + 'remove', + hostname, + ], + timeout=7, + ) + + # stop + if conn.remote_module.path_exists(os.path.join(path, 'upstart')) or system.is_upstart(conn): + status_args = [ + 'initctl', + 'status', + 'ceph-mon', + 'cluster={cluster}'.format(cluster=cluster), + 'id={hostname}'.format(hostname=hostname), + ] + + elif conn.remote_module.path_exists(os.path.join(path, 'sysvinit')): + status_args = [ + 'service', + 'ceph', + 'status', + 'mon.{hostname}'.format(hostname=hostname), + ] + elif system.is_systemd(conn): + status_args = [ + 'systemctl', + 'stop', + 'ceph-mon@{hostname}.service'.format(hostname=hostname), + ] + else: + raise RuntimeError('could not detect a supported init system, cannot continue') + + while retries: + conn.logger.info('polling the daemon to verify it stopped') + if is_running(conn, status_args): + time.sleep(5) + retries -= 1 + if retries <= 0: + raise RuntimeError('ceph-mon deamon did not stop') + else: + break + + # archive old monitor directory + fn = '{cluster}-{hostname}-{stamp}'.format( + hostname=hostname, + cluster=cluster, + stamp=datetime.datetime.utcnow().strftime("%Y-%m-%dZ%H:%M:%S"), + ) + + remoto.process.run( + conn, + [ + 'mkdir', + '-p', + '/var/lib/ceph/mon-removed', + ], + ) + + conn.remote_module.make_mon_removed_dir(path, fn) + + +def mon_destroy(args): + errors = 0 + for (name, host) in mon_hosts(args.mon): + try: + LOG.debug('Removing mon from %s', name) + + distro = hosts.get( + host, + username=args.username, + callbacks=[packages.ceph_is_installed] + ) + hostname = distro.conn.remote_module.shortname() + + destroy_mon( + distro.conn, + args.cluster, + hostname, + ) + distro.conn.exit() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to destroy %d monitors' % errors) + + +def mon_create_initial(args): + mon_initial_members = get_mon_initial_members(args, error_on_empty=True) + + # create them normally through mon_create + args.mon = mon_initial_members + mon_create(args) + + # make the sets to be able to compare late + mon_in_quorum = set([]) + mon_members = set([host for host in mon_initial_members]) + + for host in mon_initial_members: + mon_name = 'mon.%s' % host + LOG.info('processing monitor %s', mon_name) + sleeps = [20, 20, 15, 10, 10, 5] + tries = 5 + rlogger = logging.getLogger(host) + distro = hosts.get( + host, + username=args.username, + callbacks=[packages.ceph_is_installed] + ) + + while tries: + status = mon_status_check(distro.conn, rlogger, host, args) + has_reached_quorum = status.get('state', '') in ['peon', 'leader'] + if not has_reached_quorum: + LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries)) + tries -= 1 + sleep_seconds = sleeps.pop() + LOG.warning('waiting %s seconds before retrying', sleep_seconds) + time.sleep(sleep_seconds) # Magic number + else: + mon_in_quorum.add(host) + LOG.info('%s monitor has reached quorum!', mon_name) + break + distro.conn.exit() + + if mon_in_quorum == mon_members: + LOG.info('all initial monitors are running and have formed quorum') + LOG.info('Running gatherkeys...') + gatherkeys.gatherkeys(args) + else: + LOG.error('Some monitors have still not reached quorum:') + for host in mon_members - mon_in_quorum: + LOG.error('%s', host) + raise SystemExit('cluster may not be in a healthy state') + + +def mon(args): + if args.subcommand == 'create': + mon_create(args) + elif args.subcommand == 'add': + mon_add(args) + elif args.subcommand == 'destroy': + mon_destroy(args) + elif args.subcommand == 'create-initial': + mon_create_initial(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + + +@priority(30) +def make(parser): + """ + Ceph MON Daemon management + """ + parser.formatter_class = ToggleRawTextHelpFormatter + + mon_parser = parser.add_subparsers(dest='subcommand') + mon_parser.required = True + + mon_add = mon_parser.add_parser( + 'add', + help=('R|Add a monitor to an existing cluster:\n' + '\tceph-deploy mon add node1\n' + 'Or:\n' + '\tceph-deploy mon add --address 192.168.1.10 node1\n' + 'If the section for the monitor exists and defines a `mon addr` that\n' + 'will be used, otherwise it will fallback by resolving the hostname to an\n' + 'IP. If `--address` is used it will override all other options.') + ) + mon_add.add_argument( + '--address', + nargs='?', + ) + mon_add.add_argument( + 'mon', + nargs=1, + ) + + mon_create = mon_parser.add_parser( + 'create', + help=('R|Deploy monitors by specifying them like:\n' + '\tceph-deploy mon create node1 node2 node3\n' + 'If no hosts are passed it will default to use the\n' + '`mon initial members` defined in the configuration.') + ) + mon_create.add_argument( + '--keyrings', + nargs='?', + help='concatenate multiple keyrings to be seeded on new monitors', + ) + mon_create.add_argument( + 'mon', + nargs='*', + ) + + mon_create_initial = mon_parser.add_parser( + 'create-initial', + help=('Will deploy for monitors defined in `mon initial members`, ' + 'wait until they form quorum and then gatherkeys, reporting ' + 'the monitor status along the process. If monitors don\'t form ' + 'quorum the command will eventually time out.') + ) + mon_create_initial.add_argument( + '--keyrings', + nargs='?', + help='concatenate multiple keyrings to be seeded on new monitors', + ) + + mon_destroy = mon_parser.add_parser( + 'destroy', + help='Completely remove Ceph MON from remote host(s)' + ) + mon_destroy.add_argument( + 'mon', + nargs='+', + ) + + parser.set_defaults( + func=mon, + ) + +# +# Helpers +# + + +def get_mon_initial_members(args, error_on_empty=False, _cfg=None): + """ + Read the Ceph config file and return the value of mon_initial_members + Optionally, a NeedHostError can be raised if the value is None. + """ + if _cfg: + cfg = _cfg + else: + cfg = conf.ceph.load(args) + mon_initial_members = cfg.safe_get('global', 'mon_initial_members') + if not mon_initial_members: + if error_on_empty: + raise exc.NeedHostError( + 'could not find `mon initial members` defined in ceph.conf' + ) + else: + mon_initial_members = re.split(r'[,\s]+', mon_initial_members) + return mon_initial_members + + +def is_running(conn, args): + """ + Run a command to check the status of a mon, return a boolean. + + We heavily depend on the format of the output, if that ever changes + we need to modify this. + Check daemon status for 3 times + output of the status should be similar to:: + + mon.mira094: running {"version":"0.61.5"} + + or when it fails:: + + mon.mira094: dead {"version":"0.61.5"} + mon.mira094: not running {"version":"0.61.5"} + """ + stdout, stderr, _ = remoto.process.check( + conn, + args + ) + result_string = b' '.join(stdout) + for run_check in [b': running', b' start/running']: + if run_check in result_string: + return True + return False diff --git a/ceph_deploy/new.py b/ceph_deploy/new.py new file mode 100644 index 0000000..842117b --- /dev/null +++ b/ceph_deploy/new.py @@ -0,0 +1,276 @@ +import errno +import logging +import os +import uuid +import struct +import time +import base64 +import socket + +from ceph_deploy.cliutil import priority +from ceph_deploy import conf, hosts, exc +from ceph_deploy.util import arg_validators, ssh, net +from ceph_deploy.misc import mon_hosts +from ceph_deploy.lib import remoto +from ceph_deploy.connection import get_local_connection + + +LOG = logging.getLogger(__name__) + + +def generate_auth_key(): + key = os.urandom(16) + header = struct.pack( + ' up_osds: + difference = osds - up_osds + logger.warning('there %s %d OSD%s down' % ( + ['is', 'are'][difference != 1], + difference, + "s"[difference == 1:]) + ) + + if osds > in_osds: + difference = osds - in_osds + logger.warning('there %s %d OSD%s out' % ( + ['is', 'are'][difference != 1], + difference, + "s"[difference == 1:]) + ) + + if full: + logger.warning('OSDs are full!') + + if nearfull: + logger.warning('OSDs are near full!') + + +def create_osd( + conn, + cluster, + data, + journal, + zap, + fs_type, + dmcrypt, + dmcrypt_dir, + storetype, + block_wal, + block_db, + **kw): + """ + Run on osd node, creates an OSD from a data disk. + """ + ceph_volume_executable = system.executable_path(conn, 'ceph-volume') + args = [ + ceph_volume_executable, + '--cluster', cluster, + 'lvm', + 'create', + '--%s' % storetype, + '--data', data + ] + if zap: + LOG.warning('zapping is no longer supported when preparing') + if dmcrypt: + args.append('--dmcrypt') + # TODO: re-enable dmcrypt support once ceph-volume grows it + LOG.warning('dmcrypt is currently not supported') + + if storetype == 'bluestore': + if block_wal: + args.append('--block.wal') + args.append(block_wal) + if block_db: + args.append('--block.db') + args.append(block_db) + elif storetype == 'filestore': + if not journal: + raise RuntimeError('A journal lv or GPT partition must be specified when using filestore') + args.append('--journal') + args.append(journal) + + if kw.get('debug'): + remoto.process.run( + conn, + args, + extend_env={'CEPH_VOLUME_DEBUG': '1'} + ) + + else: + remoto.process.run( + conn, + args + ) + + +def create(args, cfg, create=False): + if not args.host: + raise RuntimeError('Required host was not specified as a positional argument') + LOG.debug( + 'Creating OSD on cluster %s with data device %s', + args.cluster, + args.data + ) + + key = get_bootstrap_osd_key(cluster=args.cluster) + + bootstrapped = set() + errors = 0 + hostname = args.host + + try: + if args.data is None: + raise exc.NeedDiskError(hostname) + + distro = hosts.get( + hostname, + username=args.username, + callbacks=[packages.ceph_is_installed] + ) + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + + if hostname not in bootstrapped: + bootstrapped.add(hostname) + LOG.debug('Deploying osd to %s', hostname) + + conf_data = conf.ceph.load_raw(args) + distro.conn.remote_module.write_conf( + args.cluster, + conf_data, + args.overwrite_conf + ) + + create_osd_keyring(distro.conn, args.cluster, key) + + # default to bluestore unless explicitly told not to + storetype = 'bluestore' + if args.filestore: + storetype = 'filestore' + + create_osd( + distro.conn, + cluster=args.cluster, + data=args.data, + journal=args.journal, + zap=args.zap_disk, + fs_type=args.fs_type, + dmcrypt=args.dmcrypt, + dmcrypt_dir=args.dmcrypt_key_dir, + storetype=storetype, + block_wal=args.block_wal, + block_db=args.block_db, + debug=args.debug, + ) + + # give the OSD a few seconds to start + time.sleep(5) + catch_osd_errors(distro.conn, distro.conn.logger, args) + LOG.debug('Host %s is now ready for osd use.', hostname) + distro.conn.exit() + + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to create %d OSDs' % errors) + + +def disk_zap(args): + + hostname = args.host + for disk in args.disk: + if not disk or not hostname: + raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk)) + LOG.debug('zapping %s on %s', disk, hostname) + distro = hosts.get( + hostname, + username=args.username, + callbacks=[packages.ceph_is_installed] + ) + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + + distro.conn.remote_module.zeroing(disk) + + ceph_volume_executable = system.executable_path(distro.conn, 'ceph-volume') + if args.debug: + remoto.process.run( + distro.conn, + [ + ceph_volume_executable, + 'lvm', + 'zap', + disk, + ], + env={'CEPH_VOLUME_DEBUG': '1'} + ) + else: + remoto.process.run( + distro.conn, + [ + ceph_volume_executable, + 'lvm', + 'zap', + disk, + ], + ) + + distro.conn.exit() + + +def disk_list(args, cfg): + command = ['fdisk', '-l'] + + for hostname in args.host: + distro = hosts.get( + hostname, + username=args.username, + callbacks=[packages.ceph_is_installed] + ) + out, err, code = remoto.process.check( + distro.conn, + command, + ) + for line in out: + if line.startswith('Disk /'): + distro.conn.logger.info(line) + + +def osd_list(args, cfg): + for hostname in args.host: + distro = hosts.get( + hostname, + username=args.username, + callbacks=[packages.ceph_is_installed] + ) + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + + LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname)) + ceph_volume_executable = system.executable_path(distro.conn, 'ceph-volume') + if args.debug: + remoto.process.run( + distro.conn, + [ + ceph_volume_executable, + 'lvm', + 'list', + ], + env={'CEPH_VOLUME_DEBUG': '1'} + + ) + else: + remoto.process.run( + distro.conn, + [ + ceph_volume_executable, + 'lvm', + 'list', + ], + ) + distro.conn.exit() + + +def osd(args): + cfg = conf.ceph.load(args) + + if args.subcommand == 'list': + osd_list(args, cfg) + elif args.subcommand == 'create': + create(args, cfg) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + sys.exit(1) + + +def disk(args): + cfg = conf.ceph.load(args) + + if args.subcommand == 'list': + disk_list(args, cfg) + elif args.subcommand == 'create': + create(args, cfg) + elif args.subcommand == 'zap': + disk_zap(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + sys.exit(1) + + +@priority(50) +def make(parser): + """ + Prepare a data disk on remote host. + """ + sub_command_help = dedent(""" + Create OSDs from a data disk on a remote host: + + ceph-deploy osd create {node} --data /path/to/device + + For bluestore, optional devices can be used:: + + ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device + ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device + ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device + + For filestore, the journal must be specified, as well as the objectstore:: + + ceph-deploy osd create {node} --filestore --data /path/to/data --journal /path/to/journal + + For data devices, it can be an existing logical volume in the format of: + vg/lv, or a device. For other OSD components like wal, db, and journal, it + can be logical volume (in vg/lv format) or it must be a GPT partition. + """ + ) + parser.formatter_class = argparse.RawDescriptionHelpFormatter + parser.description = sub_command_help + + osd_parser = parser.add_subparsers(dest='subcommand') + osd_parser.required = True + + osd_list = osd_parser.add_parser( + 'list', + help='List OSD info from remote host(s)' + ) + osd_list.add_argument( + 'host', + nargs='+', + metavar='HOST', + help='remote host(s) to list OSDs from' + ) + osd_list.add_argument( + '--debug', + action='store_true', + help='Enable debug mode on remote ceph-volume calls', + ) + osd_create = osd_parser.add_parser( + 'create', + help='Create new Ceph OSD daemon by preparing and activating a device' + ) + osd_create.add_argument( + '--data', + metavar='DATA', + help='The OSD data logical volume (vg/lv) or absolute path to device' + ) + osd_create.add_argument( + '--journal', + help='Logical Volume (vg/lv) or path to GPT partition', + ) + osd_create.add_argument( + '--zap-disk', + action='store_true', + help='DEPRECATED - cannot zap when creating an OSD' + ) + osd_create.add_argument( + '--fs-type', + metavar='FS_TYPE', + choices=['xfs', + 'btrfs' + ], + default='xfs', + help='filesystem to use to format DEVICE (xfs, btrfs)', + ) + osd_create.add_argument( + '--dmcrypt', + action='store_true', + help='use dm-crypt on DEVICE', + ) + osd_create.add_argument( + '--dmcrypt-key-dir', + metavar='KEYDIR', + default='/etc/ceph/dmcrypt-keys', + help='directory where dm-crypt keys are stored', + ) + osd_create.add_argument( + '--filestore', + action='store_true', default=None, + help='filestore objectstore', + ) + osd_create.add_argument( + '--bluestore', + action='store_true', default=None, + help='bluestore objectstore', + ) + osd_create.add_argument( + '--block-db', + default=None, + help='bluestore block.db path' + ) + osd_create.add_argument( + '--block-wal', + default=None, + help='bluestore block.wal path' + ) + osd_create.add_argument( + 'host', + nargs='?', + metavar='HOST', + help='Remote host to connect' + ) + osd_create.add_argument( + '--debug', + action='store_true', + help='Enable debug mode on remote ceph-volume calls', + ) + parser.set_defaults( + func=osd, + ) + + +@priority(50) +def make_disk(parser): + """ + Manage disks on a remote host. + """ + disk_parser = parser.add_subparsers(dest='subcommand') + disk_parser.required = True + + disk_zap = disk_parser.add_parser( + 'zap', + help='destroy existing data and filesystem on LV or partition', + ) + disk_zap.add_argument( + 'host', + nargs='?', + metavar='HOST', + help='Remote HOST(s) to connect' + ) + disk_zap.add_argument( + 'disk', + nargs='+', + metavar='DISK', + help='Disk(s) to zap' + ) + disk_zap.add_argument( + '--debug', + action='store_true', + help='Enable debug mode on remote ceph-volume calls', + ) + disk_list = disk_parser.add_parser( + 'list', + help='List disk info from remote host(s)' + ) + disk_list.add_argument( + 'host', + nargs='+', + metavar='HOST', + help='Remote HOST(s) to list OSDs from' + ) + disk_list.add_argument( + '--debug', + action='store_true', + help='Enable debug mode on remote ceph-volume calls', + ) + parser.set_defaults( + func=disk, + ) diff --git a/ceph_deploy/pkg.py b/ceph_deploy/pkg.py new file mode 100644 index 0000000..e40c17b --- /dev/null +++ b/ceph_deploy/pkg.py @@ -0,0 +1,86 @@ +import logging +from . import hosts + + +LOG = logging.getLogger(__name__) + + +def install(args): + packages = args.install.split(',') + for hostname in args.hosts: + distro = hosts.get(hostname, username=args.username) + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + rlogger = logging.getLogger(hostname) + rlogger.info('installing packages on %s' % hostname) + # Do not timeout on package install. If you we this command to install + # e.g. ceph-selinux or some other package with long post script we can + # easily timeout in the 5 minutes that we use as a default timeout, + # turning off the timeout completely for the time we run the command + # should make this much more safe. + distro.conn.global_timeout = None + distro.packager.install(packages) + distro.conn.exit() + + +def remove(args): + packages = args.remove.split(',') + for hostname in args.hosts: + distro = hosts.get(hostname, username=args.username) + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + + rlogger = logging.getLogger(hostname) + rlogger.info('removing packages from %s' % hostname) + # Do not timeout on package removal. If we use this command to remove + # e.g. ceph-selinux or some other package with long post script we can + # easily timeout in the 5 minutes that we use as a default timeout, + # turning off the timeout completely for the time we run the command + # should make this much more safe. + distro.conn.global_timeout = None + distro.packager.remove(packages) + distro.conn.exit() + + +def pkg(args): + if args.install: + install(args) + elif args.remove: + remove(args) + + +def make(parser): + """ + Manage packages on remote hosts. + """ + + action = parser.add_mutually_exclusive_group() + + action.add_argument( + '--install', + metavar='PKG(s)', + help='Comma-separated package(s) to install', + ) + + action.add_argument( + '--remove', + metavar='PKG(s)', + help='Comma-separated package(s) to remove', + ) + + parser.add_argument( + 'hosts', + nargs='+', + ) + + parser.set_defaults( + func=pkg, + ) diff --git a/ceph_deploy/repo.py b/ceph_deploy/repo.py new file mode 100644 index 0000000..9fd5c35 --- /dev/null +++ b/ceph_deploy/repo.py @@ -0,0 +1,113 @@ +import os +import logging + +from ceph_deploy import hosts +from ceph_deploy.cliutil import priority + + +LOG = logging.getLogger(__name__) + + +def install_repo(distro, args, cd_conf, rlogger): + if args.repo_name in cd_conf.get_repos(): + LOG.info('will use repository %s from ceph-deploy config', args.repo_name) + options = dict(cd_conf.items(args.repo_name)) + extra_repos = cd_conf.get_list(args.repo_name, 'extra-repos') + try: + repo_url = options.pop('baseurl') + gpg_url = options.pop('gpgkey', None) + except KeyError as err: + raise RuntimeError( + 'missing required key: %s in config section: %s' % (err, args.repo_name) + ) + else: + repo_url = os.environ.get('CEPH_DEPLOY_REPO_URL') or args.repo_url + gpg_url = os.environ.get('CEPH_DEPLOY_GPG_URL') or args.gpg_url + extra_repos = [] + + repo_url = repo_url.strip('/') # Remove trailing slashes + distro.packager.add_repo( + args.repo_name, + repo_url, + gpg_url=gpg_url + ) + + for xrepo in extra_repos: + rlogger.info('adding extra repo: %s' % xrepo) + options = dict(cd_conf.items(xrepo)) + try: + repo_url = options.pop('baseurl') + gpg_url = options.pop('gpgkey', None) + except KeyError as err: + raise RuntimeError( + 'missing required key: %s in config section: %s' % (err, xrepo) + ) + distro.packager.add_repo( + args.repo_name, + repo_url, + gpg_url=gpg_url + ) + + +def repo(args): + cd_conf = getattr(args, 'cd_conf', None) + + for hostname in args.host: + LOG.debug('Detecting platform for host %s ...', hostname) + distro = hosts.get( + hostname, + username=args.username + ) + rlogger = logging.getLogger(hostname) + + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + + if args.remove: + distro.packager.remove_repo(args.repo_name) + else: + install_repo(distro, args, cd_conf, rlogger) + + +@priority(70) +def make(parser): + """ + Repo definition management + """ + + parser.add_argument( + 'repo_name', + metavar='REPO-NAME', + help='Name of repo to manage. Can match an entry in cephdeploy.conf' + ) + + parser.add_argument( + '--repo-url', + help='a repo URL that mirrors/contains Ceph packages' + ) + + parser.add_argument( + '--gpg-url', + help='a GPG key URL to be used with custom repos' + ) + + parser.add_argument( + '--remove', '--delete', + action='store_true', + help='remove repo definition on remote host' + ) + + parser.add_argument( + 'host', + metavar='HOST', + nargs='+', + help='host(s) to install on' + ) + + parser.set_defaults( + func=repo + ) diff --git a/ceph_deploy/rgw.py b/ceph_deploy/rgw.py new file mode 100644 index 0000000..c6b9e0b --- /dev/null +++ b/ceph_deploy/rgw.py @@ -0,0 +1,233 @@ +import errno +import logging +import os + +from ceph_deploy import conf +from ceph_deploy import exc +from ceph_deploy import hosts +from ceph_deploy.util import system +from ceph_deploy.lib import remoto +from ceph_deploy.cliutil import priority + + +LOG = logging.getLogger(__name__) + + +def get_bootstrap_rgw_key(cluster): + """ + Read the bootstrap-rgw key for `cluster`. + """ + path = '{cluster}.bootstrap-rgw.keyring'.format(cluster=cluster) + try: + with open(path, 'rb') as f: + return f.read() + except IOError: + raise RuntimeError('bootstrap-rgw keyring not found; run \'gatherkeys\'') + + +def create_rgw(distro, name, cluster, init): + conn = distro.conn + + path = '/var/lib/ceph/radosgw/{cluster}-{name}'.format( + cluster=cluster, + name=name + ) + + conn.remote_module.safe_makedirs(path) + + bootstrap_keyring = '/var/lib/ceph/bootstrap-rgw/{cluster}.keyring'.format( + cluster=cluster + ) + + keypath = os.path.join(path, 'keyring') + + stdout, stderr, returncode = remoto.process.check( + conn, + [ + 'ceph', + '--cluster', cluster, + '--name', 'client.bootstrap-rgw', + '--keyring', bootstrap_keyring, + 'auth', 'get-or-create', 'client.{name}'.format(name=name), + 'osd', 'allow rwx', + 'mon', 'allow rw', + '-o', + os.path.join(keypath), + ] + ) + if returncode > 0 and returncode != errno.EACCES: + for line in stderr: + conn.logger.error(line) + for line in stdout: + # yes stdout as err because this is an error + conn.logger.error(line) + conn.logger.error('exit code from command was: %s' % returncode) + raise RuntimeError('could not create rgw') + + remoto.process.check( + conn, + [ + 'ceph', + '--cluster', cluster, + '--name', 'client.bootstrap-rgw', + '--keyring', bootstrap_keyring, + 'auth', 'get-or-create', 'client.{name}'.format(name=name), + 'osd', 'allow *', + 'mon', 'allow *', + '-o', + os.path.join(keypath), + ] + ) + + conn.remote_module.touch_file(os.path.join(path, 'done')) + conn.remote_module.touch_file(os.path.join(path, init)) + + if init == 'upstart': + remoto.process.run( + conn, + [ + 'initctl', + 'emit', + 'radosgw', + 'cluster={cluster}'.format(cluster=cluster), + 'id={name}'.format(name=name), + ], + timeout=7 + ) + elif init == 'sysvinit': + remoto.process.run( + conn, + [ + 'service', + 'ceph-radosgw', + 'start', + ], + timeout=7 + ) + if distro.is_el: + system.enable_service(distro.conn, service='ceph-radosgw') + elif init == 'systemd': + remoto.process.run( + conn, + [ + 'systemctl', + 'enable', + 'ceph-radosgw@{name}'.format(name=name), + ], + timeout=7 + ) + remoto.process.run( + conn, + [ + 'systemctl', + 'start', + 'ceph-radosgw@{name}'.format(name=name), + ], + timeout=7 + ) + remoto.process.run( + conn, + [ + 'systemctl', + 'enable', + 'ceph.target', + ], + timeout=7 + ) + + +def rgw_create(args): + conf_data = conf.ceph.load_raw(args) + LOG.debug( + 'Deploying rgw, cluster %s hosts %s', + args.cluster, + ' '.join(':'.join(x or '' for x in t) for t in args.rgw), + ) + + key = get_bootstrap_rgw_key(cluster=args.cluster) + + bootstrapped = set() + errors = 0 + for hostname, name in args.rgw: + try: + distro = hosts.get(hostname, username=args.username) + rlogger = distro.conn.logger + LOG.info( + 'Distro info: %s %s %s', + distro.name, + distro.release, + distro.codename + ) + LOG.debug('remote host will use %s', distro.init) + + if hostname not in bootstrapped: + bootstrapped.add(hostname) + LOG.debug('deploying rgw bootstrap to %s', hostname) + distro.conn.remote_module.write_conf( + args.cluster, + conf_data, + args.overwrite_conf, + ) + + path = '/var/lib/ceph/bootstrap-rgw/{cluster}.keyring'.format( + cluster=args.cluster, + ) + + if not distro.conn.remote_module.path_exists(path): + rlogger.warning('rgw keyring does not exist yet, creating one') + distro.conn.remote_module.write_keyring(path, key) + + create_rgw(distro, name, args.cluster, distro.init) + distro.conn.exit() + LOG.info( + ('The Ceph Object Gateway (RGW) is now running on host %s and ' + 'default port %s'), + hostname, + '7480' + ) + except RuntimeError as e: + LOG.error(e) + errors += 1 + + if errors: + raise exc.GenericError('Failed to create %d RGWs' % errors) + + +def rgw(args): + if args.subcommand == 'create': + rgw_create(args) + else: + LOG.error('subcommand %s not implemented', args.subcommand) + + +def colon_separated(s): + host = s + name = s + if s.count(':') == 1: + (host, name) = s.split(':') + name = 'rgw.' + name + return (host, name) + + +@priority(30) +def make(parser): + """ + Ceph RGW daemon management + """ + rgw_parser = parser.add_subparsers(dest='subcommand') + rgw_parser.required = True + rgw_create = rgw_parser.add_parser( + 'create', + help='Create an RGW instance' + ) + rgw_create.add_argument( + 'rgw', + metavar='HOST[:NAME]', + nargs='+', + type=colon_separated, + help='host (and optionally the daemon name) to deploy on. \ + NAME is automatically prefixed with \'rgw.\'', + ) + parser.set_defaults( + func=rgw, + ) diff --git a/ceph_deploy/tests/__init__.py b/ceph_deploy/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ceph_deploy/tests/conftest.py b/ceph_deploy/tests/conftest.py new file mode 100644 index 0000000..ee7fcf1 --- /dev/null +++ b/ceph_deploy/tests/conftest.py @@ -0,0 +1,98 @@ +import logging +import os +import subprocess +import sys +import pytest + + +LOG = logging.getLogger(__name__) + + +def _prepend_path(env): + """ + Make sure the PATH contains the location where the Python binary + lives. This makes sure cli tools installed in a virtualenv work. + """ + if env is None: + env = os.environ + env = dict(env) + new = os.path.dirname(sys.executable) + path = env.get('PATH') + if path is not None: + new = new + ':' + path + env['PATH'] = new + return env + + +class CLIFailed(Exception): + """CLI tool failed""" + + def __init__(self, args, status): + self.args = args + self.status = status + + def __str__(self): + return '{doc}: {args}: exited with status {status}'.format( + doc=self.__doc__, + args=self.args, + status=self.status, + ) + + +class CLIProcess(object): + def __init__(self, **kw): + self.kw = kw + + def __enter__(self): + try: + self.p = subprocess.Popen(**self.kw) + except OSError as e: + raise AssertionError( + 'CLI tool {args!r} does not work: {err}'.format( + args=self.kw['args'], + err=e, + ), + ) + else: + return self.p + + def __exit__(self, exc_type, exc_val, exc_tb): + self.p.wait() + if self.p.returncode != 0: + err = CLIFailed( + args=self.kw['args'], + status=self.p.returncode, + ) + if exc_type is None: + # nothing else raised, so we should complain; if + # something else failed, we'll just log + raise err + else: + LOG.error(str(err)) + + +class CLITester(object): + # provide easy way for caller to access the exception class + # without importing us + Failed = CLIFailed + + def __init__(self, tmpdir): + self.tmpdir = tmpdir + + def __call__(self, **kw): + kw.setdefault('cwd', str(self.tmpdir)) + kw['env'] = _prepend_path(kw.get('env')) + kw['env']['COLUMNS'] = '80' + return CLIProcess(**kw) + + +@pytest.fixture +def cli(request, tmpdir): + """ + Test command line behavior. + """ + + # the tmpdir here will be the same value as the test function + # sees; we rely on that to let caller prepare and introspect + # any files the cli tool will read or create + return CLITester(tmpdir=tmpdir) diff --git a/ceph_deploy/tests/directory.py b/ceph_deploy/tests/directory.py new file mode 100644 index 0000000..81d3e19 --- /dev/null +++ b/ceph_deploy/tests/directory.py @@ -0,0 +1,13 @@ +import contextlib +import os + + +@contextlib.contextmanager +def directory(path): + prev = os.open('.', os.O_RDONLY | os.O_DIRECTORY) + try: + os.chdir(path) + yield + finally: + os.fchdir(prev) + os.close(prev) diff --git a/ceph_deploy/tests/fakes.py b/ceph_deploy/tests/fakes.py new file mode 100644 index 0000000..458ac6a --- /dev/null +++ b/ceph_deploy/tests/fakes.py @@ -0,0 +1,9 @@ + + +def fake_getaddrinfo(*a, **kw): + return_host = kw.get('return_host', 'host1') + return [[0,0,0,0, return_host]] + + +def fake_arg_val_hostname(self, host): + return host diff --git a/ceph_deploy/tests/parser/__init__.py b/ceph_deploy/tests/parser/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ceph_deploy/tests/parser/test_admin.py b/ceph_deploy/tests/parser/test_admin.py new file mode 100644 index 0000000..a86fa8e --- /dev/null +++ b/ceph_deploy/tests/parser/test_admin.py @@ -0,0 +1,33 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + + +class TestParserAdmin(object): + + def setup(self): + self.parser = get_parser() + + def test_admin_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('admin --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy admin' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_admin_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('admin'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_admin_one_host(self): + args = self.parser.parse_args('admin host1'.split()) + assert args.client == ['host1'] + + def test_admin_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args(['admin'] + hostnames) + assert args.client == hostnames diff --git a/ceph_deploy/tests/parser/test_config.py b/ceph_deploy/tests/parser/test_config.py new file mode 100644 index 0000000..74ccb02 --- /dev/null +++ b/ceph_deploy/tests/parser/test_config.py @@ -0,0 +1,60 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + +SUBCMDS_WITH_ARGS = ['push', 'pull'] + + +class TestParserConfig(object): + + def setup(self): + self.parser = get_parser() + + def test_config_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('config --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy config' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS) + def test_config_subcommands_with_args(self, cmd): + self.parser.parse_args(['config'] + ['%s' % cmd] + ['host1']) + + def test_config_invalid_subcommand(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('config bork'.split()) + out, err = capsys.readouterr() + assert 'invalid choice' in err + + def test_config_push_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('config push'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_config_push_one_host(self): + args = self.parser.parse_args('config push host1'.split()) + assert args.client == ['host1'] + + def test_config_push_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args('config push'.split() + hostnames) + assert args.client == hostnames + + def test_config_pull_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('config pull'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_config_pull_one_host(self): + args = self.parser.parse_args('config pull host1'.split()) + assert args.client == ['host1'] + + def test_config_pull_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args('config pull'.split() + hostnames) + assert args.client == hostnames diff --git a/ceph_deploy/tests/parser/test_disk.py b/ceph_deploy/tests/parser/test_disk.py new file mode 100644 index 0000000..cedd858 --- /dev/null +++ b/ceph_deploy/tests/parser/test_disk.py @@ -0,0 +1,88 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + +SUBCMDS_WITH_ARGS = ['list', 'zap'] + + +class TestParserDisk(object): + + def setup(self): + self.parser = get_parser() + + def test_disk_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('disk --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy disk' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS) + def test_disk_valid_subcommands_with_args(self, cmd): + self.parser.parse_args(['disk'] + ['%s' % cmd] + ['host1']) + + def test_disk_invalid_subcommand(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('disk bork'.split()) + out, err = capsys.readouterr() + assert 'invalid choice' in err + + def test_disk_list_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('disk list --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy disk list' in out + + def test_disk_list_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('disk list'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_disk_list_single_host(self): + args = self.parser.parse_args('disk list host1'.split()) + assert args.host[0] == 'host1' + assert args.debug is False + + def test_disk_list_single_host_debug(self): + args = self.parser.parse_args('disk list --debug host1'.split()) + assert args.host[0] == 'host1' + assert args.debug is True + + def test_disk_list_multi_host(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args('disk list'.split() + hostnames) + assert args.host == hostnames + + def test_disk_zap_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('disk zap --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy disk zap' in out + + def test_disk_zap_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('disk zap'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_disk_zap_single_host(self): + args = self.parser.parse_args('disk zap host1 /dev/sdb'.split()) + assert args.disk[0] == '/dev/sdb' + assert args.host == 'host1' + assert args.debug is False + + def test_disk_zap_multi_host(self): + host = 'host1' + disks = ['/dev/sda1', '/dev/sda2'] + args = self.parser.parse_args(['disk', 'zap', host] + disks) + assert args.disk == disks + + def test_disk_zap_debug_true(self): + args = \ + self.parser.parse_args('disk zap --debug host1 /dev/sdb'.split()) + assert args.disk[0] == '/dev/sdb' + assert args.host == 'host1' + assert args.debug is True diff --git a/ceph_deploy/tests/parser/test_gatherkeys.py b/ceph_deploy/tests/parser/test_gatherkeys.py new file mode 100644 index 0000000..1dcafcc --- /dev/null +++ b/ceph_deploy/tests/parser/test_gatherkeys.py @@ -0,0 +1,33 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + + +class TestParserGatherKeys(object): + + def setup(self): + self.parser = get_parser() + + def test_gather_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('gatherkeys --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy gatherkeys' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_gatherkeys_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('gatherkeys'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_gatherkeys_one_host(self): + args = self.parser.parse_args('gatherkeys host1'.split()) + assert args.mon == ['host1'] + + def test_gatherkeys_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args(['gatherkeys'] + hostnames) + assert args.mon == hostnames diff --git a/ceph_deploy/tests/parser/test_install.py b/ceph_deploy/tests/parser/test_install.py new file mode 100644 index 0000000..cb6284e --- /dev/null +++ b/ceph_deploy/tests/parser/test_install.py @@ -0,0 +1,158 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + +COMP_FLAGS = [ + 'mon', 'mds', 'rgw', 'osd', 'common', 'all' +] + + +class TestParserInstall(object): + + def setup(self): + self.parser = get_parser() + + def test_install_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('install --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy install' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_install_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('install'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_install_one_host(self): + args = self.parser.parse_args('install host1'.split()) + assert args.host == ['host1'] + + def test_install_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args(['install'] + hostnames) + assert frozenset(args.host) == frozenset(hostnames) + + def test_install_release_default_is_none(self): + args = self.parser.parse_args('install host1'.split()) + assert args.release is None + assert args.version_kind == "stable" + + def test_install_release(self): + args = self.parser.parse_args('install --release hammer host1'.split()) + assert args.release == "hammer" + assert args.version_kind == "stable" + + @pytest.mark.skipif(reason="No release name sanity checking yet") + def test_install_release_bad_codename(self): + args = self.parser.parse_args('install --release cephalopod host1'.split()) + assert args.release != "cephalopod" + + def test_install_testing_default_is_none(self): + args = self.parser.parse_args('install host1'.split()) + assert args.testing is None + assert args.version_kind == "stable" + + def test_install_testing_true(self): + args = self.parser.parse_args('install --testing host1'.split()) + assert len(args.testing) == 0 + assert args.version_kind == "testing" + + def test_install_dev_disabled_by_default(self): + args = self.parser.parse_args('install host1'.split()) + # dev defaults to master, but version_kind nullifies it + assert args.dev == "master" + assert args.version_kind == "stable" + + def test_install_dev_custom_version(self): + args = self.parser.parse_args('install --dev v0.80.8 host1'.split()) + assert args.dev == "v0.80.8" + assert args.version_kind == "dev" + + @pytest.mark.skipif(reason="test reflects desire, but not code reality") + def test_install_dev_option_default_is_master(self): + # I don't think this is the way argparse works. + args = self.parser.parse_args('install --dev host1'.split()) + assert args.dev == "master" + assert args.version_kind == "dev" + + def test_install_release_testing_mutex(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('install --release hammer --testing host1'.split()) + out, err = capsys.readouterr() + assert 'not allowed with argument' in err + + def test_install_release_dev_mutex(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('install --release hammer --dev master host1'.split()) + out, err = capsys.readouterr() + assert 'not allowed with argument' in err + + def test_install_testing_dev_mutex(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('install --testing --dev master host1'.split()) + out, err = capsys.readouterr() + assert 'not allowed with argument' in err + + @pytest.mark.parametrize('comp', COMP_FLAGS) + def test_install_component_default_is_false(self, comp): + args = self.parser.parse_args('install host1'.split()) + assert getattr(args, 'install_%s' % comp) is False + + @pytest.mark.parametrize('comp', COMP_FLAGS) + def test_install_component_true(self, comp): + args = self.parser.parse_args(('install --%s host1' % comp).split()) + assert getattr(args, 'install_%s' % comp) is True + + def test_install_multi_component(self): + args = self.parser.parse_args(('install --mon --rgw host1').split()) + assert args.install_mon + assert args.install_rgw + + def test_install_adjust_repos_default_is_true(self): + args = self.parser.parse_args('install host1'.split()) + assert args.adjust_repos + + def test_install_adjust_repos_false(self): + args = self.parser.parse_args('install --no-adjust-repos host1'.split()) + assert not args.adjust_repos + + def test_install_adjust_repos_false_with_custom_release(self): + args = self.parser.parse_args('install --release firefly --no-adjust-repos host1'.split()) + assert args.release == "firefly" + assert not args.adjust_repos + + def test_install_repo_default_is_false(self): + args = self.parser.parse_args('install host1'.split()) + assert not args.repo + + def test_install_repo_true(self): + args = self.parser.parse_args('install --repo host1'.split()) + assert args.repo + + def test_install_local_mirror_default_is_none(self): + args = self.parser.parse_args('install host1'.split()) + assert args.local_mirror is None + + def test_install_local_mirror_custom_path(self): + args = self.parser.parse_args('install --local-mirror /mnt/mymirror host1'.split()) + assert args.local_mirror == "/mnt/mymirror" + + def test_install_repo_url_default_is_none(self): + args = self.parser.parse_args('install host1'.split()) + assert args.repo_url is None + + def test_install_repo_url_custom_path(self): + args = self.parser.parse_args('install --repo-url https://ceph.com host1'.split()) + assert args.repo_url == "https://ceph.com" + + def test_install_gpg_url_default_is_none(self): + args = self.parser.parse_args('install host1'.split()) + assert args.gpg_url is None + + def test_install_gpg_url_custom_path(self): + args = self.parser.parse_args('install --gpg-url https://ceph.com/key host1'.split()) + assert args.gpg_url == "https://ceph.com/key" diff --git a/ceph_deploy/tests/parser/test_main.py b/ceph_deploy/tests/parser/test_main.py new file mode 100644 index 0000000..ac7186e --- /dev/null +++ b/ceph_deploy/tests/parser/test_main.py @@ -0,0 +1,100 @@ +import pytest + +import ceph_deploy +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + + +SUBCMDS_WITH_ARGS = [ + 'new', 'install', 'rgw', 'mds', 'mon', 'gatherkeys', 'disk', 'osd', + 'admin', 'config', 'uninstall', 'purgedata', 'purge', 'pkg' +] +SUBCMDS_WITHOUT_ARGS = ['forgetkeys'] + + +class TestParserMain(object): + + def setup(self): + self.parser = get_parser() + + def test_verbose_true(self): + args = self.parser.parse_args('--verbose forgetkeys'.split()) + assert args.verbose + + def test_verbose_default_is_false(self): + args = self.parser.parse_args('forgetkeys'.split()) + assert not args.verbose + + def test_quiet_true(self): + args = self.parser.parse_args('--quiet forgetkeys'.split()) + assert args.quiet + + def test_quiet_default_is_false(self): + args = self.parser.parse_args('forgetkeys'.split()) + assert not args.quiet + + def test_verbose_quiet_are_mutually_exclusive(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('--verbose --quiet forgetkeys'.split()) + out, err = capsys.readouterr() + assert 'not allowed with argument' in err + + def test_version(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('--version'.split()) + out, err = capsys.readouterr() + assert ceph_deploy.__version__ in (out.strip(), err.strip()) + + def test_custom_username(self): + args = self.parser.parse_args('--username trhoden forgetkeys'.split()) + assert args.username == 'trhoden' + + def test_default_username_is_none(self): + args = self.parser.parse_args('forgetkeys'.split()) + assert args.username is None + + def test_overwrite_conf_default_false(self): + args = self.parser.parse_args('forgetkeys'.split()) + assert not args.overwrite_conf + + def test_overwrite_conf_true(self): + args = self.parser.parse_args('--overwrite-conf forgetkeys'.split()) + assert args.overwrite_conf + + def test_default_cluster_name(self): + args = self.parser.parse_args('forgetkeys'.split()) + assert args.cluster == 'ceph' + + def test_default_ceph_conf_is_none(self): + args = self.parser.parse_args('forgetkeys'.split()) + assert args.ceph_conf is None + + def test_custom_ceph_conf(self): + args = self.parser.parse_args('--ceph-conf /tmp/ceph.conf forgetkeys'.split()) + assert args.ceph_conf == '/tmp/ceph.conf' + + @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS) + def test_valid_subcommands_with_args(self, cmd, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args(['%s' % cmd]) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + assert 'invalid choice' not in err + + @pytest.mark.parametrize('cmd', SUBCMDS_WITHOUT_ARGS) + def test_valid_subcommands_without_args(self, cmd, capsys): + self.parser.parse_args(['%s' % cmd]) + + def test_invalid_subcommand(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('bork'.split()) + out, err = capsys.readouterr() + assert 'invalid choice' in err + + def test_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('--help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy' in out + assert 'optional arguments:' in out + assert 'commands:' in out diff --git a/ceph_deploy/tests/parser/test_mds.py b/ceph_deploy/tests/parser/test_mds.py new file mode 100644 index 0000000..0b81c38 --- /dev/null +++ b/ceph_deploy/tests/parser/test_mds.py @@ -0,0 +1,35 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + + +class TestParserMDS(object): + + def setup(self): + self.parser = get_parser() + + def test_mds_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('mds --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy mds' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_mds_create_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('mds create'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_mds_create_one_host(self): + args = self.parser.parse_args('mds create host1'.split()) + assert args.mds[0][0] == 'host1' + + def test_mds_create_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args(['mds', 'create'] + hostnames) + # args.mds is a list of tuples, and tuple[0] is the hostname + hosts = [x[0] for x in args.mds] + assert frozenset(hosts) == frozenset(hostnames) diff --git a/ceph_deploy/tests/parser/test_mon.py b/ceph_deploy/tests/parser/test_mon.py new file mode 100644 index 0000000..4c03101 --- /dev/null +++ b/ceph_deploy/tests/parser/test_mon.py @@ -0,0 +1,122 @@ +import pytest + +from ceph_deploy.cli import get_parser + +SUBCMDS_WITH_ARGS = ['add', 'destroy', 'create'] +SUBCMDS_WITHOUT_ARGS = ['create', 'create-initial'] + + +class TestParserMON(object): + + def setup(self): + self.parser = get_parser() + + def test_mon_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('mon --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy mon' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS) + def test_mon_valid_subcommands_with_args(self, cmd, capsys): + args = self.parser.parse_args(['mon'] + ['%s' % cmd] + ['host1']) + assert args.subcommand == cmd + + @pytest.mark.parametrize('cmd', SUBCMDS_WITHOUT_ARGS) + def test_mon_valid_subcommands_without_args(self, cmd, capsys): + args = self.parser.parse_args(['mon'] + ['%s' % cmd]) + assert args.subcommand == cmd + + def test_mon_invalid_subcommand(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('mon bork'.split()) + out, err = capsys.readouterr() + assert 'invalid choice' in err + + def test_mon_create_initial_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('mon create-initial --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy mon create-initial' in out + + def test_mon_create_initial_keyrings_default_none(self): + args = self.parser.parse_args('mon create-initial'.split()) + assert args.keyrings is None + + def test_mon_create_initial_keyrings_custom_dir(self): + args = self.parser.parse_args('mon create-initial --keyrings /tmp/keys'.split()) + assert args.keyrings == "/tmp/keys" + + def test_mon_create_initial_keyrings_host_raises_err(self): + with pytest.raises(SystemExit): + self.parser.parse_args('mon create-initial test1'.split()) + + def test_mon_create_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('mon create --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy mon create' in out + + def test_mon_create_keyrings_default_none(self): + args = self.parser.parse_args('mon create'.split()) + assert args.keyrings is None + + def test_mon_create_keyrings_custom_dir(self): + args = self.parser.parse_args('mon create --keyrings /tmp/keys'.split()) + assert args.keyrings == "/tmp/keys" + + def test_mon_create_single_host(self): + args = self.parser.parse_args('mon create test1'.split()) + assert args.mon == ['test1'] + + def test_mon_create_multi_host(self): + hosts = ['host1', 'host2', 'host3'] + args = self.parser.parse_args('mon create'.split() + hosts) + assert args.mon == hosts + + def test_mon_add_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('mon add --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy mon add' in out + + def test_mon_add_address_default_none(self): + args = self.parser.parse_args('mon add test1'.split()) + assert args.address is None + + def test_mon_add_address_custom_addr(self): + args = self.parser.parse_args('mon add test1 --address 10.10.0.1'.split()) + assert args.address == '10.10.0.1' + + def test_mon_add_no_host_raises_err(self): + with pytest.raises(SystemExit): + self.parser.parse_args('mon add'.split()) + + def test_mon_add_one_host_okay(self): + args = self.parser.parse_args('mon add test1'.split()) + assert args.mon == ["test1"] + + def test_mon_add_multi_host_raises_err(self): + with pytest.raises(SystemExit): + self.parser.parse_args('mon add test1 test2'.split()) + + def test_mon_destroy_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('mon destroy --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy mon destroy' in out + + def test_mon_destroy_no_host_raises_err(self): + with pytest.raises(SystemExit): + self.parser.parse_args('mon destroy'.split()) + + def test_mon_destroy_one_host_okay(self): + args = self.parser.parse_args('mon destroy test1'.split()) + assert args.mon == ["test1"] + + def test_mon_destroy_multi_host(self): + hosts = ['host1', 'host2', 'host3'] + args = self.parser.parse_args('mon destroy'.split() + hosts) + assert args.mon == hosts diff --git a/ceph_deploy/tests/parser/test_new.py b/ceph_deploy/tests/parser/test_new.py new file mode 100644 index 0000000..9395bab --- /dev/null +++ b/ceph_deploy/tests/parser/test_new.py @@ -0,0 +1,84 @@ +import pytest +from mock import patch + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.fakes import fake_arg_val_hostname +from ceph_deploy.tests.util import assert_too_few_arguments + +@patch('ceph_deploy.util.arg_validators.Hostname.__call__', fake_arg_val_hostname) +class TestParserNew(object): + + def setup(self): + self.parser = get_parser() + + def test_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('new --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy new' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_new_copykey_true_by_default(self): + args = self.parser.parse_args('new host1'.split()) + assert args.ssh_copykey + + def test_new_copykey_false(self): + args = self.parser.parse_args('new --no-ssh-copykey host1'.split()) + assert not args.ssh_copykey + + def test_new_fsid_none_by_default(self): + args = self.parser.parse_args('new host1'.split()) + assert args.fsid is None + + def test_new_fsid_custom_fsid(self): + args = self.parser.parse_args('new --fsid bc50d015-65c9-457a-bfed-e37b92756527 host1'.split()) + assert args.fsid == 'bc50d015-65c9-457a-bfed-e37b92756527' + + @pytest.mark.skipif(reason="no UUID validation yet") + def test_new_fsid_custom_fsid_bad(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('new --fsid bc50d015-65c9-457a-bfed-e37'.split()) + out, err = capsys.readouterr() + #TODO check for correct error string in err + + def test_new_networks_none_by_default(self): + args = self.parser.parse_args('new host1'.split()) + assert args.public_network is None + assert args.cluster_network is None + + def test_new_public_network_custom(self): + args = self.parser.parse_args('new --public-network 10.10.0.0/16 host1'.split()) + assert args.public_network == "10.10.0.0/16" + + def test_new_cluster_network_custom(self): + args = self.parser.parse_args('new --cluster-network 10.10.0.0/16 host1'.split()) + assert args.cluster_network == "10.10.0.0/16" + + def test_new_public_network_custom_bad(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('new --public-network 10.10.0.0'.split()) + out, err = capsys.readouterr() + assert "error: subnet must" in err + + def test_new_cluster_network_custom_bad(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('new --cluster-network 10.10.0.0'.split()) + out, err = capsys.readouterr() + assert "error: subnet must" in err + + def test_new_mon_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('new'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_new_one_mon(self): + hostnames = ['test1'] + args = self.parser.parse_args(['new'] + hostnames) + assert args.mon == hostnames + + def test_new_multiple_mons(self): + hostnames = ['test1', 'test2', 'test3'] + args = self.parser.parse_args(['new'] + hostnames) + assert frozenset(args.mon) == frozenset(hostnames) diff --git a/ceph_deploy/tests/parser/test_osd.py b/ceph_deploy/tests/parser/test_osd.py new file mode 100644 index 0000000..3b983c2 --- /dev/null +++ b/ceph_deploy/tests/parser/test_osd.py @@ -0,0 +1,101 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + +SUBCMDS_WITH_ARGS = ['list', 'create'] + + +class TestParserOSD(object): + + def setup(self): + self.parser = get_parser() + + def test_osd_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('osd --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy osd' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS) + def test_osd_valid_subcommands_with_args(self, cmd): + self.parser.parse_args(['osd'] + ['%s' % cmd] + ['host1']) + + def test_osd_invalid_subcommand(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('osd bork'.split()) + out, err = capsys.readouterr() + assert 'invalid choice' in err + + def test_osd_list_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('osd list --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy osd list' in out + + def test_osd_list_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('osd list'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_osd_list_single_host(self): + args = self.parser.parse_args('osd list host1'.split()) + assert args.host[0] == 'host1' + + def test_osd_list_multi_host(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args('osd list'.split() + hostnames) + assert args.host == hostnames + + def test_osd_create_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('osd create --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy osd create' in out + + def test_osd_create_single_host(self): + args = self.parser.parse_args('osd create host1 --data /dev/sdb'.split()) + assert args.host == 'host1' + assert args.data == '/dev/sdb' + + def test_osd_create_zap_default_false(self): + args = self.parser.parse_args('osd create host1 --data /dev/sdb'.split()) + assert args.zap_disk is False + + def test_osd_create_zap_true(self): + args = self.parser.parse_args('osd create --zap-disk host1 --data /dev/sdb'.split()) + assert args.zap_disk is True + + def test_osd_create_fstype_default_xfs(self): + args = self.parser.parse_args('osd create host1 --data /dev/sdb'.split()) + assert args.fs_type == "xfs" + + def test_osd_create_fstype_btrfs(self): + args = self.parser.parse_args('osd create --fs-type btrfs host1 --data /dev/sdb'.split()) + assert args.fs_type == "btrfs" + + def test_osd_create_fstype_invalid(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('osd create --fs-type bork host1 --data /dev/sdb'.split()) + out, err = capsys.readouterr() + assert 'invalid choice' in err + + def test_osd_create_dmcrypt_default_false(self): + args = self.parser.parse_args('osd create host1 --data /dev/sdb'.split()) + assert args.dmcrypt is False + + def test_osd_create_dmcrypt_true(self): + args = self.parser.parse_args('osd create --dmcrypt host1 --data /dev/sdb'.split()) + assert args.dmcrypt is True + + def test_osd_create_dmcrypt_key_dir_default(self): + args = self.parser.parse_args('osd create host1 --data /dev/sdb'.split()) + assert args.dmcrypt_key_dir == "/etc/ceph/dmcrypt-keys" + + def test_osd_create_dmcrypt_key_dir_custom(self): + args = self.parser.parse_args('osd create --dmcrypt --dmcrypt-key-dir /tmp/keys host1 --data /dev/sdb'.split()) + assert args.dmcrypt_key_dir == "/tmp/keys" + diff --git a/ceph_deploy/tests/parser/test_pkg.py b/ceph_deploy/tests/parser/test_pkg.py new file mode 100644 index 0000000..9061a68 --- /dev/null +++ b/ceph_deploy/tests/parser/test_pkg.py @@ -0,0 +1,66 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + + +class TestParserPkg(object): + + def setup(self): + self.parser = get_parser() + + def test_pkg_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('pkg --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy pkg' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_pkg_install_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('pkg --install pkg1'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_pkg_install_one_host(self): + args = self.parser.parse_args('pkg --install pkg1 host1'.split()) + assert args.hosts == ['host1'] + assert args.install == "pkg1" + + def test_pkg_install_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args('pkg --install pkg1'.split() + hostnames) + assert args.hosts == hostnames + assert args.install == "pkg1" + + def test_pkg_install_muliple_pkgs(self): + args = self.parser.parse_args('pkg --install pkg1,pkg2 host1'.split()) + assert args.install == "pkg1,pkg2" + + def test_pkg_remove_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('pkg --remove pkg1'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_pkg_remove_one_host(self): + args = self.parser.parse_args('pkg --remove pkg1 host1'.split()) + assert args.hosts == ['host1'] + assert args.remove == "pkg1" + + def test_pkg_remove_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args('pkg --remove pkg1'.split() + hostnames) + assert args.hosts == hostnames + assert args.remove == "pkg1" + + def test_pkg_remove_muliple_pkgs(self): + args = self.parser.parse_args('pkg --remove pkg1,pkg2 host1'.split()) + assert args.remove == "pkg1,pkg2" + + def test_pkg_install_remove_are_mutex(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('pkg --install pkg2 --remove pkg1 host1'.split()) + out, err = capsys.readouterr() + assert "argument --remove: not allowed with argument --install" in err diff --git a/ceph_deploy/tests/parser/test_purge.py b/ceph_deploy/tests/parser/test_purge.py new file mode 100644 index 0000000..8dcb348 --- /dev/null +++ b/ceph_deploy/tests/parser/test_purge.py @@ -0,0 +1,33 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + + +class TestParserPurge(object): + + def setup(self): + self.parser = get_parser() + + def test_purge_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('purge --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy purge' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_purge_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('purge'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_purge_one_host(self): + args = self.parser.parse_args('purge host1'.split()) + assert args.host == ['host1'] + + def test_purge_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args(['purge'] + hostnames) + assert frozenset(args.host) == frozenset(hostnames) diff --git a/ceph_deploy/tests/parser/test_purgedata.py b/ceph_deploy/tests/parser/test_purgedata.py new file mode 100644 index 0000000..dadef2b --- /dev/null +++ b/ceph_deploy/tests/parser/test_purgedata.py @@ -0,0 +1,33 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + + +class TestParserPurgeData(object): + + def setup(self): + self.parser = get_parser() + + def test_purgedata_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('purgedata --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy purgedata' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_purgedata_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('purgedata'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_purgedata_one_host(self): + args = self.parser.parse_args('purgedata host1'.split()) + assert args.host == ['host1'] + + def test_purgedata_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args(['purgedata'] + hostnames) + assert frozenset(args.host) == frozenset(hostnames) diff --git a/ceph_deploy/tests/parser/test_repo.py b/ceph_deploy/tests/parser/test_repo.py new file mode 100644 index 0000000..a84901e --- /dev/null +++ b/ceph_deploy/tests/parser/test_repo.py @@ -0,0 +1,71 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + + +class TestParserRepo(object): + + def setup(self): + self.parser = get_parser() + + def test_repo_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('repo --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy repo' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_repo_name_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('repo'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_repo_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('repo ceph'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_repo_one_host(self): + args = self.parser.parse_args('repo ceph host1'.split()) + assert args.host == ['host1'] + + def test_repo_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args(['repo', 'ceph'] + hostnames) + assert frozenset(args.host) == frozenset(hostnames) + + def test_repo_name(self): + args = self.parser.parse_args('repo ceph host1'.split()) + assert args.repo_name == 'ceph' + + def test_repo_remove_default_is_false(self): + args = self.parser.parse_args('repo ceph host1'.split()) + assert not args.remove + + def test_repo_remove_set_true(self): + args = self.parser.parse_args('repo ceph --remove host1'.split()) + assert args.remove + + def test_repo_remove_delete_alias(self): + args = self.parser.parse_args('repo ceph --delete host1'.split()) + assert args.remove + + def test_repo_url_default_is_none(self): + args = self.parser.parse_args('repo ceph host1'.split()) + assert args.repo_url is None + + def test_repo_url_custom_path(self): + args = self.parser.parse_args('repo ceph --repo-url https://ceph.com host1'.split()) + assert args.repo_url == "https://ceph.com" + + def test_repo_gpg_url_default_is_none(self): + args = self.parser.parse_args('repo ceph host1'.split()) + assert args.gpg_url is None + + def test_repo_gpg_url_custom_path(self): + args = self.parser.parse_args('repo ceph --gpg-url https://ceph.com/key host1'.split()) + assert args.gpg_url == "https://ceph.com/key" diff --git a/ceph_deploy/tests/parser/test_rgw.py b/ceph_deploy/tests/parser/test_rgw.py new file mode 100644 index 0000000..5cbf0a0 --- /dev/null +++ b/ceph_deploy/tests/parser/test_rgw.py @@ -0,0 +1,35 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + + +class TestParserRGW(object): + + def setup(self): + self.parser = get_parser() + + def test_rgw_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('rgw --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy rgw' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_rgw_create_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('rgw create'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_rgw_create_one_host(self): + args = self.parser.parse_args('rgw create host1'.split()) + assert args.rgw[0][0] == 'host1' + + def test_rgw_create_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args(['rgw', 'create'] + hostnames) + # args.rgw is a list of tuples, and tuple[0] is the hostname + hosts = [x[0] for x in args.rgw] + assert frozenset(hosts) == frozenset(hostnames) diff --git a/ceph_deploy/tests/parser/test_uninstall.py b/ceph_deploy/tests/parser/test_uninstall.py new file mode 100644 index 0000000..30cd918 --- /dev/null +++ b/ceph_deploy/tests/parser/test_uninstall.py @@ -0,0 +1,33 @@ +import pytest + +from ceph_deploy.cli import get_parser +from ceph_deploy.tests.util import assert_too_few_arguments + + +class TestParserUninstall(object): + + def setup(self): + self.parser = get_parser() + + def test_uninstall_help(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('uninstall --help'.split()) + out, err = capsys.readouterr() + assert 'usage: ceph-deploy uninstall' in out + assert 'positional arguments:' in out + assert 'optional arguments:' in out + + def test_uninstall_host_required(self, capsys): + with pytest.raises(SystemExit): + self.parser.parse_args('uninstall'.split()) + out, err = capsys.readouterr() + assert_too_few_arguments(err) + + def test_uninstall_one_host(self): + args = self.parser.parse_args('uninstall host1'.split()) + assert args.host == ['host1'] + + def test_uninstall_multiple_hosts(self): + hostnames = ['host1', 'host2', 'host3'] + args = self.parser.parse_args(['uninstall'] + hostnames) + assert frozenset(args.host) == frozenset(hostnames) diff --git a/ceph_deploy/tests/test_cli_admin.py b/ceph_deploy/tests/test_cli_admin.py new file mode 100644 index 0000000..06a95dc --- /dev/null +++ b/ceph_deploy/tests/test_cli_admin.py @@ -0,0 +1,60 @@ +import os +import subprocess + +import pytest +from mock import patch, MagicMock, Mock + +from ceph_deploy.cli import _main as main +from ceph_deploy.hosts import remotes +from ceph_deploy.tests.directory import directory + + +def test_bad_no_conf(tmpdir, cli): + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', 'admin', 'host1'], + stderr=subprocess.PIPE, + ) as p: + result = p.stderr.read().decode('utf-8') + assert 'No such file or directory: \'ceph.conf\'' in result + assert err.value.status == 1 + + +def test_bad_no_key(tmpdir, cli): + with tmpdir.join('ceph.conf').open('w'): + pass + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', 'admin', 'host1'], + stderr=subprocess.PIPE, + ) as p: + result = p.stderr.read().decode('utf-8') + assert 'ceph.client.admin.keyring not found' in result + assert err.value.status == 1 + + +def test_write_keyring(tmpdir): + with tmpdir.join('ceph.conf').open('w'): + pass + with tmpdir.join('ceph.client.admin.keyring').open('wb'): + pass + + etc_ceph = os.path.join(str(tmpdir), 'etc', 'ceph') + os.makedirs(etc_ceph) + + distro = MagicMock() + distro.conn = MagicMock() + remotes.write_file.__defaults__ = (0o644, str(tmpdir), -1, -1) + distro.conn.remote_module = remotes + distro.conn.remote_module.write_conf = Mock() + + with patch('ceph_deploy.admin.hosts'): + with patch('ceph_deploy.admin.hosts.get', MagicMock(return_value=distro)): + with directory(str(tmpdir)): + main(args=['admin', 'host1']) + + keyring_file = os.path.join(etc_ceph, 'ceph.client.admin.keyring') + assert os.path.exists(keyring_file) + + file_mode = oct(os.stat(keyring_file).st_mode & 0o777) + assert file_mode == oct(0o600) diff --git a/ceph_deploy/tests/test_cli_mon.py b/ceph_deploy/tests/test_cli_mon.py new file mode 100644 index 0000000..9948681 --- /dev/null +++ b/ceph_deploy/tests/test_cli_mon.py @@ -0,0 +1,56 @@ +import subprocess + +import pytest +from mock import Mock, patch + +from ceph_deploy.cli import _main as main +from ceph_deploy.tests.directory import directory +from ceph_deploy.tests.util import assert_too_few_arguments + + +#TODO: This test does check that things fail if the .conf file is missing +def test_bad_no_conf(tmpdir, cli): + with pytest.raises(cli.Failed) as err: + with cli( + args=['ceph-deploy', 'mon'], + stderr=subprocess.PIPE, + ) as p: + result = p.stderr.read().decode('utf-8') + assert 'usage: ceph-deploy' in result + assert_too_few_arguments(result) + assert err.value.status == 2 + + +def make_fake_connection(platform_information=None): + get_connection = Mock() + get_connection.return_value = get_connection + get_connection.remote_module.platform_information = Mock( + return_value=platform_information) + return get_connection + + +def test_new(tmpdir, capsys): + with tmpdir.join('ceph.conf').open('w') as f: + f.write("""\ +[global] +fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0 +mon initial members = host1 +""") + + fake_ip_addresses = lambda x: ['10.0.0.1'] + try: + with patch('ceph_deploy.new.net.ip_addresses', fake_ip_addresses): + with patch('ceph_deploy.new.net.get_nonlocal_ip', lambda x: '10.0.0.1'): + with patch('ceph_deploy.new.arg_validators.Hostname', lambda: lambda x: x): + with patch('ceph_deploy.new.hosts'): + with directory(str(tmpdir)): + main(['-v', 'new', '--no-ssh-copykey', 'host1']) + except SystemExit as e: + raise AssertionError('Unexpected exit: %s', e) + out, err = capsys.readouterr() + err = err.lower() + assert 'creating new cluster named ceph' in err + assert 'monitor host1 at 10.0.0.1' in err + assert 'resolving host host1' in err + assert "monitor initial members are ['host1']" in err + assert "monitor addrs are ['10.0.0.1']" in err diff --git a/ceph_deploy/tests/test_cli_new.py b/ceph_deploy/tests/test_cli_new.py new file mode 100644 index 0000000..056bc78 --- /dev/null +++ b/ceph_deploy/tests/test_cli_new.py @@ -0,0 +1,71 @@ +import re +import uuid + +from mock import patch + +from ceph_deploy import conf +from ceph_deploy.cli import _main as main +from ceph_deploy.tests.directory import directory +import pytest + + +def test_write_global_conf_section(tmpdir): + fake_ip_addresses = lambda x: ['10.0.0.1'] + + with patch('ceph_deploy.new.hosts'): + with patch('ceph_deploy.new.net.ip_addresses', fake_ip_addresses): + with patch('ceph_deploy.new.net.get_nonlocal_ip', lambda x: '10.0.0.1'): + with patch('ceph_deploy.new.arg_validators.Hostname', lambda: lambda x: x): + with directory(str(tmpdir)): + main(args=['new', 'host1']) + with tmpdir.join('ceph.conf').open() as f: + cfg = conf.ceph.parse(f) + assert cfg.sections() == ['global'] + + +@pytest.fixture +def newcfg(request, tmpdir): + fake_ip_addresses = lambda x: ['10.0.0.1'] + + def new(*args): + with patch('ceph_deploy.new.net.ip_addresses', fake_ip_addresses): + with patch('ceph_deploy.new.hosts'): + with patch('ceph_deploy.new.net.get_nonlocal_ip', lambda x: '10.0.0.1'): + with patch('ceph_deploy.new.arg_validators.Hostname', lambda: lambda x: x): + with directory(str(tmpdir)): + main(args=['new'] + list(args)) + with tmpdir.join('ceph.conf').open() as f: + cfg = conf.ceph.parse(f) + return cfg + return new + + +def test_uuid(newcfg): + cfg = newcfg('host1') + fsid = cfg.get('global', 'fsid') + # make sure it's a valid uuid + uuid.UUID(hex=fsid) + # make sure it looks pretty, too + UUID_RE = re.compile( + r'^[0-9a-f]{8}-' + + r'[0-9a-f]{4}-' + # constant 4 here, we want to enforce randomness and not leak + # MACs or time + + r'4[0-9a-f]{3}-' + + r'[0-9a-f]{4}-' + + r'[0-9a-f]{12}$', + ) + assert UUID_RE.match(fsid) + + +def test_mons(newcfg): + cfg = newcfg('node01', 'node07', 'node34') + mon_initial_members = cfg.get('global', 'mon_initial_members') + assert mon_initial_members == 'node01, node07, node34' + + +def test_defaults(newcfg): + cfg = newcfg('host1') + assert cfg.get('global', 'auth cluster required') == 'cephx' + assert cfg.get('global', 'auth service required') == 'cephx' + assert cfg.get('global', 'auth client required') == 'cephx' diff --git a/ceph_deploy/tests/test_cli_rgw.py b/ceph_deploy/tests/test_cli_rgw.py new file mode 100644 index 0000000..b0b86c4 --- /dev/null +++ b/ceph_deploy/tests/test_cli_rgw.py @@ -0,0 +1,11 @@ +import ceph_deploy.rgw as rgw + + +def test_rgw_prefix_auto(): + daemon = rgw.colon_separated("hostname") + assert daemon == ("hostname", "rgw.hostname") + + +def test_rgw_prefix_custom(): + daemon = rgw.colon_separated("hostname:mydaemon") + assert daemon == ("hostname", "rgw.mydaemon") diff --git a/ceph_deploy/tests/test_conf.py b/ceph_deploy/tests/test_conf.py new file mode 100644 index 0000000..8d435df --- /dev/null +++ b/ceph_deploy/tests/test_conf.py @@ -0,0 +1,86 @@ +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO +from ceph_deploy import conf + + +def test_simple(): + f = StringIO("""\ +[foo] +bar = baz +""") + cfg = conf.ceph.parse(f) + assert cfg.get('foo', 'bar') == 'baz' + + +def test_indent_space(): + f = StringIO("""\ +[foo] + bar = baz +""") + cfg = conf.ceph.parse(f) + assert cfg.get('foo', 'bar') == 'baz' + + +def test_indent_tab(): + f = StringIO("""\ +[foo] +\tbar = baz +""") + cfg = conf.ceph.parse(f) + assert cfg.get('foo', 'bar') == 'baz' + + +def test_words_underscore(): + f = StringIO("""\ +[foo] +bar_thud = baz +""") + cfg = conf.ceph.parse(f) + assert cfg.get('foo', 'bar_thud') == 'baz' + assert cfg.get('foo', 'bar thud') == 'baz' + + +def test_words_space(): + f = StringIO("""\ +[foo] +bar thud = baz +""") + cfg = conf.ceph.parse(f) + assert cfg.get('foo', 'bar_thud') == 'baz' + assert cfg.get('foo', 'bar thud') == 'baz' + + +def test_words_many(): + f = StringIO("""\ +[foo] +bar__ thud quux = baz +""") + cfg = conf.ceph.parse(f) + assert cfg.get('foo', 'bar_thud_quux') == 'baz' + assert cfg.get('foo', 'bar thud quux') == 'baz' + + +def test_write_words_underscore(): + cfg = conf.ceph.CephConf() + cfg.add_section('foo') + cfg.set('foo', 'bar thud quux', 'baz') + f = StringIO() + cfg.write(f) + f.seek(0) + assert f.readlines() == ['[foo]\n', 'bar_thud_quux = baz\n','\n'] + + +def test_section_repeat(): + f = StringIO("""\ +[foo] +bar = bez +thud = quux + +[foo] +bar = baz +""") + cfg = conf.ceph.parse(f) + assert cfg.get('foo', 'bar') == 'baz' + assert cfg.get('foo', 'thud') == 'quux' diff --git a/ceph_deploy/tests/test_gather_keys.py b/ceph_deploy/tests/test_gather_keys.py new file mode 100644 index 0000000..37f5b1c --- /dev/null +++ b/ceph_deploy/tests/test_gather_keys.py @@ -0,0 +1,141 @@ +from ceph_deploy import gatherkeys +from ceph_deploy import new +import mock +import pytest +import tempfile +import os +import shutil + + +def get_key_static(keytype, key_path): + with open(key_path, 'w') as f: + f.write("[%s]\n" % (gatherkeys.keytype_identity(keytype))) + f.write("key=fred\n") + + +def get_key_dynamic(keytype, key_path): + with open(key_path, 'w', 0o600) as f: + f.write("[%s]\n" % (gatherkeys.keytype_identity(keytype))) + f.write("key='%s'" % (new.generate_auth_key())) + + +def mock_time_strftime(time_format): + return "20160412144231" + + +def mock_get_keys_fail(args, host, dest_dir): + return False + + +def mock_get_keys_sucess_static(args, host, dest_dir): + for keytype in ["admin", "mon", "osd", "mds", "mgr", "rgw"]: + keypath = gatherkeys.keytype_path_to(args, keytype) + path = "%s/%s" % (dest_dir, keypath) + get_key_static(keytype, path) + return True + + +def mock_get_keys_sucess_dynamic(args, host, dest_dir): + for keytype in ["admin", "mon", "osd", "mds", "mgr", "rgw"]: + keypath = gatherkeys.keytype_path_to(args, keytype) + path = "%s/%s" % (dest_dir, keypath) + get_key_dynamic(keytype, path) + return True + + +class TestGatherKeys(object): + """ + Since we are testing things that effect the content of the current working + directory we should test in a clean empty directory. + """ + def setup(self): + """ + Make temp directory for tests and set as current working directory + """ + self.orginaldir = os.getcwd() + self.test_dir = tempfile.mkdtemp() + os.chdir(self.test_dir) + + + def teardown(self): + """ + Set current working directory to old value + Remove temp directory and content + """ + os.chdir(self.orginaldir) + shutil.rmtree(self.test_dir) + + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_with_mon', mock_get_keys_fail) + def test_gatherkeys_fail(self): + """ + Test 'gatherkeys' fails when connecting to mon fails. + """ + args = mock.Mock() + args.cluster = "ceph" + args.mon = ['host1'] + with pytest.raises(RuntimeError): + gatherkeys.gatherkeys(args) + + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_with_mon', mock_get_keys_sucess_static) + def test_gatherkeys_success(self): + """ + Test 'gatherkeys' succeeds when getinig keys that are always the same. + Test 'gatherkeys' does not backup identical keys + """ + args = mock.Mock() + args.cluster = "ceph" + args.mon = ['host1'] + gatherkeys.gatherkeys(args) + dir_content = os.listdir(self.test_dir) + assert "ceph.client.admin.keyring" in dir_content + assert "ceph.bootstrap-mds.keyring" in dir_content + assert "ceph.bootstrap-mgr.keyring" in dir_content + assert "ceph.mon.keyring" in dir_content + assert "ceph.bootstrap-osd.keyring" in dir_content + assert "ceph.bootstrap-rgw.keyring" in dir_content + assert len(dir_content) == 6 + # Now we repeat as no new keys are generated + gatherkeys.gatherkeys(args) + dir_content = os.listdir(self.test_dir) + assert len(dir_content) == 6 + + + @mock.patch('ceph_deploy.gatherkeys.time.strftime', mock_time_strftime) + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_with_mon', mock_get_keys_sucess_dynamic) + def test_gatherkeys_backs_up(self): + """ + Test 'gatherkeys' succeeds when getting keys that are always different. + Test 'gatherkeys' does backup keys that are not identical. + """ + args = mock.Mock() + args.cluster = "ceph" + args.mon = ['host1'] + gatherkeys.gatherkeys(args) + dir_content = os.listdir(self.test_dir) + assert "ceph.client.admin.keyring" in dir_content + assert "ceph.bootstrap-mds.keyring" in dir_content + assert "ceph.bootstrap-mgr.keyring" in dir_content + assert "ceph.mon.keyring" in dir_content + assert "ceph.bootstrap-osd.keyring" in dir_content + assert "ceph.bootstrap-rgw.keyring" in dir_content + assert len(dir_content) == 6 + # Now we repeat as new keys are generated and old + # are backed up + gatherkeys.gatherkeys(args) + dir_content = os.listdir(self.test_dir) + mocked_time = mock_time_strftime(None) + assert "ceph.client.admin.keyring" in dir_content + assert "ceph.bootstrap-mds.keyring" in dir_content + assert "ceph.bootstrap-mgr.keyring" in dir_content + assert "ceph.mon.keyring" in dir_content + assert "ceph.bootstrap-osd.keyring" in dir_content + assert "ceph.bootstrap-rgw.keyring" in dir_content + assert "ceph.client.admin.keyring-%s" % (mocked_time) in dir_content + assert "ceph.bootstrap-mds.keyring-%s" % (mocked_time) in dir_content + assert "ceph.bootstrap-mgr.keyring-%s" % (mocked_time) in dir_content + assert "ceph.mon.keyring-%s" % (mocked_time) in dir_content + assert "ceph.bootstrap-osd.keyring-%s" % (mocked_time) in dir_content + assert "ceph.bootstrap-rgw.keyring-%s" % (mocked_time) in dir_content + assert len(dir_content) == 12 diff --git a/ceph_deploy/tests/test_gather_keys_missing.py b/ceph_deploy/tests/test_gather_keys_missing.py new file mode 100644 index 0000000..0369aa8 --- /dev/null +++ b/ceph_deploy/tests/test_gather_keys_missing.py @@ -0,0 +1,179 @@ +from ceph_deploy import gatherkeys +from ceph_deploy import new +import mock +import tempfile +import shutil +import os +import pytest + + +class mock_conn(object): + def __init__(self): + pass + +class mock_distro(object): + def __init__(self): + self.conn = mock_conn() + +class mock_rlogger(object): + def error(self, *arg): + return + + def debug(self, *arg): + return + + +def mock_remoto_process_check_success(conn, args): + secret = new.generate_auth_key() + out = '[mon.]\nkey = %s\ncaps mon = allow *\n' % secret + return out.encode('utf-8').split(b'\n'), [], 0 + + +def mock_remoto_process_check_rc_error(conn, args): + return [b""], [b"this failed\n"], 1 + + +class TestGatherKeysMissing(object): + """ + Since we are testing things that effect the content a directory we should + test in a clean empty directory. + """ + + def setup(self): + """ + Make temp directory for tests. + """ + self.args = mock.Mock() + self.distro = mock_distro() + self.test_dir = tempfile.mkdtemp() + self.rlogger = mock_rlogger() + self.keypath_remote = "some_path" + + def teardown(self): + """ + Remove temp directory and content + """ + shutil.rmtree(self.test_dir) + + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) + def test_success_admin(self): + keytype = 'admin' + rc = gatherkeys.gatherkeys_missing( + self.args, + self.distro, + self.rlogger, + self.keypath_remote, + keytype, + self.test_dir + ) + assert rc is True + keyname = gatherkeys.keytype_path_to(self.args, keytype) + keypath_gen = os.path.join(self.test_dir, keyname) + assert os.path.isfile(keypath_gen) + + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) + def test_success_mds(self): + keytype = 'mds' + rc = gatherkeys.gatherkeys_missing( + self.args, + self.distro, + self.rlogger, + self.keypath_remote, + keytype, + self.test_dir + ) + assert rc is True + keyname = gatherkeys.keytype_path_to(self.args, keytype) + keypath_gen = os.path.join(self.test_dir, keyname) + assert os.path.isfile(keypath_gen) + + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) + def test_success_mgr(self): + keytype = 'mgr' + rc = gatherkeys.gatherkeys_missing( + self.args, + self.distro, + self.rlogger, + self.keypath_remote, + keytype, + self.test_dir + ) + assert rc is True + keyname = gatherkeys.keytype_path_to(self.args, keytype) + keypath_gen = os.path.join(self.test_dir, keyname) + assert os.path.isfile(keypath_gen) + + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) + def test_success_osd(self): + keytype = 'osd' + rc = gatherkeys.gatherkeys_missing( + self.args, + self.distro, + self.rlogger, + self.keypath_remote, + keytype, + self.test_dir + ) + assert rc is True + keyname = gatherkeys.keytype_path_to(self.args, keytype) + keypath_gen = os.path.join(self.test_dir, keyname) + assert os.path.isfile(keypath_gen) + + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) + def test_success_rgw(self): + keytype = 'rgw' + rc = gatherkeys.gatherkeys_missing( + self.args, + self.distro, + self.rlogger, + self.keypath_remote, + keytype, + self.test_dir + ) + assert rc is True + keyname = gatherkeys.keytype_path_to(self.args, keytype) + keypath_gen = os.path.join(self.test_dir, keyname) + assert os.path.isfile(keypath_gen) + + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_rc_error) + def test_remoto_process_check_rc_error(self): + keytype = 'admin' + rc = gatherkeys.gatherkeys_missing( + self.args, + self.distro, + self.rlogger, + self.keypath_remote, + keytype, + self.test_dir + ) + assert rc is False + keyname = gatherkeys.keytype_path_to(self.args, keytype) + keypath_gen = os.path.join(self.test_dir, keyname) + assert not os.path.isfile(keypath_gen) + + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) + def test_fail_identity_missing(self): + keytype = 'silly' + with pytest.raises(RuntimeError): + gatherkeys.gatherkeys_missing( + self.args, + self.distro, + self.rlogger, + self.keypath_remote, + keytype, + self.test_dir + ) + + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) + def test_fail_capabilities_missing(self): + keytype = 'mon' + with pytest.raises(RuntimeError): + gatherkeys.gatherkeys_missing( + self.args, + self.distro, + self.rlogger, + self.keypath_remote, + keytype, + self.test_dir + ) + diff --git a/ceph_deploy/tests/test_gather_keys_with_mon.py b/ceph_deploy/tests/test_gather_keys_with_mon.py new file mode 100644 index 0000000..805cc17 --- /dev/null +++ b/ceph_deploy/tests/test_gather_keys_with_mon.py @@ -0,0 +1,219 @@ +from ceph_deploy import gatherkeys +from ceph_deploy import new +import mock +import json +import copy + + +remoto_process_check_success_output = { + "name": "ceph-node1", + "rank": 0, + "state": "leader", + "election_epoch": 6, + "quorum": [ + 0, + 1, + 2 + ], + "outside_quorum": [], + "extra_probe_peers": [ + "192.168.99.125:6789\/0", + "192.168.99.126:6789\/0" + ], + "sync_provider": [], + "monmap": { + "epoch": 1, + "fsid": "4dbee7eb-929b-4f3f-ad23-8a4e47235e40", + "modified": "2016-04-11 05:35:21.665220", + "created": "2016-04-11 05:35:21.665220", + "mons": [ + { + "rank": 0, + "name": "host0", + "addr": "192.168.99.124:6789\/0" + }, + { + "rank": 1, + "name": "host1", + "addr": "192.168.99.125:6789\/0" + }, + { + "rank": 2, + "name": "host2", + "addr": "192.168.99.126:6789\/0" + } + ] + } + } + + +class mock_remote_module(object): + def get_file(self, path): + return self.get_file_result + + def shortname(self): + hostname_split = self.longhostname.split('.') + return hostname_split[0] + +class mock_conn(object): + def __init__(self): + self.remote_module = mock_remote_module() + + +class mock_distro(object): + def __init__(self): + self.conn = mock_conn() + + +def mock_hosts_get_file_key_content(host, **kwargs): + output = mock_distro() + mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % new.generate_auth_key() + output.conn.remote_module.get_file_result = mon_keyring.encode('utf-8') + output.conn.remote_module.longhostname = host + return output + + +def mock_hosts_get_file_key_content_none(host, **kwargs): + output = mock_distro() + output.conn.remote_module.get_file_result = None + output.conn.remote_module.longhostname = host + return output + + +def mock_gatherkeys_missing_success(args, distro, rlogger, path_keytype_mon, keytype, dest_dir): + return True + + +def mock_gatherkeys_missing_fail(args, distro, rlogger, path_keytype_mon, keytype, dest_dir): + return False + + +def mock_remoto_process_check_success(conn, args): + out = json.dumps(remoto_process_check_success_output,sort_keys=True, indent=4) + return out.encode('utf-8').split(b'\n'), [], 0 + + +def mock_remoto_process_check_rc_error(conn, args): + return [b""], [b"this failed\n"], 1 + + +def mock_remoto_process_check_out_not_json(conn, args): + return [b"}bad output{"], [b""], 0 + + +def mock_remoto_process_check_out_missing_quorum(conn, args): + outdata = copy.deepcopy(remoto_process_check_success_output) + del outdata["quorum"] + out = json.dumps(outdata,sort_keys=True, indent=4) + return out.encode('utf-8').split(b'\n'), [], 0 + + +def mock_remoto_process_check_out_missing_quorum_1(conn, args): + outdata = copy.deepcopy(remoto_process_check_success_output) + del outdata["quorum"][1] + out = json.dumps(outdata,sort_keys=True, indent=4) + return out.encode('utf-8').split(b'\n'), [], 0 + + +def mock_remoto_process_check_out_missing_monmap(conn, args): + outdata = copy.deepcopy(remoto_process_check_success_output) + del outdata["monmap"] + out = json.dumps(outdata,sort_keys=True, indent=4) + return out.encode('utf-8').split(b'\n'), [], 0 + + +def mock_remoto_process_check_out_missing_mons(conn, args): + outdata = copy.deepcopy(remoto_process_check_success_output) + del outdata["monmap"]["mons"] + out = json.dumps(outdata,sort_keys=True, indent=4) + return out.encode('utf-8').split(b'\n'), [], 0 + + +def mock_remoto_process_check_out_missing_monmap_host1(conn, args): + outdata = copy.deepcopy(remoto_process_check_success_output) + del outdata["monmap"]["mons"][1] + out = json.dumps(outdata,sort_keys=True, indent=4) + return out.encode('utf-8').split(b'\n'), [], 0 + + +class TestGatherKeysWithMon(object): + """ + Test gatherkeys_with_mon function + """ + def setup(self): + self.args = mock.Mock() + self.args.cluster = "ceph" + self.args.mon = ['host1'] + self.host = 'host1' + self.test_dir = '/tmp' + + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) + @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) + def test_success(self): + rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) + assert rc is True + + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) + @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content_none) + def test_monkey_none(self): + rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) + assert rc is False + + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_fail) + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_success) + @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) + def test_missing_fail(self): + rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) + assert rc is False + + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_rc_error) + @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) + def test_remoto_process_check_rc_error(self): + rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) + assert rc is False + + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_out_not_json) + @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) + def test_remoto_process_check_out_not_json(self): + rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) + assert rc is False + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_out_missing_quorum) + @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) + def test_remoto_process_check_out_missing_quorum(self): + rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) + assert rc is False + + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_out_missing_quorum_1) + @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) + def test_remoto_process_check_out_missing_quorum_1(self): + rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) + assert rc is False + + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_out_missing_mons) + @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) + def test_remoto_process_check_out_missing_mon(self): + rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) + assert rc is False + + + @mock.patch('ceph_deploy.gatherkeys.gatherkeys_missing', mock_gatherkeys_missing_success) + @mock.patch('ceph_deploy.lib.remoto.process.check', mock_remoto_process_check_out_missing_monmap_host1) + @mock.patch('ceph_deploy.hosts.get', mock_hosts_get_file_key_content) + def test_remoto_process_check_out_missing_monmap_host1(self): + rc = gatherkeys.gatherkeys_with_mon(self.args, self.host, self.test_dir) + assert rc is False diff --git a/ceph_deploy/tests/test_install.py b/ceph_deploy/tests/test_install.py new file mode 100644 index 0000000..4a47f9d --- /dev/null +++ b/ceph_deploy/tests/test_install.py @@ -0,0 +1,149 @@ +from mock import Mock + +from ceph_deploy import install + + +class TestSanitizeArgs(object): + + def setup(self): + self.args = Mock() + # set the default behavior we set in cli.py + self.args.default_release = False + self.args.stable = None + + def test_args_release_not_specified(self): + self.args.release = None + result = install.sanitize_args(self.args) + # XXX + # we should get `args.release` to be the latest release + # but we don't want to be updating this test every single + # time there is a new default value, and we can't programatically + # change that. Future improvement: make the default release a + # variable in `ceph_deploy/__init__.py` + assert result.default_release is True + + def test_args_release_is_specified(self): + self.args.release = 'dumpling' + result = install.sanitize_args(self.args) + assert result.default_release is False + + def test_args_release_stable_is_used(self): + self.args.stable = 'dumpling' + result = install.sanitize_args(self.args) + assert result.release == 'dumpling' + + def test_args_stable_is_not_used(self): + self.args.release = 'dumpling' + result = install.sanitize_args(self.args) + assert result.stable is None + + +class TestDetectComponents(object): + + def setup(self): + self.args = Mock() + # default values for install_* flags + self.args.install_all = False + self.args.install_mds = False + self.args.install_mgr = False + self.args.install_mon = False + self.args.install_osd = False + self.args.install_rgw = False + self.args.install_tests = False + self.args.install_common = False + self.args.repo = False + self.distro = Mock() + + def test_install_with_repo_option_returns_no_packages(self): + self.args.repo = True + result = install.detect_components(self.args, self.distro) + assert result == [] + + def test_install_all_returns_all_packages_deb(self): + self.args.install_all = True + self.distro.is_rpm = False + self.distro.is_deb = True + self.distro.is_pkgtarxz = False + result = sorted(install.detect_components(self.args, self.distro)) + assert result == sorted([ + 'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'radosgw' + ]) + + def test_install_all_with_other_options_returns_all_packages_deb(self): + self.distro.is_rpm = False + self.distro.is_deb = True + self.distro.is_pkgtarxz = False + self.args.install_all = True + self.args.install_mds = True + self.args.install_mgr = True + self.args.install_mon = True + self.args.install_osd = True + result = sorted(install.detect_components(self.args, self.distro)) + assert result == sorted([ + 'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'radosgw' + ]) + + def test_install_all_returns_all_packages_rpm(self): + self.args.install_all = True + result = sorted(install.detect_components(self.args, self.distro)) + assert result == sorted([ + 'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'ceph-radosgw' + ]) + + def test_install_all_with_other_options_returns_all_packages_rpm(self): + self.args.install_all = True + self.args.install_mds = True + self.args.install_mon = True + self.args.install_mgr = True + self.args.install_osd = True + result = sorted(install.detect_components(self.args, self.distro)) + assert result == sorted([ + 'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'ceph-radosgw' + ]) + + def test_install_all_returns_all_packages_pkgtarxz(self): + self.args.install_all = True + self.distro.is_rpm = False + self.distro.is_deb = False + self.distro.is_pkgtarxz = True + result = sorted(install.detect_components(self.args, self.distro)) + assert result == sorted([ + 'ceph', + ]) + + def test_install_all_with_other_options_returns_all_packages_pkgtarxz(self): + self.distro.is_rpm = False + self.distro.is_deb = False + self.distro.is_pkgtarxz = True + self.args.install_all = True + self.args.install_mds = True + self.args.install_mgr = True + self.args.install_mon = True + self.args.install_osd = True + result = sorted(install.detect_components(self.args, self.distro)) + assert result == sorted([ + 'ceph', + ]) + + def test_install_only_one_component(self): + self.args.install_osd = True + result = install.detect_components(self.args, self.distro) + assert result == ['ceph-osd'] + + def test_install_a_couple_of_components(self): + self.args.install_osd = True + self.args.install_mds = True + self.args.install_mgr = True + result = sorted(install.detect_components(self.args, self.distro)) + assert result == sorted(['ceph-osd', 'ceph-mds', 'ceph-mgr']) + + def test_install_tests(self): + self.args.install_tests = True + result = sorted(install.detect_components(self.args, self.distro)) + assert result == sorted(['ceph-test']) + + def test_install_all_should_be_default_when_no_options_passed(self): + result = sorted(install.detect_components(self.args, self.distro)) + assert result == sorted([ + 'ceph-osd', 'ceph-mds', 'ceph', 'ceph-mon', 'ceph-radosgw' + ]) diff --git a/ceph_deploy/tests/test_keys_equivalent.py b/ceph_deploy/tests/test_keys_equivalent.py new file mode 100644 index 0000000..c748baf --- /dev/null +++ b/ceph_deploy/tests/test_keys_equivalent.py @@ -0,0 +1,171 @@ +from ceph_deploy import gatherkeys +from ceph_deploy import new +import tempfile +import shutil +import pytest + + +def write_key_mon_with_caps(path, secret): + mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % secret + with open(path, 'w', 0o600) as f: + f.write(mon_keyring) + + +def write_key_mon_with_caps_with_tab(path, secret): + mon_keyring = '[mon.]\n\tkey = %s\n\tcaps mon = allow *\n' % secret + with open(path, 'w', 0o600) as f: + f.write(mon_keyring) + + +def write_key_mon_with_caps_with_tab_quote(path, secret): + mon_keyring = '[mon.]\n\tkey = %s\n\tcaps mon = "allow *"\n' % secret + with open(path, 'w', 0o600) as f: + f.write(mon_keyring) + + +def write_key_mon_without_caps(path, secret): + mon_keyring = '[mon.]\nkey = %s\n' % secret + with open(path, 'w', 0o600) as f: + f.write(mon_keyring) + + +class TestKeysEquivalent(object): + """ + Since we are testing things that effect the content of the current working + directory we should test in a clean empty directory. + """ + def setup(self): + """ + Make temp directory for tests. + """ + self.test_dir = tempfile.mkdtemp() + + + def teardown(self): + """ + Remove temp directory and content + """ + shutil.rmtree(self.test_dir) + + + def test_identical_with_caps(self): + secret_01 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_with_caps(key_path_01, secret_01) + write_key_mon_with_caps(key_path_02, secret_01) + same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) + assert same is True + + + def test_different_with_caps(self): + secret_01 = new.generate_auth_key() + secret_02 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_with_caps(key_path_01, secret_01) + write_key_mon_with_caps(key_path_02, secret_02) + same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) + assert same is False + + + def test_identical_without_caps(self): + secret_01 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_without_caps(key_path_01, secret_01) + write_key_mon_without_caps(key_path_02, secret_01) + same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) + assert same is True + + + def test_different_without_caps(self): + secret_01 = new.generate_auth_key() + secret_02 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_without_caps(key_path_01, secret_01) + write_key_mon_without_caps(key_path_02, secret_02) + same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) + assert same is False + + + def test_identical_mixed_caps(self): + secret_01 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_with_caps(key_path_01, secret_01) + write_key_mon_without_caps(key_path_02, secret_01) + same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) + assert same is True + + + def test_different_mixed_caps(self): + secret_01 = new.generate_auth_key() + secret_02 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_with_caps(key_path_01, secret_01) + write_key_mon_without_caps(key_path_02, secret_02) + same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) + assert same is False + + + def test_identical_caps_mixed_tabs(self): + secret_01 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_with_caps(key_path_01, secret_01) + write_key_mon_with_caps_with_tab(key_path_02, secret_01) + same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) + assert same is True + + + def test_different_caps_mixed_tabs(self): + secret_01 = new.generate_auth_key() + secret_02 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_with_caps(key_path_01, secret_01) + write_key_mon_with_caps_with_tab(key_path_02, secret_02) + same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) + assert same is False + + + def test_identical_caps_mixed_quote(self): + secret_01 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_with_caps_with_tab(key_path_01, secret_01) + write_key_mon_with_caps_with_tab_quote(key_path_02, secret_01) + same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) + assert same is True + + + def test_different_caps_mixed_quote(self): + secret_01 = new.generate_auth_key() + secret_02 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_with_caps_with_tab(key_path_01, secret_01) + write_key_mon_with_caps_with_tab_quote(key_path_02, secret_02) + same = gatherkeys._keyring_equivalent(key_path_01, key_path_02) + assert same is False + + + def test_missing_key_1(self): + secret_02 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_with_caps_with_tab_quote(key_path_02, secret_02) + with pytest.raises(IOError): + gatherkeys._keyring_equivalent(key_path_01, key_path_02) + + + def test_missing_key_2(self): + secret_01 = new.generate_auth_key() + key_path_01 = self.test_dir + "/01.keyring" + key_path_02 = self.test_dir + "/02.keyring" + write_key_mon_with_caps_with_tab_quote(key_path_01, secret_01) + with pytest.raises(IOError): + gatherkeys._keyring_equivalent(key_path_01, key_path_02) diff --git a/ceph_deploy/tests/test_mon.py b/ceph_deploy/tests/test_mon.py new file mode 100644 index 0000000..585aa41 --- /dev/null +++ b/ceph_deploy/tests/test_mon.py @@ -0,0 +1,93 @@ +from ceph_deploy import exc, mon +from ceph_deploy.conf.ceph import CephConf +from mock import Mock +import pytest + + +def make_fake_conf(): + return CephConf() + +# NOTE: If at some point we re-use this helper, move it out +# and make it even more generic + +def make_fake_conn(receive_returns=None): + receive_returns = receive_returns or ([b'{}'], [], 0) + conn = Mock() + conn.return_value = conn + conn.execute = conn + conn.receive = Mock(return_value=receive_returns) + conn.gateway.remote_exec = conn.receive + conn.result = Mock(return_value=conn) + conn.cmd = lambda x: x + return conn + + +class TestGetMonInitialMembers(object): + + def test_assert_if_mon_none_and_empty_True(self): + cfg = make_fake_conf() + with pytest.raises(exc.NeedHostError): + mon.get_mon_initial_members(Mock(), True, cfg) + + def test_return_if_mon_none_and_empty_false(self): + cfg = make_fake_conf() + mon_initial_members = mon.get_mon_initial_members(Mock(), False, cfg) + assert mon_initial_members is None + + def test_single_item_if_mon_not_none(self): + cfg = make_fake_conf() + cfg.add_section('global') + cfg.set('global', 'mon initial members', 'AAAA') + mon_initial_members = mon.get_mon_initial_members(Mock(), False, cfg) + assert set(mon_initial_members) == set(['AAAA']) + + def test_multiple_item_if_mon_not_none(self): + cfg = make_fake_conf() + cfg.add_section('global') + cfg.set('global', 'mon initial members', 'AAAA, BBBB') + mon_initial_members = mon.get_mon_initial_members(Mock(), False, cfg) + assert set(mon_initial_members) == set(['AAAA', 'BBBB']) + + +class TestCatchCommonErrors(object): + + def setup(self): + self.logger = Mock() + + def assert_logger_message(self, logger, msg): + calls = logger.call_args_list + for log_call in calls: + if msg in log_call[0][0]: + return True + raise AssertionError('"%s" was not found in any of %s' % (msg, calls)) + + def test_warn_if_no_intial_members(self): + fake_conn = make_fake_conn() + cfg = make_fake_conf() + mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock()) + expected_msg = 'is not defined in `mon initial members`' + self.assert_logger_message(self.logger.warning, expected_msg) + + def test_warn_if_host_not_in_intial_members(self): + fake_conn = make_fake_conn() + cfg = make_fake_conf() + cfg.add_section('global') + cfg.set('global', 'mon initial members', 'AAAA') + mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock()) + expected_msg = 'is not defined in `mon initial members`' + self.assert_logger_message(self.logger.warning, expected_msg) + + def test_warn_if_not_mon_in_monmap(self): + fake_conn = make_fake_conn() + cfg = make_fake_conf() + mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock()) + expected_msg = 'does not exist in monmap' + self.assert_logger_message(self.logger.warning, expected_msg) + + def test_warn_if_not_public_addr_and_not_public_netw(self): + fake_conn = make_fake_conn() + cfg = make_fake_conf() + cfg.add_section('global') + mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg, Mock()) + expected_msg = 'neither `public_addr` nor `public_network`' + self.assert_logger_message(self.logger.warning, expected_msg) diff --git a/ceph_deploy/tests/test_remotes.py b/ceph_deploy/tests/test_remotes.py new file mode 100644 index 0000000..4c7ac03 --- /dev/null +++ b/ceph_deploy/tests/test_remotes.py @@ -0,0 +1,255 @@ +from mock import patch +from ceph_deploy.hosts import remotes +from ceph_deploy.hosts.remotes import platform_information, parse_os_release + +class FakeExists(object): + + def __init__(self, existing_paths): + self.existing_paths = existing_paths + + def __call__(self, path): + for existing_path in self.existing_paths: + if path == existing_path: + return path + + +class TestWhich(object): + + def setup(self): + self.exists_module = 'ceph_deploy.hosts.remotes.os.path.exists' + + def test_finds_absolute_paths(self): + exists = FakeExists(['/bin/ls']) + with patch(self.exists_module, exists): + path = remotes.which('ls') + assert path == '/bin/ls' + + def test_does_not_find_executable(self): + exists = FakeExists(['/bin/foo']) + with patch(self.exists_module, exists): + path = remotes.which('ls') + assert path is None + +class TestPlatformInformation(object): + """ tests various inputs that remotes.platform_information handles + + you can test your OS string by comparing the results with the output from: + python -c "import platform; print platform.linux_distribution()" + """ + + def setup(self): + pass + + def test_handles_deb_version_num(self): + def fake_distro(): return ('debian', '8.4', '') + distro, release, codename = platform_information(fake_distro) + assert distro == 'debian' + assert release == '8.4' + assert codename == 'jessie' + + def test_handles_deb_version_slash(self): + def fake_distro(): return ('debian', 'wheezy/something', '') + distro, release, codename = platform_information(fake_distro) + assert distro == 'debian' + assert release == 'wheezy/something' + assert codename == 'wheezy' + + def test_handles_deb_version_slash_sid(self): + def fake_distro(): return ('debian', 'jessie/sid', '') + distro, release, codename = platform_information(fake_distro) + assert distro == 'debian' + assert release == 'jessie/sid' + assert codename == 'sid' + + def test_handles_no_codename(self): + def fake_distro(): return ('SlaOS', '99.999', '') + distro, release, codename = platform_information(fake_distro) + assert distro == 'SlaOS' + assert release == '99.999' + assert codename == '' + + # Normal distro strings + def test_hanles_centos_64(self): + def fake_distro(): return ('CentOS', '6.4', 'Final') + distro, release, codename = platform_information(fake_distro) + assert distro == 'CentOS' + assert release == '6.4' + assert codename == 'Final' + + + def test_handles_ubuntu_percise(self): + def fake_distro(): return ('Ubuntu', '12.04', 'precise') + distro, release, codename = platform_information(fake_distro) + assert distro == 'Ubuntu' + assert release == '12.04' + assert codename == 'precise' + +class TestParseOsRelease(object): + """ test various forms of /etc/os-release """ + + def setup(self): + pass + + def test_handles_centos_7(self, tmpdir): + path = str(tmpdir.join('os_release')) + with open(path, 'w') as os_release: + os_release.write(""" +NAME="CentOS Linux" +VERSION="7 (Core)" +ID="centos" +ID_LIKE="rhel fedora" +VERSION_ID="7" +PRETTY_NAME="CentOS Linux 7 (Core)" +ANSI_COLOR="0;31" +CPE_NAME="cpe:/o:centos:centos:7" +HOME_URL="https://www.centos.org/" +BUG_REPORT_URL="https://bugs.centos.org/" + +CENTOS_MANTISBT_PROJECT="CentOS-7" +CENTOS_MANTISBT_PROJECT_VERSION="7" +REDHAT_SUPPORT_PRODUCT="centos" +REDHAT_SUPPORT_PRODUCT_VERSION="7" +""") + distro, release, codename = parse_os_release(path) + assert distro == 'centos' + assert release == '7' + assert codename == 'core' + + + def test_handles_debian_stretch(self, tmpdir): + path = str(tmpdir.join('os_release')) + with open(path, 'w') as os_release: + os_release.write(""" +PRETTY_NAME="Debian GNU/Linux 9 (stretch)" +NAME="Debian GNU/Linux" +VERSION_ID="9" +VERSION="9 (stretch)" +ID=debian +HOME_URL="https://www.debian.org/" +SUPPORT_URL="https://www.debian.org/support" +BUG_REPORT_URL="https://bugs.debian.org/" +""") + distro, release, codename = parse_os_release(path) + assert distro == 'debian' + assert release == '9' + assert codename == 'stretch' + + def test_handles_fedora_26(self, tmpdir): + path = str(tmpdir.join('os_release')) + with open(path, 'w') as os_release: + os_release.write(""" +NAME=Fedora +VERSION="26 (Twenty Six)" +ID=fedora +VERSION_ID=26 +PRETTY_NAME="Fedora 26 (Twenty Six)" +ANSI_COLOR="0;34" +CPE_NAME="cpe:/o:fedoraproject:fedora:26" +HOME_URL="https://fedoraproject.org/" +BUG_REPORT_URL="https://bugzilla.redhat.com/" +REDHAT_BUGZILLA_PRODUCT="Fedora" +REDHAT_BUGZILLA_PRODUCT_VERSION=26 +REDHAT_SUPPORT_PRODUCT="Fedora" +REDHAT_SUPPORT_PRODUCT_VERSION=26 +PRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy +""") + distro, release, codename = parse_os_release(path) + assert distro == 'fedora' + assert release == '26' + assert codename == 'twenty six' + + def test_handles_opensuse_leap_42_2(self, tmpdir): + path = str(tmpdir.join('os_release')) + with open(path, 'w') as os_release: + os_release.write(""" +NAME="openSUSE Leap" +VERSION="42.2" +ID=opensuse +ID_LIKE="suse" +VERSION_ID="42.2" +PRETTY_NAME="openSUSE Leap 42.2" +ANSI_COLOR="0;32" +CPE_NAME="cpe:/o:opensuse:leap:42.2" +BUG_REPORT_URL="https://bugs.opensuse.org" +HOME_URL="https://www.opensuse.org/" +""") + distro, release, codename = parse_os_release(path) + assert distro == 'opensuse' + assert release == '42.2' + assert codename == '42.2' + + def test_handles_opensuse_tumbleweed(self, tmpdir): + path = str(tmpdir.join('os_release')) + with open(path, 'w') as os_release: + os_release.write(""" +NAME="openSUSE Tumbleweed" +# VERSION="20170502" +ID=opensuse +ID_LIKE="suse" +VERSION_ID="20170502" +PRETTY_NAME="openSUSE Tumbleweed" +ANSI_COLOR="0;32" +CPE_NAME="cpe:/o:opensuse:tumbleweed:20170502" +BUG_REPORT_URL="https://bugs.opensuse.org" +HOME_URL="https://www.opensuse.org/" +""") + distro, release, codename = parse_os_release(path) + assert distro == 'opensuse' + assert release == '20170502' + assert codename == 'tumbleweed' + + def test_handles_sles_12_sp3(self, tmpdir): + path = str(tmpdir.join('os_release')) + with open(path, 'w') as os_release: + os_release.write(""" +NAME="SLES" +VERSION="12-SP3" +VERSION_ID="12.3" +PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3" +ID="sles" +ANSI_COLOR="0;32" +CPE_NAME="cpe:/o:suse:sles:12:sp3" +""") + distro, release, codename = parse_os_release(path) + assert distro == 'sles' + assert release == '12.3' + assert codename == '12-SP3' + + def test_handles_ubuntu_xenial(self, tmpdir): + path = str(tmpdir.join('os_release')) + with open(path, 'w') as os_release: + os_release.write(""" +NAME="Ubuntu" +VERSION="16.04 LTS (Xenial Xerus)" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 16.04 LTS" +VERSION_ID="16.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/" +UBUNTU_CODENAME=xenial +""") + distro, release, codename = parse_os_release(path) + assert distro == 'ubuntu' + assert release == '16.04' + assert codename == 'xenial' + + def test_handles_alt_8_2(self, tmpdir): + path = str(tmpdir.join('os_release')) + with open(path, 'w') as os_release: + os_release.write(""" +NAME="ALT" +VERSION="8.2 " +ID=altlinux +VERSION_ID=8.2 +PRETTY_NAME="ALT Workstation K 8.2 (Centaurea Ruthenica)" +ANSI_COLOR="1;33" +CPE_NAME="cpe:/o:alt:kworkstation:8.2" +HOME_URL="http://www.basealt.ru" +BUG_REPORT_URL="https://bugs.altlinux.org/" +""") + distro, release, codename = parse_os_release(path) + assert distro == 'altlinux' + assert release == '8.2' + assert codename == '8.2' diff --git a/ceph_deploy/tests/unit/hosts/test_altlinux.py b/ceph_deploy/tests/unit/hosts/test_altlinux.py new file mode 100644 index 0000000..cc65fde --- /dev/null +++ b/ceph_deploy/tests/unit/hosts/test_altlinux.py @@ -0,0 +1,10 @@ +from ceph_deploy.hosts.alt.install import map_components, NON_SPLIT_PACKAGES + + +class TestALTMapComponents(object): + def test_valid(self): + pkgs = map_components(NON_SPLIT_PACKAGES, ['ceph-osd', 'ceph-common', 'ceph-radosgw']) + assert 'ceph' in pkgs + assert 'ceph-common' in pkgs + assert 'ceph-radosgw' in pkgs + assert 'ceph-osd' not in pkgs diff --git a/ceph_deploy/tests/unit/hosts/test_centos.py b/ceph_deploy/tests/unit/hosts/test_centos.py new file mode 100644 index 0000000..187582c --- /dev/null +++ b/ceph_deploy/tests/unit/hosts/test_centos.py @@ -0,0 +1,64 @@ +from ceph_deploy.hosts import centos +from ceph_deploy import hosts +from mock import Mock, patch + + +def pytest_generate_tests(metafunc): + # called once per each test function + try: + funcarglist = metafunc.cls.params[metafunc.function.__name__] + except AttributeError: + return + argnames = list(funcarglist[0]) + metafunc.parametrize(argnames, [[funcargs[name] for name in argnames] + for funcargs in funcarglist]) + + +class TestCentosRepositoryUrlPart(object): + + params= { + 'test_repository_url_part': [ + dict(distro="CentOS Linux", release='4.3', codename="Foo", output='el6'), + dict(distro="CentOS Linux", release='6.5', codename="Final", output='el6'), + dict(distro="CentOS Linux", release='7.0', codename="Core", output='el7'), + dict(distro="CentOS Linux", release='7.0.1406', codename="Core", output='el7'), + dict(distro="CentOS Linux", release='10.4.000', codename="Core", output='el10'), + dict(distro="RedHat", release='4.3', codename="Foo", output='el6'), + dict(distro="Red Hat Enterprise Linux Server", release='5.8', codename="Tikanga", output="el6"), + dict(distro="Red Hat Enterprise Linux Server", release='6.5', codename="Santiago", output='rhel6'), + dict(distro="RedHat", release='7.0.1406', codename="Core", output='rhel7'), + dict(distro="RedHat", release='10.999.12', codename="Core", output='rhel10'), + ], + 'test_rpm_dist': [ + dict(distro="CentOS Linux", release='4.3', codename="Foo", output='el6'), + dict(distro="CentOS Linux", release='6.5', codename="Final", output='el6'), + dict(distro="CentOS Linux", release='7.0', codename="Core", output='el7'), + dict(distro="CentOS Linux", release='7.0.1406', codename="Core", output='el7'), + dict(distro="CentOS Linux", release='10.10.9191', codename="Core", output='el10'), + dict(distro="RedHat", release='4.3', codename="Foo", output='el6'), + dict(distro="Red Hat Enterprise Linux Server", release='5.8', codename="Tikanga", output="el6"), + dict(distro="Red Hat Enterprise Linux Server", release='6.5', codename="Santiago", output='el6'), + dict(distro="RedHat", release='7.0', codename="Core", output='el7'), + dict(distro="RedHat", release='7.0.1406', codename="Core", output='el7'), + dict(distro="RedHat", release='10.9.8765', codename="Core", output='el10'), + ] + } + + def make_fake_connection(self, platform_information=None): + get_connection = Mock() + get_connection.return_value = get_connection + get_connection.remote_module.platform_information = Mock( + return_value=platform_information) + return get_connection + + def test_repository_url_part(self, distro, release, codename, output): + fake_get_connection = self.make_fake_connection((distro, release, codename)) + with patch('ceph_deploy.hosts.get_connection', fake_get_connection): + self.module = hosts.get('testhost') + assert centos.repository_url_part(self.module) == output + + def test_rpm_dist(self, distro, release, codename, output): + fake_get_connection = self.make_fake_connection((distro, release, codename)) + with patch('ceph_deploy.hosts.get_connection', fake_get_connection): + self.module = hosts.get('testhost') + assert centos.rpm_dist(self.module) == output diff --git a/ceph_deploy/tests/unit/hosts/test_common.py b/ceph_deploy/tests/unit/hosts/test_common.py new file mode 100644 index 0000000..278d09e --- /dev/null +++ b/ceph_deploy/tests/unit/hosts/test_common.py @@ -0,0 +1,24 @@ +from ceph_deploy.hosts.common import map_components + + +class TestMapComponents(object): + + def test_map_components_all_split(self): + components = ['ceph-mon', 'ceph-osd'] + packages = map_components([], components) + assert set(packages) == set(components) + + def test_map_components_mds_not_split(self): + components = ['ceph-mon', 'ceph-osd', 'ceph-mds'] + packages = map_components(['ceph-mds'], components) + assert set(packages) == set(['ceph-mon', 'ceph-osd', 'ceph']) + + def test_map_components_no_duplicates(self): + components = ['ceph-mon', 'ceph-osd', 'ceph-mds'] + packages = map_components(['ceph-mds', 'ceph-osd'], components) + assert set(packages) == set(['ceph-mon', 'ceph']) + assert len(packages) == len(set(['ceph-mon', 'ceph'])) + + def test_map_components_no_components(self): + packages = map_components(['ceph-mon'], []) + assert len(packages) == 0 diff --git a/ceph_deploy/tests/unit/hosts/test_hosts.py b/ceph_deploy/tests/unit/hosts/test_hosts.py new file mode 100644 index 0000000..dbc448c --- /dev/null +++ b/ceph_deploy/tests/unit/hosts/test_hosts.py @@ -0,0 +1,433 @@ +from pytest import raises +from mock import Mock, patch + +from ceph_deploy import exc +from ceph_deploy import hosts + + +class TestNormalized(object): + + def test_get_debian(self): + result = hosts._normalized_distro_name('Debian') + assert result == 'debian' + + def test_get_centos(self): + result = hosts._normalized_distro_name('CentOS Linux') + assert result == 'centos' + + def test_get_ubuntu(self): + result = hosts._normalized_distro_name('Ubuntu') + assert result == 'ubuntu' + + def test_get_mint(self): + result = hosts._normalized_distro_name('LinuxMint') + assert result == 'ubuntu' + + def test_get_suse(self): + result = hosts._normalized_distro_name('SUSE LINUX') + assert result == 'suse' + + def test_get_redhat(self): + result = hosts._normalized_distro_name('RedHatEnterpriseLinux') + assert result == 'redhat' + + def test_get_virtuozzo(self): + result = hosts._normalized_distro_name('Virtuozzo Linux') + assert result == 'virtuozzo' + + def test_get_arch(self): + result = hosts._normalized_distro_name('Arch Linux') + assert result == 'arch' + + +class TestNormalizeRelease(object): + + def test_int_single_version(self): + result = hosts._normalized_release('1') + assert result.int_major == 1 + assert result.int_minor == 0 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_single_version_with_trailing_space(self): + result = hosts._normalized_release(' 1') + assert result.int_major == 1 + assert result.int_minor == 0 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_single_version_with_prepended_zero(self): + result = hosts._normalized_release('01') + assert result.int_major == 1 + assert result.int_minor == 0 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_minor_version(self): + result = hosts._normalized_release('1.8') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_minor_version_with_trailing_space(self): + result = hosts._normalized_release(' 1.8') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_minor_version_with_prepended_zero(self): + result = hosts._normalized_release('01.08') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_patch_version(self): + result = hosts._normalized_release('1.8.1234') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 0 + + def test_int_patch_version_with_trailing_space(self): + result = hosts._normalized_release(' 1.8.1234') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 0 + + def test_int_patch_version_with_prepended_zero(self): + result = hosts._normalized_release('01.08.01234') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 0 + + def test_int_garbage_version(self): + result = hosts._normalized_release('1.8.1234.1') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 1 + + def test_int_garbage_version_with_trailing_space(self): + result = hosts._normalized_release(' 1.8.1234.1') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 1 + + def test_int_garbage_version_with_prepended_zero(self): + result = hosts._normalized_release('01.08.01234.1') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 1 + + def test_int_single_version_rc(self): + result = hosts._normalized_release('1rc-123') + assert result.int_major == 1 + assert result.int_minor == 0 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_single_version_with_trailing_space_rc(self): + result = hosts._normalized_release(' 1rc-123') + assert result.int_major == 1 + assert result.int_minor == 0 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_single_version_with_prepended_zero_rc(self): + result = hosts._normalized_release('01rc-123') + assert result.int_major == 1 + assert result.int_minor == 0 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_minor_version_rc(self): + result = hosts._normalized_release('1.8rc-123') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_minor_version_with_trailing_space_rc(self): + result = hosts._normalized_release(' 1.8rc-123') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_minor_version_with_prepended_zero_rc(self): + result = hosts._normalized_release('01.08rc-123') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 0 + assert result.int_garbage == 0 + + def test_int_patch_version_rc(self): + result = hosts._normalized_release('1.8.1234rc-123') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 0 + + def test_int_patch_version_with_trailing_space_rc(self): + result = hosts._normalized_release(' 1.8.1234rc-123') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 0 + + def test_int_patch_version_with_prepended_zero_rc(self): + result = hosts._normalized_release('01.08.01234rc-123') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 0 + + def test_int_garbage_version_rc(self): + result = hosts._normalized_release('1.8.1234.1rc-123') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 1 + + def test_int_garbage_version_with_trailing_space_rc(self): + result = hosts._normalized_release(' 1.8.1234.1rc-123') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 1 + + def test_int_garbage_version_with_prepended_zero_rc(self): + result = hosts._normalized_release('01.08.01234.1rc-1') + assert result.int_major == 1 + assert result.int_minor == 8 + assert result.int_patch == 1234 + assert result.int_garbage == 1 + + # with non ints + + def test_single_version(self): + result = hosts._normalized_release('1') + assert result.major == "1" + assert result.minor == "0" + assert result.patch == "0" + assert result.garbage == "0" + + def test_single_version_with_trailing_space(self): + result = hosts._normalized_release(' 1') + assert result.major == "1" + assert result.minor == "0" + assert result.patch == "0" + assert result.garbage == "0" + + def test_single_version_with_prepended_zero(self): + result = hosts._normalized_release('01') + assert result.major == "01" + assert result.minor == "0" + assert result.patch == "0" + assert result.garbage == "0" + + def test_minor_version(self): + result = hosts._normalized_release('1.8') + assert result.major == "1" + assert result.minor == "8" + assert result.patch == "0" + assert result.garbage == "0" + + def test_minor_version_with_trailing_space(self): + result = hosts._normalized_release(' 1.8') + assert result.major == "1" + assert result.minor == "8" + assert result.patch == "0" + assert result.garbage == "0" + + def test_minor_version_with_prepended_zero(self): + result = hosts._normalized_release('01.08') + assert result.major == "01" + assert result.minor == "08" + assert result.patch == "0" + assert result.garbage == "0" + + def test_patch_version(self): + result = hosts._normalized_release('1.8.1234') + assert result.major == "1" + assert result.minor == "8" + assert result.patch == "1234" + assert result.garbage == "0" + + def test_patch_version_with_trailing_space(self): + result = hosts._normalized_release(' 1.8.1234') + assert result.major == "1" + assert result.minor == "8" + assert result.patch == "1234" + assert result.garbage == "0" + + def test_patch_version_with_prepended_zero(self): + result = hosts._normalized_release('01.08.01234') + assert result.major == "01" + assert result.minor == "08" + assert result.patch == "01234" + assert result.garbage == "0" + + def test_garbage_version(self): + result = hosts._normalized_release('1.8.1234.1') + assert result.major == "1" + assert result.minor == "8" + assert result.patch == "1234" + assert result.garbage == "1" + + def test_garbage_version_with_trailing_space(self): + result = hosts._normalized_release(' 1.8.1234.1') + assert result.major == "1" + assert result.minor == "8" + assert result.patch == "1234" + assert result.garbage == "1" + + def test_garbage_version_with_prepended_zero(self): + result = hosts._normalized_release('01.08.01234.1') + assert result.major == "01" + assert result.minor == "08" + assert result.patch == "01234" + assert result.garbage == "1" + + def test_patch_version_rc(self): + result = hosts._normalized_release('1.8.1234rc-123') + assert result.major == "1" + assert result.minor == "8" + assert result.patch == "1234rc-123" + assert result.garbage == "0" + + def test_patch_version_with_trailing_space_rc(self): + result = hosts._normalized_release(' 1.8.1234rc-123') + assert result.major == "1" + assert result.minor == "8" + assert result.patch == "1234rc-123" + assert result.garbage == "0" + + def test_patch_version_with_prepended_zero_rc(self): + result = hosts._normalized_release('01.08.01234.1rc-123') + assert result.major == "01" + assert result.minor == "08" + assert result.patch == "01234" + assert result.garbage == "1rc-123" + + def test_garbage_version_rc(self): + result = hosts._normalized_release('1.8.1234.1rc-123') + assert result.major == "1" + assert result.minor == "8" + assert result.patch == "1234" + assert result.garbage == "1rc-123" + + def test_garbage_version_with_trailing_space_rc(self): + result = hosts._normalized_release(' 1.8.1234.1rc-123') + assert result.major == "1" + assert result.minor == "8" + assert result.patch == "1234" + assert result.garbage == "1rc-123" + + def test_garbage_version_with_prepended_zero_rc(self): + result = hosts._normalized_release('01.08.01234.1rc-1') + assert result.major == "01" + assert result.minor == "08" + assert result.patch == "01234" + assert result.garbage == "1rc-1" + + def test_garbage_version_with_no_numbers(self): + result = hosts._normalized_release('sid') + assert result.major == "sid" + assert result.minor == "0" + assert result.patch == "0" + assert result.garbage == "0" + + +class TestHostGet(object): + + def make_fake_connection(self, platform_information=None): + get_connection = Mock() + get_connection.return_value = get_connection + get_connection.remote_module.platform_information = Mock( + return_value=platform_information) + return get_connection + + def test_get_unsupported(self): + fake_get_connection = self.make_fake_connection(('Solaris Enterprise', '', '')) + with patch('ceph_deploy.hosts.get_connection', fake_get_connection): + with raises(exc.UnsupportedPlatform): + hosts.get('myhost') + + def test_get_unsupported_message(self): + fake_get_connection = self.make_fake_connection(('Solaris Enterprise', '', '')) + with patch('ceph_deploy.hosts.get_connection', fake_get_connection): + with raises(exc.UnsupportedPlatform) as error: + hosts.get('myhost') + + assert error.value.__str__() == 'Platform is not supported: Solaris Enterprise ' + + def test_get_unsupported_message_release(self): + fake_get_connection = self.make_fake_connection(('Solaris', 'Tijuana', '12')) + with patch('ceph_deploy.hosts.get_connection', fake_get_connection): + with raises(exc.UnsupportedPlatform) as error: + hosts.get('myhost') + + assert error.value.__str__() == 'Platform is not supported: Solaris 12 Tijuana' + + +class TestGetDistro(object): + + def test_get_debian(self): + result = hosts._get_distro('Debian') + assert result.__name__.endswith('debian') + + def test_get_ubuntu(self): + # Ubuntu imports debian stuff + result = hosts._get_distro('Ubuntu') + assert result.__name__.endswith('debian') + + def test_get_centos(self): + result = hosts._get_distro('CentOS') + assert result.__name__.endswith('centos') + + def test_get_scientific(self): + result = hosts._get_distro('Scientific') + assert result.__name__.endswith('centos') + + def test_get_oracle(self): + result = hosts._get_distro('Oracle Linux Server') + assert result.__name__.endswith('centos') + + def test_get_redhat(self): + result = hosts._get_distro('RedHat') + assert result.__name__.endswith('centos') + + def test_get_redhat_whitespace(self): + result = hosts._get_distro('Red Hat Enterprise Linux') + assert result.__name__.endswith('centos') + + def test_get_uknown(self): + assert hosts._get_distro('Solaris') is None + + def test_get_fallback(self): + result = hosts._get_distro('Solaris', 'Debian') + assert result.__name__.endswith('debian') + + def test_get_mint(self): + result = hosts._get_distro('LinuxMint') + assert result.__name__.endswith('debian') + + def test_get_virtuozzo(self): + result = hosts._get_distro('Virtuozzo Linux') + assert result.__name__.endswith('centos') + + def test_get_arch(self): + result = hosts._get_distro('Arch Linux') + assert result.__name__.endswith('arch') + + def test_get_altlinux(self): + result = hosts._get_distro('ALT Linux') + assert result.__name__.endswith('alt') diff --git a/ceph_deploy/tests/unit/hosts/test_remotes.py b/ceph_deploy/tests/unit/hosts/test_remotes.py new file mode 100644 index 0000000..69ee4f7 --- /dev/null +++ b/ceph_deploy/tests/unit/hosts/test_remotes.py @@ -0,0 +1,37 @@ +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO + +from ceph_deploy.hosts import remotes + + +class TestObjectGrep(object): + + def setup(self): + self.file_object = StringIO('foo\n') + self.file_object.seek(0) + + def test_finds_term(self): + assert remotes.object_grep('foo', self.file_object) + + def test_does_not_find_anything(self): + assert remotes.object_grep('bar', self.file_object) is False + + +class TestWhich(object): + + def test_executable_is_a_directory(self, monkeypatch): + monkeypatch.setattr(remotes.os.path, 'exists', lambda x: True) + monkeypatch.setattr(remotes.os.path, 'isfile', lambda x: False) + assert remotes.which('foo') is None + + def test_executable_does_not_exist(self, monkeypatch): + monkeypatch.setattr(remotes.os.path, 'exists', lambda x: False) + monkeypatch.setattr(remotes.os.path, 'isfile', lambda x: True) + assert remotes.which('foo') is None + + def test_executable_exists_as_file(self, monkeypatch): + monkeypatch.setattr(remotes.os.path, 'exists', lambda x: True) + monkeypatch.setattr(remotes.os.path, 'isfile', lambda x: True) + assert remotes.which('foo') == '/usr/local/bin/foo' diff --git a/ceph_deploy/tests/unit/hosts/test_suse.py b/ceph_deploy/tests/unit/hosts/test_suse.py new file mode 100644 index 0000000..d3b3415 --- /dev/null +++ b/ceph_deploy/tests/unit/hosts/test_suse.py @@ -0,0 +1,34 @@ +from ceph_deploy.hosts import suse +from ceph_deploy.hosts.suse.install import map_components, NON_SPLIT_PACKAGES + +class TestSuseInit(object): + def setup(self): + self.host = suse + + def test_choose_init_default(self): + self.host.release = None + init_type = self.host.choose_init(self.host) + assert init_type == "systemd" + + def test_choose_init_SLE_11(self): + self.host.release = '11' + init_type = self.host.choose_init(self.host) + assert init_type == "sysvinit" + + def test_choose_init_SLE_12(self): + self.host.release = '12' + init_type = self.host.choose_init(self.host) + assert init_type == "systemd" + + def test_choose_init_openSUSE_13_1(self): + self.host.release = '13.1' + init_type = self.host.choose_init(self.host) + assert init_type == "systemd" + +class TestSuseMapComponents(object): + def test_valid(self): + pkgs = map_components(NON_SPLIT_PACKAGES, ['ceph-osd', 'ceph-common', 'ceph-radosgw']) + assert 'ceph' in pkgs + assert 'ceph-common' in pkgs + assert 'ceph-radosgw' in pkgs + assert 'ceph-osd' not in pkgs diff --git a/ceph_deploy/tests/unit/hosts/test_util.py b/ceph_deploy/tests/unit/hosts/test_util.py new file mode 100644 index 0000000..c4a5947 --- /dev/null +++ b/ceph_deploy/tests/unit/hosts/test_util.py @@ -0,0 +1,29 @@ +from ceph_deploy.hosts import util +from mock import Mock + + +class TestInstallYumPriorities(object): + + def setup(self): + self.distro = Mock() + self.patch_path = 'ceph_deploy.hosts.centos.install.pkg_managers.yum' + self.yum = Mock() + + def test_centos_six(self): + self.distro.release = ('6', '0') + self.distro.normalized_name = 'centos' + util.install_yum_priorities(self.distro, _yum=self.yum) + assert self.yum.call_args[0][1] == 'yum-plugin-priorities' + + def test_centos_five(self): + self.distro.release = ('5', '0') + self.distro.normalized_name = 'centos' + util.install_yum_priorities(self.distro, _yum=self.yum) + assert self.yum.call_args[0][1] == 'yum-priorities' + + def test_fedora(self): + self.distro.release = ('20', '0') + self.distro.normalized_name = 'fedora' + util.install_yum_priorities(self.distro, _yum=self.yum) + assert self.yum.call_args[0][1] == 'yum-plugin-priorities' + diff --git a/ceph_deploy/tests/unit/test_cli.py b/ceph_deploy/tests/unit/test_cli.py new file mode 100644 index 0000000..fff75fc --- /dev/null +++ b/ceph_deploy/tests/unit/test_cli.py @@ -0,0 +1,46 @@ +from ceph_deploy import cli +from ceph_deploy.tests import util + + +class FakeLogger(object): + + def __init__(self): + self._calls = [] + self._info = [] + + def _output(self): + return '\n'.join(self._calls) + + def _record(self, level, message): + self._calls.append(message) + method = getattr(self, '_%s' % level) + method.append(message) + + def info(self, message): + self._record('info', message) + + +class TestLogFlags(object): + + def setup(self): + self.logger = FakeLogger() + + def test_logs_multiple_object_attributes(self): + args = util.Empty(verbose=True, adjust_repos=False) + cli.log_flags(args, logger=self.logger) + result = self.logger._output() + assert ' verbose ' in result + assert ' adjust_repos ' in result + + def test_attributes_are_logged_with_values(self): + args = util.Empty(verbose=True) + cli.log_flags(args, logger=self.logger) + result = self.logger._output() + assert ' verbose ' in result + assert ' : True' in result + + def test_private_attributes_are_not_logged(self): + args = util.Empty(verbose=True, _private='some value') + cli.log_flags(args, logger=self.logger) + result = self.logger._output() + assert ' _private ' not in result diff --git a/ceph_deploy/tests/unit/test_conf.py b/ceph_deploy/tests/unit/test_conf.py new file mode 100644 index 0000000..c0a3521 --- /dev/null +++ b/ceph_deploy/tests/unit/test_conf.py @@ -0,0 +1,192 @@ +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO +from textwrap import dedent +import pytest +from mock import Mock, patch, mock_open +from ceph_deploy import conf + + +class TestLocateOrCreate(object): + + def setup(self): + self.fake_file = mock_open() + + def test_no_conf(self): + fake_path = Mock() + fake_path.exists = Mock(return_value=False) + with patch('ceph_deploy.conf.cephdeploy.open', self.fake_file, create=True): + with patch('ceph_deploy.conf.cephdeploy.path', fake_path): + conf.cephdeploy.location() + + assert self.fake_file.called is True + assert self.fake_file.call_args[0][0].endswith('/.cephdeploy.conf') + + def test_cwd_conf_exists(self): + fake_path = Mock() + fake_path.join = Mock(return_value='/srv/cephdeploy.conf') + fake_path.exists = Mock(return_value=True) + with patch('ceph_deploy.conf.cephdeploy.path', fake_path): + result = conf.cephdeploy.location() + + assert result == '/srv/cephdeploy.conf' + + def test_home_conf_exists(self): + fake_path = Mock() + fake_path.expanduser = Mock(return_value='/home/alfredo/.cephdeploy.conf') + fake_path.exists = Mock(side_effect=[False, True]) + with patch('ceph_deploy.conf.cephdeploy.path', fake_path): + result = conf.cephdeploy.location() + + assert result == '/home/alfredo/.cephdeploy.conf' + + +class TestConf(object): + + def test_has_repos(self): + cfg = conf.cephdeploy.Conf() + cfg.sections = lambda: ['foo'] + assert cfg.has_repos is True + + def test_has_no_repos(self): + cfg = conf.cephdeploy.Conf() + cfg.sections = lambda: ['ceph-deploy-install'] + assert cfg.has_repos is False + + def test_get_repos_is_empty(self): + cfg = conf.cephdeploy.Conf() + cfg.sections = lambda: ['ceph-deploy-install'] + assert cfg.get_repos() == [] + + def test_get_repos_is_not_empty(self): + cfg = conf.cephdeploy.Conf() + cfg.sections = lambda: ['ceph-deploy-install', 'foo'] + assert cfg.get_repos() == ['foo'] + + def test_get_safe_not_empty(self): + cfg = conf.cephdeploy.Conf() + cfg.get = lambda section, key: True + assert cfg.get_safe(1, 2) is True + + def test_get_safe_empty(self): + cfg = conf.cephdeploy.Conf() + assert cfg.get_safe(1, 2) is None + + +class TestConfGetList(object): + + def test_get_list_empty(self): + cfg = conf.cephdeploy.Conf() + conf_file = StringIO(dedent(""" + [foo] + key = + """)) + cfg.readfp(conf_file) + assert cfg.get_list('foo', 'key') == [''] + + def test_get_list_empty_when_no_key(self): + cfg = conf.cephdeploy.Conf() + conf_file = StringIO(dedent(""" + [foo] + """)) + cfg.readfp(conf_file) + assert cfg.get_list('foo', 'key') == [] + + def test_get_list_if_value_is_one_item(self): + cfg = conf.cephdeploy.Conf() + conf_file = StringIO(dedent(""" + [foo] + key = 1 + """)) + cfg.readfp(conf_file) + assert cfg.get_list('foo', 'key') == ['1'] + + def test_get_list_with_mutltiple_items(self): + cfg = conf.cephdeploy.Conf() + conf_file = StringIO(dedent(""" + [foo] + key = 1, 3, 4 + """)) + cfg.readfp(conf_file) + assert cfg.get_list('foo', 'key') == ['1', '3', '4'] + + def test_get_rid_of_comments(self): + cfg = conf.cephdeploy.Conf() + conf_file = StringIO(dedent(""" + [foo] + key = 1, 3, 4 # this is a wonderful comment y'all + """)) + cfg.readfp(conf_file) + assert cfg.get_list('foo', 'key') == ['1', '3', '4'] + + def test_get_rid_of_whitespace(self): + cfg = conf.cephdeploy.Conf() + conf_file = StringIO(dedent(""" + [foo] + key = 1, 3 , 4 + """)) + cfg.readfp(conf_file) + assert cfg.get_list('foo', 'key') == ['1', '3', '4'] + + def test_get_default_repo(self): + cfg = conf.cephdeploy.Conf() + conf_file = StringIO(dedent(""" + [foo] + default = True + """)) + cfg.readfp(conf_file) + assert cfg.get_default_repo() == 'foo' + + def test_get_default_repo_fails_non_truthy(self): + cfg = conf.cephdeploy.Conf() + conf_file = StringIO(dedent(""" + [foo] + default = 0 + """)) + cfg.readfp(conf_file) + assert cfg.get_default_repo() is False + + +truthy_values = ['yes', 'true', 'on'] +falsy_values = ['no', 'false', 'off'] + + +class TestSetOverrides(object): + + def setup(self): + self.args = Mock() + self.args.func.__name__ = 'foo' + self.conf = Mock() + + def test_override_global(self): + self.conf.sections = Mock(return_value=['ceph-deploy-global']) + self.conf.items = Mock(return_value=(('foo', 1),)) + arg_obj = conf.cephdeploy.set_overrides(self.args, self.conf) + assert arg_obj.foo == 1 + + def test_override_foo_section(self): + self.conf.sections = Mock( + return_value=['ceph-deploy-global', 'ceph-deploy-foo'] + ) + self.conf.items = Mock(return_value=(('bar', 1),)) + arg_obj = conf.cephdeploy.set_overrides(self.args, self.conf) + assert arg_obj.bar == 1 + + @pytest.mark.parametrize('value', truthy_values) + def test_override_truthy_values(self, value): + self.conf.sections = Mock( + return_value=['ceph-deploy-global', 'ceph-deploy-install'] + ) + self.conf.items = Mock(return_value=(('bar', value),)) + arg_obj = conf.cephdeploy.set_overrides(self.args, self.conf) + assert arg_obj.bar is True + + @pytest.mark.parametrize('value', falsy_values) + def test_override_falsy_values(self, value): + self.conf.sections = Mock( + return_value=['ceph-deploy-global', 'ceph-deploy-install'] + ) + self.conf.items = Mock(return_value=(('bar', value),)) + arg_obj = conf.cephdeploy.set_overrides(self.args, self.conf) + assert arg_obj.bar is False diff --git a/ceph_deploy/tests/unit/test_exc.py b/ceph_deploy/tests/unit/test_exc.py new file mode 100644 index 0000000..cd38686 --- /dev/null +++ b/ceph_deploy/tests/unit/test_exc.py @@ -0,0 +1,16 @@ +from pytest import raises +from ceph_deploy import exc + + +class TestExecutableNotFound(object): + + def test_executable_is_used(self): + with raises(exc.DeployError) as error: + raise exc.ExecutableNotFound('vim', 'node1') + assert "'vim'" in str(error) + + def test_host_is_used(self): + with raises(exc.DeployError) as error: + raise exc.ExecutableNotFound('vim', 'node1') + assert "node1" in str(error) + diff --git a/ceph_deploy/tests/unit/test_mon.py b/ceph_deploy/tests/unit/test_mon.py new file mode 100644 index 0000000..012a6b6 --- /dev/null +++ b/ceph_deploy/tests/unit/test_mon.py @@ -0,0 +1,224 @@ +import sys +import py.test +from mock import Mock, patch +# the below import of mock again is to workaround a py.test issue: +# https://github.com/pytest-dev/pytest/issues/1035 +import mock +from ceph_deploy import mon +from ceph_deploy.hosts.common import mon_create +from ceph_deploy.misc import mon_hosts, remote_shortname + + +def path_exists(target_paths=None): + """ + A quick helper that enforces a check for the existence of a path. Since we + are dealing with fakes, we allow to pass in a list of paths that are OK to + return True, otherwise return False. + """ + target_paths = target_paths or [] + + def exists(path): + return path in target_paths + return exists + + +@py.test.mark.skipif(reason='failing due to removal of pushy') +class TestCreateMon(object): + + def setup(self): + # this setup is way more verbose than normal + # but we are forced to because this function needs a lot + # passed in for remote execution. No other way around it. + self.socket = Mock() + self.socket.gethostname.return_value = 'hostname' + self.fake_file = mock.mock_open() + self.distro = Mock() + self.sprocess = Mock() + self.paths = Mock() + self.paths.mon.path = Mock(return_value='/cluster-hostname') + self.logger = Mock() + self.logger.info = self.logger.debug = lambda x: sys.stdout.write(str(x) + "\n") + + def test_create_mon_tmp_path_if_nonexistent(self): + self.distro.sudo_conn.modules.os.path.exists = Mock( + side_effect=path_exists(['/cluster-hostname'])) + self.paths.mon.constants.tmp_path = '/var/lib/ceph/tmp' + args = Mock(return_value=['cluster', '1234', 'initd']) + args.cluster = 'cluster' + with patch('ceph_deploy.hosts.common.conf.load'): + mon_create(self.distro, args, Mock(), 'hostname') + + result = self.distro.conn.remote_module.create_mon_path.call_args_list[-1] + assert result ==mock.call('/var/lib/ceph/mon/cluster-hostname') + + def test_write_keyring(self): + self.distro.sudo_conn.modules.os.path.exists = Mock( + side_effect=path_exists(['/'])) + args = Mock(return_value=['cluster', '1234', 'initd']) + args.cluster = 'cluster' + with patch('ceph_deploy.hosts.common.conf.load'): + with patch('ceph_deploy.hosts.common.remote') as fake_remote: + mon_create(self.distro, self.logger, args, Mock(), 'hostname') + + # the second argument to `remote()` should be the write func + result = fake_remote.call_args_list[1][0][-1].__name__ + assert result == 'write_monitor_keyring' + + def test_write_done_path(self): + self.distro.sudo_conn.modules.os.path.exists = Mock( + side_effect=path_exists(['/'])) + args = Mock(return_value=['cluster', '1234', 'initd']) + args.cluster = 'cluster' + + with patch('ceph_deploy.hosts.common.conf.load'): + with patch('ceph_deploy.hosts.common.remote') as fake_remote: + mon_create(self.distro, self.logger, args, Mock(), 'hostname') + + # the second to last argument to `remote()` should be the done path + # write + result = fake_remote.call_args_list[-2][0][-1].__name__ + assert result == 'create_done_path' + + def test_write_init_path(self): + self.distro.sudo_conn.modules.os.path.exists = Mock( + side_effect=path_exists(['/'])) + args = Mock(return_value=['cluster', '1234', 'initd']) + args.cluster = 'cluster' + + with patch('ceph_deploy.hosts.common.conf.load'): + with patch('ceph_deploy.hosts.common.remote') as fake_remote: + mon_create(self.distro, self.logger, args, Mock(), 'hostname') + + result = fake_remote.call_args_list[-1][0][-1].__name__ + assert result == 'create_init_path' + + def test_mon_hosts(self): + hosts = Mock() + for (name, host) in mon_hosts(('name1', 'name2.localdomain', + 'name3:1.2.3.6', 'name4:localhost.localdomain')): + hosts.get(name, host) + + expected = [mock.call.get('name1', 'name1'), + mock.call.get('name2', 'name2.localdomain'), + mock.call.get('name3', '1.2.3.6'), + mock.call.get('name4', 'localhost.localdomain')] + result = hosts.mock_calls + assert result == expected + + def test_remote_shortname_fqdn(self): + socket = Mock() + socket.gethostname.return_value = 'host.f.q.d.n' + assert remote_shortname(socket) == 'host' + + def test_remote_shortname_host(self): + socket = Mock() + socket.gethostname.return_value = 'host' + assert remote_shortname(socket) == 'host' + + +@py.test.mark.skipif(reason='failing due to removal of pushy') +class TestIsRunning(object): + + def setup(self): + self.fake_popen = Mock() + self.fake_popen.return_value = self.fake_popen + + def test_is_running_centos(self): + centos_out = ['', "mon.mire094: running {'version': '0.6.15'}"] + self.fake_popen.communicate = Mock(return_value=centos_out) + with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen): + result = mon.is_running(['ceph', 'status']) + assert result is True + + def test_is_not_running_centos(self): + centos_out = ['', "mon.mire094: not running {'version': '0.6.15'}"] + self.fake_popen.communicate = Mock(return_value=centos_out) + with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen): + result = mon.is_running(['ceph', 'status']) + assert result is False + + def test_is_dead_centos(self): + centos_out = ['', "mon.mire094: dead {'version': '0.6.15'}"] + self.fake_popen.communicate = Mock(return_value=centos_out) + with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen): + result = mon.is_running(['ceph', 'status']) + assert result is False + + def test_is_running_ubuntu(self): + ubuntu_out = ['', "ceph-mon (ceph/mira103) start/running, process 5866"] + self.fake_popen.communicate = Mock(return_value=ubuntu_out) + with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen): + result = mon.is_running(['ceph', 'status']) + assert result is True + + def test_is_not_running_ubuntu(self): + ubuntu_out = ['', "ceph-mon (ceph/mira103) start/dead, process 5866"] + self.fake_popen.communicate = Mock(return_value=ubuntu_out) + with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen): + result = mon.is_running(['ceph', 'status']) + assert result is False + + def test_is_dead_ubuntu(self): + ubuntu_out = ['', "ceph-mon (ceph/mira103) stop/not running, process 5866"] + self.fake_popen.communicate = Mock(return_value=ubuntu_out) + with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen): + result = mon.is_running(['ceph', 'status']) + assert result is False + + +class TestKeyringParser(object): + + def test_line_ends_with_newline_char(self, tmpdir): + keyring = tmpdir.join('foo.mon.keyring') + keyring.write('[section]\nasdfasdf\nkey = value') + result = mon.keyring_parser(keyring.strpath) + + assert result == ['section'] + + def test_line_does_not_end_with_newline_char(self, tmpdir): + keyring = tmpdir.join('foo.mon.keyring') + keyring.write('[section]asdfasdf\nkey = value') + result = mon.keyring_parser(keyring.strpath) + + assert result == [] + + +class TestConcatenateKeyrings(object): + + def setup(self): + self.args = Mock() + + def make_keyring(self, tmpdir, name, contents): + keyring = tmpdir.join(name) + keyring.write(contents) + return keyring + + def test_multiple_keyrings_work(self, tmpdir): + self.make_keyring(tmpdir, 'foo.keyring', '[mon.1]\nkey = value\n') + self.make_keyring(tmpdir, 'bar.keyring', '[mon.2]\nkey = value\n') + self.make_keyring(tmpdir, 'fez.keyring', '[mon.3]\nkey = value\n') + self.args.keyrings = tmpdir.strpath + result = mon.concatenate_keyrings(self.args).split('\n') + assert '[mon.2]' in result + assert 'key = value' in result + assert '[mon.3]' in result + assert 'key = value' in result + assert '[mon.1]' in result + assert 'key = value' in result + + def test_skips_duplicate_content(self, tmpdir): + self.make_keyring(tmpdir, 'foo.keyring', '[mon.1]\nkey = value\n') + self.make_keyring(tmpdir, 'bar.keyring', '[mon.2]\nkey = value\n') + self.make_keyring(tmpdir, 'fez.keyring', '[mon.3]\nkey = value\n') + self.make_keyring(tmpdir, 'dupe.keyring', '[mon.3]\nkey = value\n') + self.args.keyrings = tmpdir.strpath + result = mon.concatenate_keyrings(self.args).split('\n') + assert result.count('[mon.3]') == 1 + assert result.count('[mon.2]') == 1 + assert result.count('[mon.1]') == 1 + + def test_errors_when_no_keyrings(self, tmpdir): + self.args.keyrings = tmpdir.strpath + + with py.test.raises(RuntimeError): + mon.concatenate_keyrings(self.args) diff --git a/ceph_deploy/tests/unit/test_new.py b/ceph_deploy/tests/unit/test_new.py new file mode 100644 index 0000000..a32b2ea --- /dev/null +++ b/ceph_deploy/tests/unit/test_new.py @@ -0,0 +1,28 @@ +from ceph_deploy import new +from ceph_deploy.tests import util +import pytest + + +class TestValidateHostIp(object): + + def test_for_all_subnets_all_ips_match(self): + ips = util.generate_ips("10.0.0.1", "10.0.0.40") + ips.extend(util.generate_ips("10.0.1.1", "10.0.1.40")) + subnets = ["10.0.0.1/16", "10.0.1.1/16"] + assert new.validate_host_ip(ips, subnets) is None + + def test_all_subnets_have_one_matching_ip(self): + ips = util.generate_ips("10.0.0.1", "10.0.0.40") + ips.extend(util.generate_ips("10.0.1.1", "10.0.1.40")) + # regardless of extra IPs that may not match. The requirement + # is already satisfied + ips.extend(util.generate_ips("10.1.2.1", "10.1.2.40")) + subnets = ["10.0.0.1/16", "10.0.1.1/16"] + assert new.validate_host_ip(ips, subnets) is None + + def test_not_all_subnets_have_one_matching_ip(self): + ips = util.generate_ips("10.0.0.1", "10.0.0.40") + ips.extend(util.generate_ips("10.0.1.1", "10.0.1.40")) + subnets = ["10.0.0.1/16", "10.1.1.1/16"] + with pytest.raises(RuntimeError): + new.validate_host_ip(ips, subnets) diff --git a/ceph_deploy/tests/unit/util/test_arg_validators.py b/ceph_deploy/tests/unit/util/test_arg_validators.py new file mode 100644 index 0000000..8acb712 --- /dev/null +++ b/ceph_deploy/tests/unit/util/test_arg_validators.py @@ -0,0 +1,128 @@ +import socket +from mock import Mock +from argparse import ArgumentError +from pytest import raises + +from ceph_deploy.util import arg_validators + + +class TestRegexMatch(object): + + def test_match_raises(self): + validator = arg_validators.RegexMatch(r'\d+') + with raises(ArgumentError): + validator('1') + + def test_match_passes(self): + validator = arg_validators.RegexMatch(r'\d+') + assert validator('foo') == 'foo' + + def test_default_error_message(self): + validator = arg_validators.RegexMatch(r'\d+') + with raises(ArgumentError) as error: + validator('1') + message = error.value.message + assert message == 'must match pattern \d+' + + def test_custom_error_message(self): + validator = arg_validators.RegexMatch(r'\d+', 'wat') + with raises(ArgumentError) as error: + validator('1') + message = error.value.message + assert message == 'wat' + + +class TestHostName(object): + + def setup(self): + self.fake_sock = Mock() + self.fake_sock.gaierror = socket.gaierror + self.fake_sock.getaddrinfo.side_effect = socket.gaierror + + def test_hostname_is_not_resolvable(self): + hostname = arg_validators.Hostname(self.fake_sock) + with raises(ArgumentError) as error: + hostname('unresolvable') + message = error.value.message + assert 'is not resolvable' in message + + def test_hostname_with_name_is_not_resolvable(self): + hostname = arg_validators.Hostname(self.fake_sock) + with raises(ArgumentError) as error: + hostname('name:foo') + message = error.value.message + assert 'foo is not resolvable' in message + + def test_ip_is_allowed_when_paired_with_host(self): + self.fake_sock = Mock() + self.fake_sock.gaierror = socket.gaierror + + def side_effect(*args): + # First call passes, second call raises socket.gaierror + self.fake_sock.getaddrinfo.side_effect = socket.gaierror + + self.fake_sock.getaddrinfo.side_effect = side_effect + hostname = arg_validators.Hostname(self.fake_sock) + result = hostname('name:192.168.1.111') + assert result == 'name:192.168.1.111' + + def test_ipv6_is_allowed_when_paired_with_host(self): + self.fake_sock = Mock() + self.fake_sock.gaierror = socket.gaierror + + def side_effect(*args): + # First call passes, second call raises socket.gaierror + self.fake_sock.getaddrinfo.side_effect = socket.gaierror + + self.fake_sock.getaddrinfo.side_effect = side_effect + hostname = arg_validators.Hostname(self.fake_sock) + result = hostname('name:2001:0db8:85a3:0000:0000:8a2e:0370:7334') + assert result == 'name:2001:0db8:85a3:0000:0000:8a2e:0370:7334' + + def test_host_is_resolvable(self): + self.fake_sock = Mock() + self.fake_sock.gaierror = socket.gaierror + + def side_effect(*args): + # First call passes, second call raises socket.gaierror + self.fake_sock.getaddrinfo.side_effect = socket.gaierror + + self.fake_sock.getaddrinfo.side_effect = side_effect + hostname = arg_validators.Hostname(self.fake_sock) + result = hostname('name:example.com') + assert result == 'name:example.com' + + def test_hostname_must_be_an_ip(self): + self.fake_sock.getaddrinfo = Mock() + hostname = arg_validators.Hostname(self.fake_sock) + with raises(ArgumentError) as error: + hostname('0') + message = error.value.message + assert '0 must be a hostname' in message + + +class TestSubnet(object): + + def test_subnet_has_less_than_four_numbers(self): + validator = arg_validators.Subnet() + + with raises(ArgumentError) as error: + validator('3.3.3/12') + message = error.value.message + assert 'at least 4 numbers' in message + + def test_subnet_has_non_digits(self): + validator = arg_validators.Subnet() + + with raises(ArgumentError) as error: + validator('3.3.3.a/12') + message = error.value.message + assert 'have digits separated by dots' in message + + def test_subnet_missing_slash(self): + validator = arg_validators.Subnet() + + with raises(ArgumentError) as error: + validator('3.3.3.3') + message = error.value.message + assert 'must contain a slash' in message diff --git a/ceph_deploy/tests/unit/util/test_constants.py b/ceph_deploy/tests/unit/util/test_constants.py new file mode 100644 index 0000000..ce32a57 --- /dev/null +++ b/ceph_deploy/tests/unit/util/test_constants.py @@ -0,0 +1,16 @@ +from ceph_deploy.util import constants + + +class TestPaths(object): + + def test_mon_path(self): + assert constants.mon_path.startswith('/') + assert constants.mon_path.endswith('/mon') + + def test_mds_path(self): + assert constants.mds_path.startswith('/') + assert constants.mds_path.endswith('/mds') + + def test_tmp_path(self): + assert constants.tmp_path.startswith('/') + assert constants.tmp_path.endswith('/tmp') diff --git a/ceph_deploy/tests/unit/util/test_net.py b/ceph_deploy/tests/unit/util/test_net.py new file mode 100644 index 0000000..9c71bad --- /dev/null +++ b/ceph_deploy/tests/unit/util/test_net.py @@ -0,0 +1,53 @@ +try: + from urllib.error import HTTPError +except ImportError: + from urllib2 import HTTPError + +try: + from StringIO import StringIO +except ImportError: + from io import StringIO + +from ceph_deploy.util import net +from ceph_deploy.tests import util +import pytest + + +# The following class adds about 1900 tests via py.test generation + +class TestIpInSubnet(object): + + @pytest.mark.parametrize('ip', util.generate_ips("10.0.0.1", "10.0.0.255")) + def test_correct_for_10_0_0_255(self, ip): + assert net.ip_in_subnet(ip, "10.0.0.0/16") + + @pytest.mark.parametrize('ip', util.generate_ips("10.0.0.1", "10.0.0.255")) + def test_false_for_10_0_0_255(self, ip): + assert net.ip_in_subnet(ip, "10.2.0.0/24") is False + + @pytest.mark.parametrize('ip', util.generate_ips("255.255.255.1", "255.255.255.255")) + def test_false_for_255_addresses(self, ip): + assert net.ip_in_subnet(ip, "10.9.1.0/16") is False + + @pytest.mark.parametrize('ip', util.generate_ips("172.7.1.1", "172.7.1.255")) + def test_false_for_172_addresses(self, ip): + assert net.ip_in_subnet(ip, "172.3.0.0/16") is False + + @pytest.mark.parametrize('ip', util.generate_ips("10.9.8.0", "10.9.8.255")) + def test_true_for_16_subnets(self, ip): + assert net.ip_in_subnet(ip, "10.9.1.0/16") is True + + @pytest.mark.parametrize('ip', util.generate_ips("10.9.8.0", "10.9.8.255")) + def test_false_for_24_subnets(self, ip): + assert net.ip_in_subnet(ip, "10.9.1.0/24") is False + + +class TestGetRequest(object): + + def test_urlopen_fails(self, monkeypatch): + def bad_urlopen(url): + raise HTTPError('url', 500, 'error', '', StringIO()) + + monkeypatch.setattr(net, 'urlopen', bad_urlopen) + with pytest.raises(RuntimeError): + net.get_request('https://example.ceph.com') diff --git a/ceph_deploy/tests/unit/util/test_packages.py b/ceph_deploy/tests/unit/util/test_packages.py new file mode 100644 index 0000000..73cc3e8 --- /dev/null +++ b/ceph_deploy/tests/unit/util/test_packages.py @@ -0,0 +1,43 @@ +from mock import Mock, patch +from ceph_deploy.exc import ExecutableNotFound +from ceph_deploy.util import packages + + +class TestCephIsInstalled(object): + + def test_installed(self): + with patch('ceph_deploy.util.packages.system'): + c = packages.Ceph(Mock()) + assert c.installed is True + + def test_not_installed(self): + with patch('ceph_deploy.util.packages.system') as fsystem: + bad_executable = Mock( + side_effect=ExecutableNotFound('host', 'ceph') + ) + fsystem.executable_path = bad_executable + c = packages.Ceph(Mock()) + assert c.installed is False + + +class TestCephVersion(object): + + def test_executable_not_found(self): + with patch('ceph_deploy.util.packages.system') as fsystem: + bad_executable = Mock( + side_effect=ExecutableNotFound('host', 'ceph') + ) + fsystem.executable_path = bad_executable + c = packages.Ceph(Mock()) + assert c._get_version_output() == '' + + def test_output_is_unusable(self): + _check = Mock(return_value=(b'', b'', 1)) + c = packages.Ceph(Mock(), _check=_check) + assert c._get_version_output() == '' + + def test_output_usable(self): + version = b'ceph version 9.0.1-kjh234h123hd (asdf78asdjh234)' + _check = Mock(return_value=(version, b'', 1)) + c = packages.Ceph(Mock(), _check=_check) + assert c._get_version_output() == '9.0.1-kjh234h123hd' diff --git a/ceph_deploy/tests/unit/util/test_paths.py b/ceph_deploy/tests/unit/util/test_paths.py new file mode 100644 index 0000000..64ff906 --- /dev/null +++ b/ceph_deploy/tests/unit/util/test_paths.py @@ -0,0 +1,50 @@ +from ceph_deploy.util import paths + + +class TestMonPaths(object): + + def test_base_path(self): + result = paths.mon.base('mycluster') + assert result.endswith('/mycluster-') + + def test_path(self): + result = paths.mon.path('mycluster', 'myhostname') + assert result.startswith('/') + assert result.endswith('/mycluster-myhostname') + + def test_done(self): + result = paths.mon.done('mycluster', 'myhostname') + assert result.startswith('/') + assert result.endswith('mycluster-myhostname/done') + + def test_init(self): + result = paths.mon.init('mycluster', 'myhostname', 'init') + assert result.startswith('/') + assert result.endswith('mycluster-myhostname/init') + + def test_keyring(self): + result = paths.mon.keyring('mycluster', 'myhostname') + assert result.startswith('/') + assert result.endswith('tmp/mycluster-myhostname.mon.keyring') + + def test_asok(self): + result = paths.mon.asok('mycluster', 'myhostname') + assert result.startswith('/') + assert result.endswith('mycluster-mon.myhostname.asok') + + def test_monmap(self): + result = paths.mon.monmap('mycluster', 'myhostname') + assert result.startswith('/') + assert result.endswith('tmp/mycluster.myhostname.monmap') + + def test_gpg_url_release(self): + result = paths.gpg.url('release') + assert result == "https://download.ceph.com/keys/release.asc" + + def test_gpg_url_autobuild(self): + result = paths.gpg.url('autobuild') + assert result == "https://download.ceph.com/keys/autobuild.asc" + + def test_gpg_url_http(self): + result = paths.gpg.url('release', protocol="http") + assert result == "http://download.ceph.com/keys/release.asc" diff --git a/ceph_deploy/tests/unit/util/test_pkg_managers.py b/ceph_deploy/tests/unit/util/test_pkg_managers.py new file mode 100644 index 0000000..5f06ad0 --- /dev/null +++ b/ceph_deploy/tests/unit/util/test_pkg_managers.py @@ -0,0 +1,195 @@ +from mock import patch, Mock +from ceph_deploy.util import pkg_managers + + +class TestApt(object): + + def setup(self): + self.to_patch = 'ceph_deploy.util.pkg_managers.remoto.process.run' + + def test_install_single_package(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.Apt(Mock()).install('vim') + result = fake_run.call_args_list[-1] + assert 'install' in result[0][-1] + assert result[0][-1][-1] == 'vim' + + def test_install_multiple_packages(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.Apt(Mock()).install(['vim', 'zsh']) + result = fake_run.call_args_list[-1] + assert 'install' in result[0][-1] + assert result[0][-1][-2:] == ['vim', 'zsh'] + + def test_remove_single_package(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.Apt(Mock()).remove('vim') + result = fake_run.call_args_list[-1] + assert 'remove' in result[0][-1] + assert result[0][-1][-1] == 'vim' + + def test_remove_multiple_packages(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.Apt(Mock()).remove(['vim', 'zsh']) + result = fake_run.call_args_list[-1] + assert 'remove' in result[0][-1] + assert result[0][-1][-2:] == ['vim', 'zsh'] + + +class TestYum(object): + + def setup(self): + self.to_patch = 'ceph_deploy.util.pkg_managers.remoto.process.run' + + def test_install_single_package(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.Yum(Mock()).install('vim') + result = fake_run.call_args_list[-1] + assert 'install' in result[0][-1] + assert result[0][-1][-1] == 'vim' + + def test_install_multiple_packages(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.Yum(Mock()).install(['vim', 'zsh']) + result = fake_run.call_args_list[-1] + assert 'install' in result[0][-1] + assert result[0][-1][-2:] == ['vim', 'zsh'] + + def test_remove_single_package(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.Yum(Mock()).remove('vim') + result = fake_run.call_args_list[-1] + assert 'remove' in result[0][-1] + assert result[0][-1][-1] == 'vim' + + def test_remove_multiple_packages(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.Yum(Mock()).remove(['vim', 'zsh']) + result = fake_run.call_args_list[-1] + assert 'remove' in result[0][-1] + assert result[0][-1][-2:] == ['vim', 'zsh'] + + +class TestZypper(object): + + def setup(self): + self.to_patch = 'ceph_deploy.util.pkg_managers.remoto.process.run' + self.to_check = 'ceph_deploy.util.pkg_managers.remoto.process.check' + + def test_install_single_package(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.Zypper(Mock()).install('vim') + result = fake_run.call_args_list[-1] + assert 'install' in result[0][-1] + assert result[0][-1][-1] == 'vim' + + def test_install_multiple_packages(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.Zypper(Mock()).install(['vim', 'zsh']) + result = fake_run.call_args_list[-1] + assert 'install' in result[0][-1] + assert result[0][-1][-2:] == ['vim', 'zsh'] + + def test_remove_single_package(self): + fake_check = Mock() + fake_check.return_value = '', '', 0 + with patch(self.to_check, fake_check): + pkg_managers.Zypper(Mock()).remove('vim') + result = fake_check.call_args_list[-1] + assert 'remove' in result[0][-1] + assert result[0][-1][-1] == 'vim' + + def test_remove_multiple_packages(self): + fake_check = Mock() + fake_check.return_value = '', '', 0 + with patch(self.to_check, fake_check): + pkg_managers.Zypper(Mock()).remove(['vim', 'zsh']) + result = fake_check.call_args_list[-1] + assert 'remove' in result[0][-1] + assert result[0][-1][-2:] == ['vim', 'zsh'] + + +class TestDNF(object): + + def setup(self): + self.to_patch = 'ceph_deploy.util.pkg_managers.remoto.process.run' + + def test_install_single_package(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.DNF(Mock()).install('vim') + result = fake_run.call_args_list[-1] + assert 'install' in result[0][-1] + assert result[0][-1][-1] == 'vim' + + def test_install_multiple_packages(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.DNF(Mock()).install(['vim', 'zsh']) + result = fake_run.call_args_list[-1] + assert 'install' in result[0][-1] + assert result[0][-1][-2:] == ['vim', 'zsh'] + + def test_remove_single_package(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.DNF(Mock()).remove('vim') + result = fake_run.call_args_list[-1] + assert 'remove' in result[0][-1] + assert result[0][-1][-1] == 'vim' + + def test_remove_multiple_packages(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.DNF(Mock()).remove(['vim', 'zsh']) + result = fake_run.call_args_list[-1] + assert 'remove' in result[0][-1] + + +class TestAtpRpm(object): + + def setup(self): + self.to_patch = 'ceph_deploy.util.pkg_managers.remoto.process.run' + + def test_install_single_package(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.AptRpm(Mock()).install('vim') + result = fake_run.call_args_list[-1] + assert 'install' in result[0][-1] + assert result[0][-1][-1] == 'vim' + + def test_install_multiple_packages(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.AptRpm(Mock()).install(['vim', 'zsh']) + result = fake_run.call_args_list[-1] + assert 'install' in result[0][-1] + assert result[0][-1][-2:] == ['vim', 'zsh'] + + def test_remove_single_package(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.AptRpm(Mock()).remove('vim') + result = fake_run.call_args_list[-1] + assert 'remove' in result[0][-1] + assert result[0][-1][-1] == 'vim' + + def test_remove_multiple_packages(self): + fake_run = Mock() + with patch(self.to_patch, fake_run): + pkg_managers.AptRpm(Mock()).remove(['vim', 'zsh']) + result = fake_run.call_args_list[-1] + assert 'remove' in result[0][-1] + assert result[0][-1][-2:] == ['vim', 'zsh'] + diff --git a/ceph_deploy/tests/unit/util/test_system.py b/ceph_deploy/tests/unit/util/test_system.py new file mode 100644 index 0000000..73ca546 --- /dev/null +++ b/ceph_deploy/tests/unit/util/test_system.py @@ -0,0 +1,57 @@ +from mock import Mock +from pytest import raises +from ceph_deploy.util import system +from ceph_deploy import exc + + +class TestExecutablePath(object): + + def test_returns_path(self): + fake_conn = Mock() + fake_conn.remote_module.which = Mock(return_value='/path') + result = system.executable_path(fake_conn, 'foo') + assert result == '/path' + + def test_cannot_find_executable(self): + fake_conn = Mock() + fake_conn.remote_module.which = Mock(return_value=None) + with raises(exc.ExecutableNotFound): + system.executable_path(fake_conn, 'foo') + + +class TestIsUpstart(object): + + def test_it_is_actually_systemd(self): + fake_conn = Mock() + fake_conn.remote_module.grep = Mock(return_value=True) + result = system.is_upstart(fake_conn) + assert result is False + + def test_no_initctl(self): + fake_conn = Mock() + fake_conn.remote_module.grep = Mock(return_value=False) + fake_conn.remote_module.which = Mock(return_value=None) + result = system.is_upstart(fake_conn) + assert result is False + + def test_initctl_version_says_upstart(self, monkeypatch): + fake_conn = Mock() + fake_conn.remote_module.grep = Mock(return_value=False) + fake_conn.remote_module.which = Mock(return_value='/bin/initctl') + fake_stdout = ([b'init', b'(upstart 1.12.1)'], [], 0) + fake_check = Mock(return_value=fake_stdout) + monkeypatch.setattr("ceph_deploy.util.system.remoto.process.check", lambda *a: fake_check()) + + result = system.is_upstart(fake_conn) + assert result is True + + def test_initctl_version_says_something_else(self, monkeypatch): + fake_conn = Mock() + fake_conn.remote_module.grep = Mock(return_value=False) + fake_conn.remote_module.which = Mock(return_value='/bin/initctl') + fake_stdout = ([b'nosh', b'version', b'1.14'], [], 0) + fake_check = Mock(return_value=fake_stdout) + monkeypatch.setattr("ceph_deploy.util.system.remoto.process.check", lambda *a: fake_check()) + + result = system.is_upstart(fake_conn) + assert result is False diff --git a/ceph_deploy/tests/unit/util/test_templates.py b/ceph_deploy/tests/unit/util/test_templates.py new file mode 100644 index 0000000..36c39cd --- /dev/null +++ b/ceph_deploy/tests/unit/util/test_templates.py @@ -0,0 +1,29 @@ +from textwrap import dedent +from ceph_deploy.util import templates + + +class TestCustomRepo(object): + + def test_only_repo_name(self): + result = templates.custom_repo(reponame='foo') + assert result == '[foo]' + + def test_second_line_with_good_value(self): + result = templates.custom_repo(reponame='foo', enabled=0) + assert result == '[foo]\nenabled=0' + + def test_mixed_values(self): + result = templates.custom_repo( + reponame='foo', + enabled=0, + gpgcheck=1, + baseurl='example.org') + assert result == dedent("""\ + [foo] + baseurl=example.org + enabled=0 + gpgcheck=1""") + + def test_allow_invalid_options(self): + result = templates.custom_repo(reponame='foo', bar='bar') + assert result == '[foo]' diff --git a/ceph_deploy/tests/util.py b/ceph_deploy/tests/util.py new file mode 100644 index 0000000..50932da --- /dev/null +++ b/ceph_deploy/tests/util.py @@ -0,0 +1,33 @@ + + +def generate_ips(start_ip, end_ip): + start = list(map(int, start_ip.split("."))) + end = list(map(int, end_ip.split("."))) + temp = start + ip_range = [] + + ip_range.append(start_ip) + while temp != end: + start[3] += 1 + for i in (3, 2, 1): + if temp[i] == 256: + temp[i] = 0 + temp[i-1] += 1 + ip_range.append(".".join(map(str, temp))) + + return ip_range + + +class Empty(object): + """ + A bare class, with explicit behavior for key/value items to be set at + instantiation. + """ + def __init__(self, **kw): + for k, v in kw.items(): + setattr(self, k, v) + + +def assert_too_few_arguments(err): + assert ("error: too few arguments" in err or + "error: the following argument" in err) diff --git a/ceph_deploy/util/__init__.py b/ceph_deploy/util/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ceph_deploy/util/arg_validators.py b/ceph_deploy/util/arg_validators.py new file mode 100644 index 0000000..e61fcd1 --- /dev/null +++ b/ceph_deploy/util/arg_validators.py @@ -0,0 +1,83 @@ +import socket +import argparse +import re + + +class RegexMatch(object): + """ + Performs regular expression match on value. + If the regular expression pattern matches it will it will return an error + message that will work with argparse. + """ + + def __init__(self, pattern, statement=None): + self.string_pattern = pattern + self.pattern = re.compile(pattern) + self.statement = statement + if not self.statement: + self.statement = "must match pattern %s" % self.string_pattern + + def __call__(self, string): + match = self.pattern.search(string) + if match: + raise argparse.ArgumentError(None, self.statement) + return string + + +class Hostname(object): + """ + Checks wether a given hostname is resolvable in DNS, otherwise raising and + argparse error. + """ + + def __init__(self, _socket=None): + self.socket = _socket or socket # just used for testing + + def __call__(self, string): + parts = string.split(':', 1) + name = parts[0] + host = parts[-1] + try: + self.socket.getaddrinfo(host, 0) + except self.socket.gaierror: + msg = "hostname: %s is not resolvable" % host + raise argparse.ArgumentError(None, msg) + + try: + self.socket.getaddrinfo(name, 0, 0, 0, 0, self.socket.AI_NUMERICHOST) + except self.socket.gaierror: + return string # not an IP + else: + msg = '%s must be a hostname not an IP' % name + raise argparse.ArgumentError(None, msg) + + return string + + +class Subnet(object): + """ + A really dumb validator to ensure that we are receiving a subnet (or + something that actually looks like a subnet). + + It doesn't enforce at all the constraints of proper validation as that has + its own set of caveats that are difficult to implement given that + ceph-deploy doesn't (should not) include third party dependencies. + """ + + def __call__(self, string): + ip = string.split('/')[0] + ip_parts = ip.split('.') + + if len(ip_parts) != 4: + err = "subnet must have at least 4 numbers separated by dots like x.x.x.x/xx, but got: %s" % string + raise argparse.ArgumentError(None, err) + + if [i for i in ip_parts[:4] if i.isalpha()]: # only numbers + err = "subnet must have digits separated by dots like x.x.x.x/xx, but got: %s" % string + raise argparse.ArgumentError(None, err) + + if len(string.split('/')) != 2: + err = "subnet must contain a slash, like x.x.x.x/xx, but got: %s" % string + raise argparse.ArgumentError(None, err) + + return string diff --git a/ceph_deploy/util/constants.py b/ceph_deploy/util/constants.py new file mode 100644 index 0000000..a76dcd6 --- /dev/null +++ b/ceph_deploy/util/constants.py @@ -0,0 +1,36 @@ +from os.path import join +from collections import namedtuple + +# Base Path for ceph +base_path = '/var/lib/ceph' + +# Base run Path +base_run_path = '/var/run/ceph' + +tmp_path = join(base_path, 'tmp') + +mon_path = join(base_path, 'mon') + +mgr_path = join(base_path, 'mgr') + +mds_path = join(base_path, 'mds') + +osd_path = join(base_path, 'osd') + +# Default package components to install +_base_components = [ + 'ceph', + 'ceph-osd', + 'ceph-mds', + 'ceph-mon', +] + +default_components = namedtuple('DefaultComponents', ['rpm', 'deb', 'pkgtarxz']) + +# the difference here is because RPMs currently name the radosgw differently than DEBs. +# TODO: This needs to get unified once the packaging naming gets consistent +default_components.rpm = tuple(_base_components + ['ceph-radosgw']) +default_components.deb = tuple(_base_components + ['radosgw']) +default_components.pkgtarxz = tuple(['ceph']) + +gpg_key_base_url = "download.ceph.com/keys/" diff --git a/ceph_deploy/util/decorators.py b/ceph_deploy/util/decorators.py new file mode 100644 index 0000000..70e002a --- /dev/null +++ b/ceph_deploy/util/decorators.py @@ -0,0 +1,112 @@ +import logging +import sys +import traceback +from functools import wraps + + +def catches(catch=None, handler=None, exit=True, handle_all=False): + """ + Very simple decorator that tries any of the exception(s) passed in as + a single exception class or tuple (containing multiple ones) returning the + exception message and optionally handling the problem if it raises with the + handler if it is provided. + + So instead of doing something like this:: + + def bar(): + try: + some_call() + print "Success!" + except TypeError, exc: + print "Error while handling some call: %s" % exc + sys.exit(1) + + You would need to decorate it like this to have the same effect:: + + @catches(TypeError) + def bar(): + some_call() + print "Success!" + + If multiple exceptions need to be caught they need to be provided as a + tuple:: + + @catches((TypeError, AttributeError)) + def bar(): + some_call() + print "Success!" + + If adding a handler, it should accept a single argument, which would be the + exception that was raised, it would look like:: + + def my_handler(exc): + print 'Handling exception %s' % str(exc) + raise SystemExit + + @catches(KeyboardInterrupt, handler=my_handler) + def bar(): + some_call() + + Note that the handler needs to raise its SystemExit if it wants to halt + execution, otherwise the decorator would continue as a normal try/except + block. + + + :param catch: A tuple with one (or more) Exceptions to catch + :param handler: Optional handler to have custom handling of exceptions + :param exit: Raise a ``SystemExit`` after handling exceptions + :param handle_all: Handle all other exceptions via logging. + """ + catch = catch or Exception + logger = logging.getLogger('ceph_deploy') + + def decorate(f): + + @wraps(f) + def newfunc(*a, **kw): + exit_from_catch = False + try: + return f(*a, **kw) + except catch as e: + if handler: + return handler(e) + else: + logger.error(make_exception_message(e)) + + if exit: + exit_from_catch = True + sys.exit(1) + except Exception: # anything else, no need to save the exception as a variable + if handle_all is False: # re-raise if we are not supposed to handle everything + raise + # Make sure we don't spit double tracebacks if we are raising + # SystemExit from the `except catch` block + + if exit_from_catch: + sys.exit(1) + + str_failure = traceback.format_exc() + for line in str_failure.split('\n'): + logger.error("%s" % line) + sys.exit(1) + + return newfunc + + return decorate + +# +# Decorator helpers +# + + +def make_exception_message(exc): + """ + An exception is passed in and this function + returns the proper string depending on the result + so it is readable enough. + """ + if str(exc): + return '%s: %s\n' % (exc.__class__.__name__, exc) + else: + return '%s\n' % (exc.__class__.__name__) + diff --git a/ceph_deploy/util/files.py b/ceph_deploy/util/files.py new file mode 100644 index 0000000..6770596 --- /dev/null +++ b/ceph_deploy/util/files.py @@ -0,0 +1,5 @@ + + +def read_file(path): + with open(path, 'rb') as f: + return f.read() diff --git a/ceph_deploy/util/help_formatters.py b/ceph_deploy/util/help_formatters.py new file mode 100644 index 0000000..2cb562d --- /dev/null +++ b/ceph_deploy/util/help_formatters.py @@ -0,0 +1,33 @@ +import argparse + + +class ToggleRawTextHelpFormatter(argparse.HelpFormatter): + """ArgParse help formatter that allows raw text in individual help strings + + Inspired by the SmartFormatter at + https://bitbucket.org/ruamel/std.argparse + + Normally to include newlines in the help output of argparse, you have + use argparse.RawDescriptionHelpFormatter. But this means raw text is enabled + everywhere, and not just for specific help entries where you might need it. + + This help formatter allows for you to optional enable/toggle raw text on + individual menu items by prefixing the help string with 'R|'. + + Example: + + parser.formatter_class = ToggleRawTextHelpFormatter + parser.add_argument('--verbose', action=store_true, + help='Enable verbose mode') + #Above help is formatted just as default argparse.HelpFormatter + + parser.add_argument('--complex-arg', action=store_true, + help=('R|This help description use ' + 'newlines and tabs and they will be preserved in' + 'the help output.\n\n' + '\tHow cool is that?')) + """ + def _split_lines(self, text, width): + if text.startswith('R|'): + return text[2:].splitlines() + return argparse.HelpFormatter._split_lines(self, text, width) diff --git a/ceph_deploy/util/log.py b/ceph_deploy/util/log.py new file mode 100644 index 0000000..c72303c --- /dev/null +++ b/ceph_deploy/util/log.py @@ -0,0 +1,67 @@ +import logging +import sys + +BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) + +COLORS = { + 'WARNING': YELLOW, + 'INFO': WHITE, + 'DEBUG': BLUE, + 'CRITICAL': RED, + 'ERROR': RED, + 'FATAL': RED, +} + +RESET_SEQ = "\033[0m" +COLOR_SEQ = "\033[1;%dm" +BOLD_SEQ = "\033[1m" + +BASE_COLOR_FORMAT = "[$BOLD%(name)s$RESET][%(color_levelname)-17s] %(message)s" +BASE_FORMAT = "[%(name)s][%(levelname)-6s] %(message)s" +FILE_FORMAT = "[%(asctime)s]" + BASE_FORMAT + +def supports_color(): + """ + Returns True if the running system's terminal supports color, and False + otherwise. + """ + unsupported_platform = (sys.platform in ('win32', 'Pocket PC')) + # isatty is not always implemented, #6223. + is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() + if unsupported_platform or not is_a_tty: + return False + return True + + +def color_message(message): + message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ) + return message + + +class ColoredFormatter(logging.Formatter): + """ + A very basic logging formatter that not only applies color to the levels of + the ouput but will also truncate the level names so that they do not alter + the visuals of logging when presented on the terminal. + """ + + def __init__(self, msg): + logging.Formatter.__init__(self, msg) + + def format(self, record): + levelname = record.levelname + truncated_level = record.levelname[:6] + levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + truncated_level + RESET_SEQ + record.color_levelname = levelname_color + return logging.Formatter.format(self, record) + + +def color_format(): + """ + Main entry point to get a colored formatter, it will use the + BASE_FORMAT by default and fall back to no colors if the system + does not support it + """ + str_format = BASE_COLOR_FORMAT if supports_color() else BASE_FORMAT + color_format = color_message(str_format) + return ColoredFormatter(color_format) diff --git a/ceph_deploy/util/net.py b/ceph_deploy/util/net.py new file mode 100644 index 0000000..73c3914 --- /dev/null +++ b/ceph_deploy/util/net.py @@ -0,0 +1,399 @@ +try: + from urllib.request import urlopen + from urllib.error import HTTPError +except ImportError: + from urllib2 import urlopen, HTTPError + +from ceph_deploy import exc +import logging +import re +import socket +from ceph_deploy.lib import remoto + + +LOG = logging.getLogger(__name__) + + +# TODO: at some point, it might be way more accurate to do this in the actual +# host where we need to get IPs from. SaltStack does this by calling `ip` and +# parsing the output, which is probably the one true way of dealing with it. + +def get_nonlocal_ip(host, subnet=None): + """ + Search result of getaddrinfo() for a non-localhost-net address + """ + try: + ailist = socket.getaddrinfo(host, None) + except socket.gaierror: + raise exc.UnableToResolveError(host) + for ai in ailist: + # an ai is a 5-tuple; the last element is (ip, port) + ip = ai[4][0] + if subnet and ip_in_subnet(ip, subnet): + LOG.info('found ip (%s) for host (%s) to be in cluster subnet (%s)' % ( + ip, + host, + subnet,) + ) + + return ip + + if not ip.startswith('127.'): + if subnet: + LOG.warning('could not match ip (%s) for host (%s) for cluster subnet (%s)' % ( + ip, + host, + subnet,) + ) + return ip + raise exc.UnableToResolveError(host) + + +def ip_in_subnet(ip, subnet): + """Does IP exists in a given subnet utility. Returns a boolean""" + ipaddr = int(''.join(['%02x' % int(x) for x in ip.split('.')]), 16) + netstr, bits = subnet.split('/') + netaddr = int(''.join(['%02x' % int(x) for x in netstr.split('.')]), 16) + mask = (0xffffffff << (32 - int(bits))) & 0xffffffff + return (ipaddr & mask) == (netaddr & mask) + + +def in_subnet(cidr, addrs=None): + """ + Returns True if host is within specified subnet, otherwise False + """ + for address in addrs: + if ip_in_subnet(address, cidr): + return True + return False + + +def ip_addresses(conn, interface=None, include_loopback=False): + """ + Returns a list of IPv4/IPv6 addresses assigned to the host. 127.0.0.1/::1 is + ignored, unless 'include_loopback=True' is indicated. If 'interface' is + provided, then only IP addresses from that interface will be returned. + + Example output looks like:: + + >>> ip_addresses(conn) + >>> ['192.168.1.111', '10.0.1.12', '2001:db8::100'] + + """ + ret = set() + ifaces = linux_interfaces(conn) + if interface is None: + target_ifaces = ifaces + else: + target_ifaces = dict((k, v) for k, v in ifaces.items() + if k == interface) + if not target_ifaces: + LOG.error('Interface {0} not found.'.format(interface)) + for info in target_ifaces.values(): + for ipv4 in info.get('inet', []): + loopback = in_subnet('127.0.0.0/8', [ipv4.get('address')]) or ipv4.get('label') == 'lo' + if not loopback or include_loopback: + ret.add(ipv4['address']) + for secondary in info.get('secondary', []): + addr = secondary.get('address') + if addr and secondary.get('type') == 'inet': + if include_loopback or (not include_loopback and not in_subnet('127.0.0.0/8', [addr])): + ret.add(addr) + for ipv6 in info.get('inet6', []): + # When switching to Python 3 the IPAddress module can do all this work for us + if ipv6.get('address').startswith('fe80::'): + continue + + if not include_loopback and '::1' == ipv6.get('address'): + continue + + ret.add(ipv6['address']) + if ret: + conn.logger.debug('IP addresses found: %s' % str(list(ret))) + return sorted(list(ret)) + + +def linux_interfaces(conn): + """ + Obtain interface information for *NIX/BSD variants in remote servers. + + Example output from a remote node with a couple of interfaces:: + + {'eth0': {'hwaddr': '08:00:27:08:c2:e4', + 'inet': [{'address': '10.0.2.15', + 'broadcast': '10.0.2.255', + 'label': 'eth0', + 'netmask': '255.255.255.0'}], + 'inet6': [{'address': 'fe80::a00:27ff:fe08:c2e4', + 'prefixlen': '64'}], + 'up': True}, + 'eth1': {'hwaddr': '08:00:27:70:06:f1', + 'inet': [{'address': '192.168.111.101', + 'broadcast': '192.168.111.255', + 'label': 'eth1', + 'netmask': '255.255.255.0'}], + 'inet6': [{'address': 'fe80::a00:27ff:fe70:6f1', + 'prefixlen': '64'}], + 'up': True}, + 'lo': {'hwaddr': '00:00:00:00:00:00', + 'inet': [{'address': '127.0.0.1', + 'broadcast': None, + 'label': 'lo', + 'netmask': '255.0.0.0'}], + 'inet6': [{'address': '::1', 'prefixlen': '128'}], + 'up': True}} + + :param conn: A connection object to a remote node + """ + ifaces = dict() + ip_path = conn.remote_module.which('ip') + ifconfig_path = None if ip_path else conn.remote_module.which('ifconfig') + if ip_path: + cmd1, _, _ = remoto.process.check( + conn, + [ + '{0}'.format(ip_path), + 'link', + 'show', + ], + ) + cmd2, _, _ = remoto.process.check( + conn, + [ + '{0}'.format(ip_path), + 'addr', + 'show', + ], + ) + ifaces = _interfaces_ip(b'\n'.join(cmd1).decode('utf-8') + '\n' + + b'\n'.join(cmd2).decode('utf-8')) + elif ifconfig_path: + cmd, _, _ = remoto.process.check( + conn, + [ + '{0}'.format(ifconfig_path), + '-a', + ] + ) + ifaces = _interfaces_ifconfig('\n'.join(cmd)) + return ifaces + + +def _interfaces_ip(out): + """ + Uses ip to return a dictionary of interfaces with various information about + each (up/down state, ip address, netmask, and hwaddr) + """ + ret = dict() + + def parse_network(value, cols): + """ + Return a tuple of ip, netmask, broadcast + based on the current set of cols + """ + brd = None + if '/' in value: # we have a CIDR in this address + ip, cidr = value.split('/') # pylint: disable=C0103 + else: + ip = value # pylint: disable=C0103 + cidr = 32 + + if type_ == 'inet': + mask = cidr_to_ipv4_netmask(int(cidr)) + if 'brd' in cols: + brd = cols[cols.index('brd') + 1] + elif type_ == 'inet6': + mask = cidr + return (ip, mask, brd) + + groups = re.compile('\r?\n\\d').split(out) + for group in groups: + iface = None + data = dict() + + for line in group.splitlines(): + if ' ' not in line: + continue + match = re.match(r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>', line) + if match: + iface, parent, attrs = match.groups() + if 'UP' in attrs.split(','): + data['up'] = True + else: + data['up'] = False + if parent: + data['parent'] = parent + continue + + cols = line.split() + if len(cols) >= 2: + type_, value = tuple(cols[0:2]) + iflabel = cols[-1:][0] + if type_ in ('inet', 'inet6'): + if 'secondary' not in cols: + ipaddr, netmask, broadcast = parse_network(value, cols) + if type_ == 'inet': + if 'inet' not in data: + data['inet'] = list() + addr_obj = dict() + addr_obj['address'] = ipaddr + addr_obj['netmask'] = netmask + addr_obj['broadcast'] = broadcast + addr_obj['label'] = iflabel + data['inet'].append(addr_obj) + elif type_ == 'inet6': + if 'inet6' not in data: + data['inet6'] = list() + addr_obj = dict() + addr_obj['address'] = ipaddr + addr_obj['prefixlen'] = netmask + data['inet6'].append(addr_obj) + else: + if 'secondary' not in data: + data['secondary'] = list() + ip_, mask, brd = parse_network(value, cols) + data['secondary'].append({ + 'type': type_, + 'address': ip_, + 'netmask': mask, + 'broadcast': brd, + 'label': iflabel, + }) + del ip_, mask, brd + elif type_.startswith('link'): + data['hwaddr'] = value + if iface: + ret[iface] = data + del iface, data + return ret + + +def _interfaces_ifconfig(out): + """ + Uses ifconfig to return a dictionary of interfaces with various information + about each (up/down state, ip address, netmask, and hwaddr) + """ + ret = dict() + + piface = re.compile(r'^([^\s:]+)') + pmac = re.compile('.*?(?:HWaddr|ether|address:|lladdr) ([0-9a-fA-F:]+)') + pip = re.compile(r'.*?(?:inet addr:|inet )(.*?)\s') + pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)') + pmask = re.compile(r'.*?(?:Mask:|netmask )(?:((?:0x)?[0-9a-fA-F]{8})|([\d\.]+))') + pmask6 = re.compile(r'.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+)).*') + pupdown = re.compile('UP') + pbcast = re.compile(r'.*?(?:Bcast:|broadcast )([\d\.]+)') + + groups = re.compile('\r?\n(?=\\S)').split(out) + for group in groups: + data = dict() + iface = '' + updown = False + for line in group.splitlines(): + miface = piface.match(line) + mmac = pmac.match(line) + mip = pip.match(line) + mip6 = pip6.match(line) + mupdown = pupdown.search(line) + if miface: + iface = miface.group(1) + if mmac: + data['hwaddr'] = mmac.group(1) + if mip: + if 'inet' not in data: + data['inet'] = list() + addr_obj = dict() + addr_obj['address'] = mip.group(1) + mmask = pmask.match(line) + if mmask: + if mmask.group(1): + mmask = _number_of_set_bits_to_ipv4_netmask( + int(mmask.group(1), 16)) + else: + mmask = mmask.group(2) + addr_obj['netmask'] = mmask + mbcast = pbcast.match(line) + if mbcast: + addr_obj['broadcast'] = mbcast.group(1) + data['inet'].append(addr_obj) + if mupdown: + updown = True + if mip6: + if 'inet6' not in data: + data['inet6'] = list() + addr_obj = dict() + addr_obj['address'] = mip6.group(1) or mip6.group(2) + mmask6 = pmask6.match(line) + if mmask6: + addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2) + data['inet6'].append(addr_obj) + data['up'] = updown + ret[iface] = data + del data + return ret + + +def _number_of_set_bits_to_ipv4_netmask(set_bits): # pylint: disable=C0103 + """ + Returns an IPv4 netmask from the integer representation of that mask. + + Ex. 0xffffff00 -> '255.255.255.0' + """ + return cidr_to_ipv4_netmask(_number_of_set_bits(set_bits)) + + +def _number_of_set_bits(x): + """ + Returns the number of bits that are set in a 32bit int + """ + # Taken from http://stackoverflow.com/a/4912729. Many thanks! + x -= (x >> 1) & 0x55555555 + x = ((x >> 2) & 0x33333333) + (x & 0x33333333) + x = ((x >> 4) + x) & 0x0f0f0f0f + x += x >> 8 + x += x >> 16 + return x & 0x0000003f + + +def cidr_to_ipv4_netmask(cidr_bits): + """ + Returns an IPv4 netmask + """ + try: + cidr_bits = int(cidr_bits) + if not 1 <= cidr_bits <= 32: + return '' + except ValueError: + return '' + + netmask = '' + for idx in range(4): + if idx: + netmask += '.' + if cidr_bits >= 8: + netmask += '255' + cidr_bits -= 8 + else: + netmask += '{0:d}'.format(256 - (2 ** (8 - cidr_bits))) + cidr_bits = 0 + return netmask + + +def get_request(url): + try: + return urlopen(url) + except HTTPError as err: + LOG.error('repository might not be available yet') + raise RuntimeError('%s, failed to fetch %s' % (err, url)) + + +def get_chacra_repo(shaman_url): + """ + From a Shaman URL, get the chacra url for a repository, read the + contents that point to the repo and return it as a string. + """ + shaman_response = get_request(shaman_url) + chacra_url = shaman_response.geturl() + chacra_response = get_request(chacra_url) + + return chacra_response.read() diff --git a/ceph_deploy/util/packages.py b/ceph_deploy/util/packages.py new file mode 100644 index 0000000..a998264 --- /dev/null +++ b/ceph_deploy/util/packages.py @@ -0,0 +1,74 @@ +from ceph_deploy.exc import ExecutableNotFound +from ceph_deploy.util import system, versions +from ceph_deploy.lib import remoto + + +class Ceph(object): + """ + Determine different aspects of the Ceph package, like ``version`` and path + ``executable``. Although mostly provide a version object that helps for + parsing and comparing. + """ + + def __init__(self, conn, _check=None): + self.conn = conn + self._check = _check or remoto.process.check + + @property + def installed(self): + """ + If the ``ceph`` executable exists, then Ceph is installed. Should + probably be revisited if different components do not have the ``ceph`` + executable (this is currently provided by ``ceph-common``). + """ + return bool(self.executable) + + @property + def executable(self): + try: + return system.executable_path(self.conn, 'ceph') + except ExecutableNotFound: + return None + + def _get_version_output(self): + """ + Ignoring errors, call `ceph --version` and return only the version + portion of the output. For example, output like:: + + ceph version 9.0.1-1234kjd (asdflkj2k3jh234jhg) + + Would return:: + + 9.0.1-1234kjd + """ + if not self.executable: + return '' + command = [self.executable, '--version'] + out, _, _ = self._check(self.conn, command) + try: + return out.decode('utf-8').split()[2] + except IndexError: + return '' + + @property + def version(self): + """ + Return a version object (see + :mod:``ceph_deploy.util.versions.NormalizedVersion``) + """ + return versions.parse_version(self._get_version_output) + + +# callback helpers + +def ceph_is_installed(module): + """ + A helper callback to be executed after the connection is made to ensure + that Ceph is installed. + """ + ceph_package = Ceph(module.conn) + if not ceph_package.installed: + host = module.conn.hostname + raise RuntimeError( + 'ceph needs to be installed in remote host: %s' % host + ) diff --git a/ceph_deploy/util/paths/__init__.py b/ceph_deploy/util/paths/__init__.py new file mode 100644 index 0000000..287a551 --- /dev/null +++ b/ceph_deploy/util/paths/__init__.py @@ -0,0 +1,3 @@ +from . import mon # noqa +from . import osd # noqa +from . import gpg # noqa diff --git a/ceph_deploy/util/paths/gpg.py b/ceph_deploy/util/paths/gpg.py new file mode 100644 index 0000000..d9d950b --- /dev/null +++ b/ceph_deploy/util/paths/gpg.py @@ -0,0 +1,8 @@ +from ceph_deploy.util import constants + +def url(key_type, protocol="https"): + return "{protocol}://{url}{key_type}.asc".format( + protocol=protocol, + url=constants.gpg_key_base_url, + key_type=key_type + ) diff --git a/ceph_deploy/util/paths/mon.py b/ceph_deploy/util/paths/mon.py new file mode 100644 index 0000000..0c252d5 --- /dev/null +++ b/ceph_deploy/util/paths/mon.py @@ -0,0 +1,84 @@ +""" +Common paths for mon, based on the constant file paths defined in +``ceph_deploy.util.constants``. +All functions return a string representation of the absolute path +construction. +""" +from os.path import join + +from ceph_deploy.util import constants + + +def base(cluster): + cluster = "%s-" % cluster + return join(constants.mon_path, cluster) + + +def path(cluster, hostname): + """ + Example usage:: + + >>> from ceph_deploy.util.paths import mon + >>> mon.path('mycluster', 'hostname') + /var/lib/ceph/mon/mycluster-myhostname + """ + return "%s%s" % (base(cluster), hostname) + + +def done(cluster, hostname): + """ + Example usage:: + + >>> from ceph_deploy.util.paths import mon + >>> mon.done('mycluster', 'hostname') + /var/lib/ceph/mon/mycluster-myhostname/done + """ + return join(path(cluster, hostname), 'done') + + +def init(cluster, hostname, init): + """ + Example usage:: + + >>> from ceph_deploy.util.paths import mon + >>> mon.init('mycluster', 'hostname', 'init') + /var/lib/ceph/mon/mycluster-myhostname/init + """ + return join(path(cluster, hostname), init) + + +def keyring(cluster, hostname): + """ + Example usage:: + + >>> from ceph_deploy.util.paths import mon + >>> mon.keyring('mycluster', 'myhostname') + /var/lib/ceph/tmp/mycluster-myhostname.mon.keyring + """ + keyring_file = '%s-%s.mon.keyring' % (cluster, hostname) + return join(constants.tmp_path, keyring_file) + + +def asok(cluster, hostname): + """ + Example usage:: + + >>> from ceph_deploy.util.paths import mon + >>> mon.asok('mycluster', 'myhostname') + /var/run/ceph/mycluster-mon.myhostname.asok + """ + asok_file = '%s-mon.%s.asok' % (cluster, hostname) + return join(constants.base_run_path, asok_file) + + +def monmap(cluster, hostname): + """ + Example usage:: + + >>> from ceph_deploy.util.paths import mon + >>> mon.monmap('mycluster', 'myhostname') + /var/lib/ceph/tmp/mycluster.myhostname.monmap + """ + monmap + mon_map_file = '%s.%s.monmap' % (cluster, hostname) + return join(constants.tmp_path, mon_map_file) diff --git a/ceph_deploy/util/paths/osd.py b/ceph_deploy/util/paths/osd.py new file mode 100644 index 0000000..18d7502 --- /dev/null +++ b/ceph_deploy/util/paths/osd.py @@ -0,0 +1,13 @@ +""" +Comosd paths for osd, based on the constant file paths defined in +``ceph_deploy.util.constants``. +All functions return a string representation of the absolute path +construction. +""" +from os.path import join +from ceph_deploy.util import constants + + +def base(cluster): + cluster = "%s-" % cluster + return join(constants.osd_path, cluster) diff --git a/ceph_deploy/util/pkg_managers.py b/ceph_deploy/util/pkg_managers.py new file mode 100644 index 0000000..feb167f --- /dev/null +++ b/ceph_deploy/util/pkg_managers.py @@ -0,0 +1,429 @@ +import os +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse + +from ceph_deploy.lib import remoto +from ceph_deploy.util import templates + + +class PackageManager(object): + """ + Base class for all Package Managers + """ + + def __init__(self, remote_conn): + self.remote_info = remote_conn + self.remote_conn = remote_conn.conn + + def _run(self, cmd, **kw): + return remoto.process.run( + self.remote_conn, + cmd, + **kw + ) + + def _check(self, cmd, **kw): + return remoto.process.check( + self.remote_conn, + cmd, + **kw + ) + + def install(self, packages, **kw): + """Install packages on remote node""" + raise NotImplementedError() + + def remove(self, packages, **kw): + """Uninstall packages on remote node""" + raise NotImplementedError() + + def clean(self): + """Clean metadata/cache""" + raise NotImplementedError() + + def add_repo_gpg_key(self, url): + """Add given GPG key for repo verification""" + raise NotImplementedError() + + def add_repo(self, name, url, **kw): + """Add/rewrite a repo file""" + raise NotImplementedError() + + def remove_repo(self, name): + """Remove a repo definition""" + raise NotImplementedError() + + +class RPMManagerBase(PackageManager): + """ + Base class to hold common pieces of Yum and DNF + """ + + executable = None + name = None + + def install(self, packages, **kw): + if isinstance(packages, str): + packages = [packages] + + extra_flags = kw.pop('extra_install_flags', None) + cmd = [ + self.executable, + '-y', + 'install', + ] + if extra_flags: + if isinstance(extra_flags, str): + extra_flags = [extra_flags] + cmd.extend(extra_flags) + + cmd.extend(packages) + return self._run(cmd) + + def remove(self, packages, **kw): + if isinstance(packages, str): + packages = [packages] + + extra_flags = kw.pop('extra_remove_flags', None) + cmd = [ + self.executable, + '-y', + '-q', + 'remove', + ] + if extra_flags: + if isinstance(extra_flags, str): + extra_flags = [extra_flags] + cmd.extend(extra_flags) + cmd.extend(packages) + return self._run(cmd) + + def clean(self, item=None): + item = item or 'all' + cmd = [ + self.executable, + 'clean', + item, + ] + + return self._run(cmd) + + def add_repo_gpg_key(self, url): + cmd = ['rpm', '--import', url] + self._run(cmd) + + def add_repo(self, name, url, **kw): + gpg_url = kw.pop('gpg_url', None) + if gpg_url: + self.add_repo_gpg_key(gpg_url) + gpgcheck=1 + else: + gpgcheck=0 + + # RPM repo defaults + description = kw.pop('description', '%s repo' % name) + enabled = kw.pop('enabled', 1) + proxy = kw.pop('proxy', '') # will get ignored if empty + _type = 'repo-md' + baseurl = url.strip('/') # Remove trailing slashes + + ceph_repo_content = templates.custom_repo( + reponame=name, + name=description, + baseurl=baseurl, + enabled=enabled, + gpgcheck=gpgcheck, + _type=_type, + gpgkey=gpg_url, + proxy=proxy, + **kw + ) + + self.remote_conn.remote_module.write_yum_repo( + ceph_repo_content, + '%s.repo' % name + ) + + def remove_repo(self, name): + filename = os.path.join( + '/etc/yum.repos.d', + '%s.repo' % name + ) + self.remote_conn.remote_module.unlink(filename) + + +class DNF(RPMManagerBase): + """ + The DNF Package manager + """ + + executable = 'dnf' + name = 'dnf' + + def install(self, packages, **kw): + extra_install_flags = kw.pop('extra_install_flags', []) + if '--best' not in extra_install_flags: + extra_install_flags.append('--best') + super(DNF, self).install( + packages, + extra_install_flags=extra_install_flags, + **kw + ) + + +class Yum(RPMManagerBase): + """ + The Yum Package manager + """ + + executable = 'yum' + name = 'yum' + + +class Apt(PackageManager): + """ + Apt package management + """ + + executable = [ + 'env', + 'DEBIAN_FRONTEND=noninteractive', + 'DEBIAN_PRIORITY=critical', + 'apt-get', + '--assume-yes', + '-q', + ] + name = 'apt' + + def install(self, packages, **kw): + if isinstance(packages, str): + packages = [packages] + + extra_flags = kw.pop('extra_install_flags', None) + cmd = self.executable + [ + '--no-install-recommends', + 'install' + ] + + if extra_flags: + if isinstance(extra_flags, str): + extra_flags = [extra_flags] + cmd.extend(extra_flags) + cmd.extend(packages) + return self._run(cmd) + + def remove(self, packages, **kw): + if isinstance(packages, str): + packages = [packages] + + extra_flags = kw.pop('extra_remove_flags', None) + cmd = self.executable + [ + '-f', + '--force-yes', + 'remove' + ] + if extra_flags: + if isinstance(extra_flags, str): + extra_flags = [extra_flags] + cmd.extend(extra_flags) + + cmd.extend(packages) + return self._run(cmd) + + def clean(self): + cmd = self.executable + ['update'] + return self._run(cmd) + + def add_repo_gpg_key(self, url): + gpg_path = url.split('file://')[-1] + if not url.startswith('file://'): + cmd = ['wget', '-O', 'release.asc', url ] + self._run(cmd, stop_on_nonzero=False) + gpg_file = 'release.asc' if not url.startswith('file://') else gpg_path + cmd = ['apt-key', 'add', gpg_file] + self._run(cmd) + + def add_repo(self, name, url, **kw): + gpg_url = kw.pop('gpg_url', None) + if gpg_url: + self.add_repo_gpg_key(gpg_url) + + safe_filename = '%s.list' % name.replace(' ', '-') + mode = 0o644 + if urlparse(url).password: + mode = 0o600 + self.remote_conn.logger.info( + "Creating repo file with mode 0600 due to presence of password" + ) + self.remote_conn.remote_module.write_sources_list( + url, + self.remote_info.codename, + safe_filename, + mode + ) + + # Add package pinning for this repo + fqdn = urlparse(url).hostname + self.remote_conn.remote_module.set_apt_priority(fqdn) + + def remove_repo(self, name): + safe_filename = '%s.list' % name.replace(' ', '-') + filename = os.path.join( + '/etc/apt/sources.list.d', + safe_filename + ) + self.remote_conn.remote_module.unlink(filename) + + +class Zypper(PackageManager): + """ + Zypper package management + """ + + executable = [ + 'zypper', + '--non-interactive', + '--quiet' + ] + name = 'zypper' + + def install(self, packages, **kw): + if isinstance(packages, str): + packages = [packages] + + extra_flags = kw.pop('extra_install_flags', None) + cmd = self.executable + ['install'] + if extra_flags: + if isinstance(extra_flags, str): + extra_flags = [extra_flags] + cmd.extend(extra_flags) + cmd.extend(packages) + return self._run(cmd) + + def remove(self, packages, **kw): + if isinstance(packages, str): + packages = [packages] + + extra_flags = kw.pop('extra_remove_flags', None) + cmd = self.executable + ['--ignore-unknown', 'remove'] + if extra_flags: + if isinstance(extra_flags, str): + extra_flags = [extra_flags] + cmd.extend(extra_flags) + cmd.extend(packages) + stdout, stderr, exitrc = self._check( + cmd, + **kw + ) + # exitrc is 104 when package(s) not installed. + if not exitrc in [0, 104]: + raise RuntimeError("Failed to execute command: %s" % " ".join(cmd)) + return + + def clean(self): + cmd = self.executable + ['refresh'] + return self._run(cmd) + + +class Pacman(PackageManager): + """ + Pacman package management + """ + + executable = [ + 'pacman', + '--noconfirm', + ] + name = 'pacman' + + def install(self, packages, **kw): + if isinstance(packages, str): + packages = [packages] + + extra_flags = kw.pop('extra_install_flags', None) + cmd = self.executable + [ + '-Sy', + ] + + if extra_flags: + if isinstance(extra_flags, str): + extra_flags = [extra_flags] + cmd.extend(extra_flags) + cmd.extend(packages) + return self._run(cmd) + + def remove(self, packages, **kw): + if isinstance(packages, str): + packages = [packages] + + extra_flags = kw.pop('extra_remove_flags', None) + cmd = self.executable + [ + '-R' + ] + if extra_flags: + if isinstance(extra_flags, str): + extra_flags = [extra_flags] + cmd.extend(extra_flags) + + cmd.extend(packages) + return self._run(cmd) + + def clean(self): + cmd = self.executable + ['-Syy'] + return self._run(cmd) + + def add_repo_gpg_key(self, url): + cmd = ['pacman-key', '-a', url] + self._run(cmd) + + +class AptRpm(PackageManager): + """ + Apt-Rpm package management + """ + + executable = [ + 'apt-get', + '-y', + '-q', + '-V', + ] + name = 'apt' + + def install(self, packages, **kw): + if isinstance(packages, str): + packages = [packages] + + extra_flags = kw.pop('extra_install_flags', None) + cmd = self.executable + ['install'] + + if extra_flags: + if isinstance(extra_flags, str): + extra_flags = [extra_flags] + cmd.extend(extra_flags) + cmd.extend(packages) + return self._run(cmd) + + def remove(self, packages, **kw): + if isinstance(packages, str): + packages = [packages] + + + extra_flags = kw.pop('extra_remove_flags', None) + cmd = self.executable + [ + '-y', + 'remove' + ] + if extra_flags: + if isinstance(extra_flags, str): + extra_flags = [extra_flags] + cmd.extend(extra_flags) + + cmd.extend(packages) + return self._run(cmd) + + def clean(self): + cmd = self.executable + ['update'] + return self._run(cmd) diff --git a/ceph_deploy/util/ssh.py b/ceph_deploy/util/ssh.py new file mode 100644 index 0000000..5576371 --- /dev/null +++ b/ceph_deploy/util/ssh.py @@ -0,0 +1,32 @@ +import logging +from ceph_deploy.lib import remoto +from ceph_deploy.connection import get_local_connection + + +def can_connect_passwordless(hostname): + """ + Ensure that current host can SSH remotely to the remote + host using the ``BatchMode`` option to prevent a password prompt. + + That attempt will error with an exit status of 255 and a ``Permission + denied`` message or a``Host key verification failed`` message. + """ + # Ensure we are not doing this for local hosts + if not remoto.backends.needs_ssh(hostname): + return True + + logger = logging.getLogger(hostname) + with get_local_connection(logger) as conn: + # Check to see if we can login, disabling password prompts + command = ['ssh', '-CT', '-o', 'BatchMode=yes', hostname, 'true'] + out, err, retval = remoto.process.check(conn, command, stop_on_error=False) + permission_denied_error = 'Permission denied ' + host_key_verify_error = 'Host key verification failed.' + has_key_error = False + for line in err: + if permission_denied_error in line or host_key_verify_error in line: + has_key_error = True + + if retval == 255 and has_key_error: + return False + return True diff --git a/ceph_deploy/util/system.py b/ceph_deploy/util/system.py new file mode 100644 index 0000000..cff1305 --- /dev/null +++ b/ceph_deploy/util/system.py @@ -0,0 +1,180 @@ +from ceph_deploy.exc import ExecutableNotFound +from ceph_deploy.lib import remoto + + +def executable_path(conn, executable): + """ + Remote validator that accepts a connection object to ensure that a certain + executable is available returning its full path if so. + + Otherwise an exception with thorough details will be raised, informing the + user that the executable was not found. + """ + executable_path = conn.remote_module.which(executable) + if not executable_path: + raise ExecutableNotFound(executable, conn.hostname) + return executable_path + + +def is_systemd(conn): + """ + Attempt to detect if a remote system is a systemd one or not + by looking into ``/proc`` just like the ceph init script does:: + + # detect systemd + # SYSTEMD=0 + grep -qs systemd /proc/1/comm && SYSTEMD=1 + """ + return conn.remote_module.grep( + 'systemd', + '/proc/1/comm' + ) + + +def is_upstart(conn): + """ + This helper should only used as a fallback (last resort) as it is not + guaranteed that it will be absolutely correct. + """ + # it may be possible that we may be systemd and the caller never checked + # before so lets do that + if is_systemd(conn): + return False + + # get the initctl executable, if it doesn't exist we can't proceed so we + # are probably not upstart + initctl = conn.remote_module.which('initctl') + if not initctl: + return False + + # finally, try and get output from initctl that might hint this is an upstart + # system. On a Ubuntu 14.04.2 system this would look like: + # $ initctl version + # init (upstart 1.12.1) + stdout, stderr, _ = remoto.process.check( + conn, + [initctl, 'version'], + ) + result_string = b' '.join(stdout) + if b'upstart' in result_string: + return True + return False + + +def enable_service(conn, service='ceph'): + """ + Enable a service on a remote host depending on the type of init system. + Obviously, this should be done for RHEL/Fedora/CentOS systems. + + This function does not do any kind of detection. + """ + if is_systemd(conn): + remoto.process.run( + conn, + [ + 'systemctl', + 'enable', + '{service}'.format(service=service), + ] + ) + else: + remoto.process.run( + conn, + [ + 'chkconfig', + '{service}'.format(service=service), + 'on', + ] + ) + + +def disable_service(conn, service='ceph'): + """ + Disable a service on a remote host depending on the type of init system. + Obviously, this should be done for RHEL/Fedora/CentOS systems. + + This function does not do any kind of detection. + """ + if is_systemd(conn): + # Without the check, an error is raised trying to disable an + # already disabled service + if is_systemd_service_enabled(conn, service): + remoto.process.run( + conn, + [ + 'systemctl', + 'disable', + '{service}'.format(service=service), + ] + ) + + +def stop_service(conn, service='ceph'): + """ + Stop a service on a remote host depending on the type of init system. + Obviously, this should be done for RHEL/Fedora/CentOS systems. + + This function does not do any kind of detection. + """ + if is_systemd(conn): + # Without the check, an error is raised trying to stop an + # already stopped service + if is_systemd_service_active(conn, service): + remoto.process.run( + conn, + [ + 'systemctl', + 'stop', + '{service}'.format(service=service), + ] + ) + + +def start_service(conn, service='ceph'): + """ + Stop a service on a remote host depending on the type of init system. + Obviously, this should be done for RHEL/Fedora/CentOS systems. + + This function does not do any kind of detection. + """ + if is_systemd(conn): + remoto.process.run( + conn, + [ + 'systemctl', + 'start', + '{service}'.format(service=service), + ] + ) + + +def is_systemd_service_active(conn, service='ceph'): + """ + Detects if a systemd service is active or not. + """ + _, _, returncode = remoto.process.check( + conn, + [ + 'systemctl', + 'is-active', + '--quiet', + '{service}'.format(service=service), + ] + ) + return returncode == 0 + + +def is_systemd_service_enabled(conn, service='ceph'): + """ + Detects if a systemd service is enabled or not. + """ + _, _, returncode = remoto.process.check( + conn, + [ + 'systemctl', + 'is-enabled', + '--quiet', + '{service}'.format(service=service), + ] + ) + return returncode == 0 diff --git a/ceph_deploy/util/templates.py b/ceph_deploy/util/templates.py new file mode 100644 index 0000000..b54f7ac --- /dev/null +++ b/ceph_deploy/util/templates.py @@ -0,0 +1,94 @@ + + +ceph_repo = """[ceph] +name=Ceph packages for $basearch +baseurl={repo_url}/$basearch +enabled=1 +gpgcheck={gpgcheck} +priority=1 +type=rpm-md +gpgkey={gpg_url} + +[ceph-noarch] +name=Ceph noarch packages +baseurl={repo_url}/noarch +enabled=1 +gpgcheck={gpgcheck} +priority=1 +type=rpm-md +gpgkey={gpg_url} + +[ceph-source] +name=Ceph source packages +baseurl={repo_url}/SRPMS +enabled=0 +gpgcheck={gpgcheck} +type=rpm-md +gpgkey={gpg_url} +""" + +zypper_repo = """[ceph] +name=Ceph packages +type=rpm-md +baseurl={repo_url} +gpgcheck={gpgcheck} +gpgkey={gpg_url} +enabled=1 +""" + + +def custom_repo(**kw): + """ + Repo files need special care in that a whole line should not be present + if there is no value for it. Because we were using `format()` we could + not conditionally add a line for a repo file. So the end result would + contain a key with a missing value (say if we were passing `None`). + + For example, it could look like:: + + [ceph repo] + name= ceph repo + proxy= + gpgcheck= + + Which breaks. This function allows us to conditionally add lines, + preserving an order and be more careful. + + Previously, and for historical purposes, this is how the template used + to look:: + + custom_repo = + [{repo_name}] + name={name} + baseurl={baseurl} + enabled={enabled} + gpgcheck={gpgcheck} + type={_type} + gpgkey={gpgkey} + proxy={proxy} + + """ + lines = [] + + # by using tuples (vs a dict) we preserve the order of what we want to + # return, like starting with a [repo name] + tmpl = ( + ('reponame', '[%s]'), + ('name', 'name=%s'), + ('baseurl', 'baseurl=%s'), + ('enabled', 'enabled=%s'), + ('gpgcheck', 'gpgcheck=%s'), + ('_type', 'type=%s'), + ('gpgkey', 'gpgkey=%s'), + ('proxy', 'proxy=%s'), + ('priority', 'priority=%s'), + ) + + for line in tmpl: + tmpl_key, tmpl_value = line # key values from tmpl + + # ensure that there is an actual value (not None nor empty string) + if tmpl_key in kw and kw.get(tmpl_key) not in (None, ''): + lines.append(tmpl_value % kw.get(tmpl_key)) + + return '\n'.join(lines) diff --git a/ceph_deploy/util/versions.py b/ceph_deploy/util/versions.py new file mode 100644 index 0000000..810eb38 --- /dev/null +++ b/ceph_deploy/util/versions.py @@ -0,0 +1,47 @@ + + +class NormalizedVersion(object): + """ + A class to provide a clean interface for setting/retrieving distinct + version parts divided into major, minor, and patch (following convnetions + from semver (see http://semver.org/) + + Since a lot of times version parts need to be compared, it provides for + `int` representations of their string counterparts, with some sanitization + processing. + + Defaults to '0' or 0 (int) values when values are not set or parsing fails. + """ + + def __init__(self, raw_version): + self.raw_version = raw_version.strip() + self.major = '0' + self.minor = '0' + self.patch = '0' + self.garbage = '' + self.int_major = 0 + self.int_minor = 0 + self.int_patch = 0 + self._version_map = {} + self._set_versions() + + def _set_int_versions(self): + version_map = dict( + major=self.major, + minor=self.minor, + patch=self.patch, + garbage=self.garbage) + + # safe int versions that remove non-numerical chars + # for example 'rc1' in a version like '1-rc1 + for name, value in version_map.items(): + if '-' in value: # get rid of garbage like -dev1 or -rc1 + value = value.split('-')[0] + value = float(''.join(c for c in value if c.isdigit()) or 0) + int_name = "int_%s" % name + setattr(self, int_name, value) + + def _set_versions(self): + split_version = (self.raw_version.split('.') + ["0"]*4)[:4] + self.major, self.minor, self.patch, self.garbage = split_version + self._set_int_versions() diff --git a/ceph_deploy/validate.py b/ceph_deploy/validate.py new file mode 100644 index 0000000..8ef5e73 --- /dev/null +++ b/ceph_deploy/validate.py @@ -0,0 +1,16 @@ +import argparse +import re + + +ALPHANUMERIC_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*$') + + +def alphanumeric(s): + """ + Enforces string to be alphanumeric with leading alpha. + """ + if not ALPHANUMERIC_RE.match(s): + raise argparse.ArgumentTypeError( + 'argument must start with a letter and contain only letters and numbers', + ) + return s diff --git a/debian/ceph-deploy.install b/debian/ceph-deploy.install new file mode 100644 index 0000000..cec4ab6 --- /dev/null +++ b/debian/ceph-deploy.install @@ -0,0 +1 @@ +./scripts/ceph-deploy /usr/bin diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 0000000..e63c544 --- /dev/null +++ b/debian/changelog @@ -0,0 +1,371 @@ +ceph-deploy (2.0.1) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Tue, 19 Jun 2018 17:54:36 +0000 + +ceph-deploy (2.0.0) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Wed, 17 Jan 2018 13:17:46 +0000 + +ceph-deploy (1.5.39) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Fri, 01 Sep 2017 11:45:54 +0000 + +ceph-deploy (1.5.38) stable; urgency=medium + + * New upstream release + + -- Ceph Release Team Thu, 25 May 2017 12:35:46 +0000 + +ceph-deploy (1.5.37) stable; urgency=medium + + * New upstream release + + -- Alfredo Deza Tue, 03 Jan 2017 21:19:14 +0000 + +ceph-deploy (1.5.36) stable; urgency=medium + + * New upstream release + + -- Alfredo Deza Tue, 30 Aug 2016 11:47:41 +0000 + +ceph-deploy (1.5.35) stable; urgency=medium + + * New upstream release + + -- Alfredo Deza Mon, 15 Aug 2016 13:15:02 +0000 + +ceph-deploy (1.5.34) stable; urgency=medium + + * New upstream release + + -- Alfredo Deza Tue, 07 Jun 2016 17:06:26 +0000 + +ceph-deploy (1.5.33) stable; urgency=medium + + * New upstream release + + -- Alfredo Deza Fri, 22 Apr 2016 12:36:09 +0000 + +ceph-deploy (1.5.32) stable; urgency=medium + + * New upstream release + + -- Alfredo Deza Wed, 13 Apr 2016 14:21:57 +0000 + +ceph-deploy (1.5.31) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Mon, 04 Jan 2016 18:46:26 +0000 + +ceph-deploy (1.5.30) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Fri, 11 Dec 2015 21:09:05 +0000 + +ceph-deploy (1.5.29) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Wed, 02 Dec 2015 18:21:15 +0000 + +ceph-deploy (1.5.28) stable; urgency=low + + * New upstream release + + -- Travis Rhoden Wed, 26 Aug 2015 11:25:15 -0700 + +ceph-deploy (1.5.27) stable; urgency=low + + * New upstream release + + -- Travis Rhoden Wed, 05 Aug 2015 15:51:53 -0700 + +ceph-deploy (1.5.26) stable; urgency=low + + * New upstream release + + -- Travis Rhoden Mon, 20 Jul 2015 11:09:38 -0700 + +ceph-deploy (1.5.25) stable; urgency=low + + * New upstream release + + -- Travis Rhoden Tue, 26 May 2015 10:38:53 -0700 + +ceph-deploy (1.5.24) stable; urgency=low + + * New upstream release + + -- Travis Rhoden Mon, 18 May 2015 13:35:00 -0700 + +ceph-deploy (1.5.23) stable; urgency=low + + * New upstream release + + -- Travis Rhoden Tue, 07 Apr 2015 17:06:35 -0700 + +ceph-deploy (1.5.22) stable; urgency=low + + * New upstream release + + -- Travis Rhoden Mon, 09 Mar 2015 08:14:20 -0700 + +ceph-deploy (1.5.21) stable; urgency=low + + * New upstream release + + -- Travis Rhoden Wed, 10 Dec 2014 07:05:42 -0800 + +ceph-deploy (1.5.20) stable; urgency=low + + * New upstream release + + -- Travis Rhoden Thu, 13 Nov 2014 08:08:46 -0800 + +ceph-deploy (1.5.19) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Wed, 29 Oct 2014 07:19:41 -0700 + +ceph-deploy (1.5.18) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Thu, 09 Oct 2014 10:37:06 -0700 + +ceph-deploy (1.5.18) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Thu, 09 Oct 2014 09:38:44 -0700 + +ceph-deploy (1.5.17) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Mon, 06 Oct 2014 09:15:34 -0700 + +ceph-deploy (1.5.16) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Tue, 30 Sep 2014 07:25:13 -0700 + +ceph-deploy (1.5.15) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Fri, 12 Sep 2014 12:30:50 -0700 + +ceph-deploy (1.5.14) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Tue, 09 Sep 2014 13:51:23 -0700 + +ceph-deploy (1.5.13) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Wed, 03 Sep 2014 05:38:57 -0700 + +ceph-deploy (1.5.12) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Mon, 25 Aug 2014 13:04:17 -0700 + +ceph-deploy (1.5.12) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Mon, 25 Aug 2014 12:40:48 -0700 + +ceph-deploy (1.5.11) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Wed, 13 Aug 2014 05:29:28 -0700 + +ceph-deploy (1.5.10) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Thu, 31 Jul 2014 10:45:11 -0700 + +ceph-deploy (1.5.9) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Mon, 14 Jul 2014 10:12:18 -0700 + +ceph-deploy (1.5.8) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Wed, 09 Jul 2014 15:51:46 +0000 + +ceph-deploy (1.5.7) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Tue, 01 Jul 2014 20:54:52 +0000 + +ceph-deploy (1.5.6) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Tue, 01 Jul 2014 15:22:02 +0000 + +ceph-deploy (1.5.5) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Tue, 10 Jun 2014 14:12:23 +0000 + +ceph-deploy (1.5.4) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Fri, 06 Jun 2014 15:45:10 +0000 + +ceph-deploy (1.5.3) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Fri, 30 May 2014 12:56:14 +0000 + +ceph-deploy (1.5.2) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Wed, 07 May 2014 18:09:23 +0000 + +ceph-deploy (1.5.1) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Thu, 01 May 2014 16:09:56 +0000 + +ceph-deploy (1.5.0) stable; urgency=low + + * New upstream release + + -- Alfredo Deza Fri, 25 Apr 2014 20:15:18 +0000 + +ceph-deploy (1.4.0-1) UNRELEASED; urgency=low + + * New upstream release + + -- Alfredo Deza Wed, 19 Mar 2014 14:32:28 +0000 + +ceph-deploy (1.3.5-1) stable; urgency=low + + * New upstream release + + -- Ken Dreyer Wed, 05 Feb 2014 19:56:18 +0000 + +ceph-deploy (1.3.4-1) precise; urgency=low + + * New upstream release + + -- Gary Lowell Thu, 02 Jan 2014 17:01:21 -0800 + +ceph-deploy (1.3.3-1) stable; urgency=low + + * New upstream release + + -- Gary Lowell Tue, 26 Nov 2013 19:21:04 +0000 + +ceph-deploy (1.3.2-1) stable; urgency=low + + * New upstream release + + -- Gary Lowell Wed, 13 Nov 2013 00:22:12 +0000 + +ceph-deploy (1.3.1-1) stable; urgency=low + + * New upstream release + + -- Gary Lowell Wed, 06 Nov 2013 20:02:54 +0000 + +ceph-deploy (1.3-1) stable; urgency=low + + * New upstream release + + -- Gary Lowell Fri, 01 Nov 2013 05:28:02 +0000 + +ceph-deploy (1.2.7) stable; urgency=low + + * New upstream release + + -- Gary Lowell Mon, 07 Oct 2013 18:33:45 +0000 + +ceph-deploy (1.2.6-1) precise; urgency=low + + * New upstream release + + -- Gary Lowell Wed, 18 Sep 2013 09:26:57 -0700 + +ceph-deploy (1.2.5-1) precise; urgency=low + + * New upstream release + + -- Gary Lowell Tue, 17 Sep 2013 19:25:43 -0700 + +ceph-deploy (1.2.4-1) precise; urgency=low + + * New upstream release + + -- Gary Lowell Tue, 17 Sep 2013 11:19:59 -0700 + +ceph-deploy (1.2.3) precise; urgency=low + + * New upstream release + + -- Gary Lowell Thu, 29 Aug 2013 15:20:22 -0700 + +ceph-deploy (1.2.2) precise; urgency=low + + * New upstream release + + -- Gary Lowell Thu, 22 Aug 2013 12:26:56 -0700 + +ceph-deploy (1.2.1-1) precise; urgency=low + + * New upstream release + + -- Gary Lowell Thu, 15 Aug 2013 15:19:33 -0700 + +ceph-deploy (1.2-1) precise; urgency=low + + * New upstream release + + -- Gary Lowell Mon, 12 Aug 2013 16:59:09 -0700 + +ceph-deploy (1.1-1) precise; urgency=low + + * New upstream release + + -- Gary Lowell Tue, 18 Jun 2013 11:07:00 -0700 + +ceph-deploy (1.0-1) stable; urgency=low + + * New upstream release + + -- Gary Lowell Fri, 24 May 2013 11:57:40 +0800 + +ceph-deploy (0.0.1-1) unstable; urgency=low + + * Initial release. + + -- Gary Lowell Mon, 10 Mar 2013 18:38:40 +0800 diff --git a/debian/compat b/debian/compat new file mode 100644 index 0000000..7f8f011 --- /dev/null +++ b/debian/compat @@ -0,0 +1 @@ +7 diff --git a/debian/control b/debian/control new file mode 100644 index 0000000..ee3c694 --- /dev/null +++ b/debian/control @@ -0,0 +1,26 @@ +Source: ceph-deploy +Maintainer: Sage Weil +Uploaders: Sage Weil +Section: admin +Priority: optional +Build-Depends: debhelper (>= 7), python-setuptools, git +X-Python-Version: >= 2.6 +Standards-Version: 3.9.2 +Homepage: http://ceph.com/ +Vcs-Git: git://github.com/ceph/ceph-deploy.git +Vcs-Browser: https://github.com/ceph/ceph-deploy + +Package: ceph-deploy +Architecture: all +Depends: python, + python-argparse, + python-setuptools, + python-remoto, + ${misc:Depends}, + ${python:Depends} +Description: Ceph-deploy is an easy to use configuration tool + for the Ceph distributed storage system. + . + This package includes the programs and libraries to support + simple ceph cluster deployment. + diff --git a/debian/copyright b/debian/copyright new file mode 100644 index 0000000..93bc530 --- /dev/null +++ b/debian/copyright @@ -0,0 +1,3 @@ +Files: * +Copyright: (c) 2004-2012 by Sage Weil +License: LGPL2.1 (see /usr/share/common-licenses/LGPL-2.1) diff --git a/debian/rules b/debian/rules new file mode 100755 index 0000000..3b877fa --- /dev/null +++ b/debian/rules @@ -0,0 +1,12 @@ +#!/usr/bin/make -f + +# Uncomment this to turn on verbose mode. +export DH_VERBOSE=1 +export DEB_PYTHON_INSTALL_ARGS_ALL += --install-lib=/usr/share/ceph-deploy + +%: + dh $@ --buildsystem python_distutils --with python2 + +override_dh_clean: + rm -rf ceph_deploy/lib/remoto + dh_clean diff --git a/debian/source/format b/debian/source/format new file mode 100644 index 0000000..d3827e7 --- /dev/null +++ b/debian/source/format @@ -0,0 +1 @@ +1.0 diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..f8e0867 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,177 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ceph-deploy.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ceph-deploy.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/ceph-deploy" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ceph-deploy" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/docs/source/_static/.empty b/docs/source/_static/.empty new file mode 100644 index 0000000..e69de29 diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Book.eot b/docs/source/_themes/ceph/static/font/ApexSans-Book.eot new file mode 100644 index 0000000..332c8cb Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Book.eot differ diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Book.svg b/docs/source/_themes/ceph/static/font/ApexSans-Book.svg new file mode 100644 index 0000000..8af9af2 --- /dev/null +++ b/docs/source/_themes/ceph/static/font/ApexSans-Book.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Book.ttf b/docs/source/_themes/ceph/static/font/ApexSans-Book.ttf new file mode 100644 index 0000000..42a0084 Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Book.ttf differ diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Book.woff b/docs/source/_themes/ceph/static/font/ApexSans-Book.woff new file mode 100644 index 0000000..681a70e Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Book.woff differ diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Medium.eot b/docs/source/_themes/ceph/static/font/ApexSans-Medium.eot new file mode 100644 index 0000000..e06fd21 Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Medium.eot differ diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Medium.svg b/docs/source/_themes/ceph/static/font/ApexSans-Medium.svg new file mode 100644 index 0000000..6c624ec --- /dev/null +++ b/docs/source/_themes/ceph/static/font/ApexSans-Medium.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Medium.ttf b/docs/source/_themes/ceph/static/font/ApexSans-Medium.ttf new file mode 100644 index 0000000..44c281e Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Medium.ttf differ diff --git a/docs/source/_themes/ceph/static/font/ApexSans-Medium.woff b/docs/source/_themes/ceph/static/font/ApexSans-Medium.woff new file mode 100644 index 0000000..b7c8819 Binary files /dev/null and b/docs/source/_themes/ceph/static/font/ApexSans-Medium.woff differ diff --git a/docs/source/_themes/ceph/static/nature.css_t b/docs/source/_themes/ceph/static/nature.css_t new file mode 100644 index 0000000..394a633 --- /dev/null +++ b/docs/source/_themes/ceph/static/nature.css_t @@ -0,0 +1,325 @@ +/* + * nature.css_t + * ~~~~~~~~~~~~ + * + * Sphinx stylesheet -- nature theme. + * + * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +@font-face { + font-family: 'ApexSansMedium'; + src: url('font/ApexSans-Medium.eot'); + src: url('font/ApexSans-Medium.eot?#iefix') format('embedded-opentype'), + url('font/ApexSans-Medium.woff') format('woff'), + url('font/ApexSans-Medium.ttf') format('truetype'), + url('font/ApexSans-Medium.svg#FontAwesome') format('svg'); + font-weight: normal; + font-style: normal; +} + +@font-face { + font-family: 'ApexSansBook'; + src: url('font/ApexSans-Book.eot'); + src: url('font/ApexSans-Book.eot?#iefix') format('embedded-opentype'), + url('font/ApexSans-Book.woff') format('woff'), + url('font/ApexSans-Book.ttf') format('truetype'), + url('font/ApexSans-Book.svg#FontAwesome') format('svg'); + font-weight: normal; + font-style: normal; +} + +body { + font: 14px/1.4 Helvetica, Arial, sans-serif; + background-color: #E6E8E8; + color: #37424A; + margin: 0; + padding: 0; + border-top: 5px solid #F05C56; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 330px; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.document { + background-color: #ffffff; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 0 30px 30px 30px; +} + +div.footer { + color: #222B31; + width: 100%; + padding: 13px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #444; + text-decoration: underline; +} + +div.related { + background-color: #80D2DC; + line-height: 32px; + color: #37424A; + // text-shadow: 0px 1px 0 #444; + font-size: 100%; + border-top: #9C4850 5px solid; +} + +div.related a { + color: #37424A; + text-decoration: none; +} + +div.related a:hover { + color: #fff; + // text-decoration: underline; +} + +div.sphinxsidebar { + // font-size: 100%; + line-height: 1.5em; + width: 330px; +} + +div.sphinxsidebarwrapper{ + padding: 20px 0; + background-color: #efefef; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: ApexSansMedium; + color: #e6e8e8; + font-size: 1.2em; + font-weight: normal; + margin: 0; + padding: 5px 10px; + background-color: #5e6a71; + // text-shadow: 1px 1px 0 white; + text-transform: uppercase; +} + +div.sphinxsidebar h4{ + font-size: 1.1em; +} + +div.sphinxsidebar h3 a { + color: #e6e8e8; +} + + +div.sphinxsidebar p { + color: #888; + padding: 5px 20px; +} + +div.sphinxsidebar p.topless { +} + +div.sphinxsidebar ul { + margin: 10px 5px 10px 20px; + padding: 0; + color: #000; +} + +div.sphinxsidebar a { + color: #444; +} + +div.sphinxsidebar input { + border: 1px solid #ccc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar input[type=text]{ + margin-left: 20px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #F05C56; + text-decoration: none; +} + +a:hover { + color: #F05C56; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + // font-family: ApexSansMedium; + // background-color: #80D2DC; + // font-weight: normal; + // color: #37424a; + margin: 30px 0px 10px 0px; + padding: 5px 0 5px 0px; + // text-shadow: 0px 1px 0 white; + text-transform: uppercase; +} + +div.body h1 { font: 20px/2.0 ApexSansBook; color: #37424A; border-top: 20px solid white; margin-top: 0; } +div.body h2 { font: 18px/1.8 ApexSansMedium; background-color: #5E6A71; color: #E6E8E8; padding: 5px 10px; } +div.body h3 { font: 16px/1.6 ApexSansMedium; color: #37424A; } +div.body h4 { font: 14px/1.4 Helvetica, Arial, sans-serif; color: #37424A; } +div.body h5 { font: 12px/1.2 Helvetica, Arial, sans-serif; color: #37424A; } +div.body h6 { font-size: 100%; color: #37424A; } + +// div.body h2 { font-size: 150%; background-color: #E6E8E8; color: #37424A; } +// div.body h3 { font-size: 120%; background-color: #E6E8E8; color: #37424A; } +// div.body h4 { font-size: 110%; background-color: #E6E8E8; color: #37424A; } +// div.body h5 { font-size: 100%; background-color: #E6E8E8; color: #37424A; } +// div.body h6 { font-size: 100%; background-color: #E6E8E8; color: #37424A; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li { + line-height: 1.5em; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.highlight{ + background-color: white; +} + +div.note { + background-color: #e6e8e8; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #efefef; +} + +div.warning { + background-color: #F05C56; + border: 1px solid #9C4850; + color: #fff; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 10px; + background-color: White; + color: #222; + line-height: 1.2em; + border: 1px solid #5e6a71; + font-size: 1.1em; + margin: 1.5em; + -webkit-box-shadow: 1px 1px 1px #e6e8e8; + -moz-box-shadow: 1px 1px 1px #e6e8e8; +} + +tt { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ + font-size: 15px; + font-family: monospace; +} + +.viewcode-back { + font-family: Arial, sans-serif; +} + +div.viewcode-block:target { + background-color: #f4debf; + border-top: 1px solid #ac9; + border-bottom: 1px solid #ac9; +} + +table.docutils { + margin: 1.5em; +} + +div.sidebar { + border: 1px solid #5E6A71; + background-color: #E6E8E8; +} + +div.admonition.tip { + background-color: #80D2DC; + border: 1px solid #55AEBA; +} + +div.admonition.important { + background-color: #F05C56; + border: 1px solid #9C4850; + color: #fff; +} + +div.tip tt.literal { + background-color: #55aeba; + color: #fff; +} + +div.important tt.literal { + background-color: #9C4850; + color: #fff; +} + +h2 .literal { + color: #fff; + background-color: #37424a; +} + +dl.glossary dt { + font-size: 1.0em; + padding-top:20px; + +} \ No newline at end of file diff --git a/docs/source/_themes/ceph/theme.conf b/docs/source/_themes/ceph/theme.conf new file mode 100644 index 0000000..1cc4004 --- /dev/null +++ b/docs/source/_themes/ceph/theme.conf @@ -0,0 +1,4 @@ +[theme] +inherit = basic +stylesheet = nature.css +pygments_style = tango diff --git a/docs/source/admin.rst b/docs/source/admin.rst new file mode 100644 index 0000000..8ae7395 --- /dev/null +++ b/docs/source/admin.rst @@ -0,0 +1,26 @@ +.. _admin: + +admin +======= +The ``admin`` subcommand provides an interface to add to the cluster's admin +node. + +Example +------- +To make a node and admin node run:: + + ceph-deploy admin ADMIN [ADMIN..] + +This places the the cluster configuration and the admin keyring on the remote +nodes. + +Admin node definition +--------------------- + +The definition of an admin node is that both the cluster configuration file +and the admin keyring. Both of these files are stored in the directory +/etc/ceph and thier prefix is that of the cluster name. + +The default ceph cluster name is "ceph". So with a cluster with a default name +the admin keyring is named /etc/ceph/ceph.client.admin.keyring while cluster +configuration file is named /etc/ceph/ceph.conf. diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst new file mode 100644 index 0000000..13cd4e4 --- /dev/null +++ b/docs/source/changelog.rst @@ -0,0 +1,613 @@ +Changelog +========= + +2.0 +--- + +2.0.2 +^^^^^ +16-Jul-2018 + +* Bump the ``remoto`` requirement that fixes the ``expand_env`` bug + + +2.0.1 +^^^^^ +19-Jun-2018 + +* Add support for archlinux +* Support IPV6 addresses in monitors +* Add debug argument when calling disk zap +* Ensure remote executables are files (vs. possible dirs) +* Run ``apt-get update`` before installs +* Default to mimic release +* Use INFO log levels for disk list +* Fix ``UnboundLocalError`` when createing mds/mgr with bad hosts +* Improve distro detection for Arch Linux +* Add epilog text + + +2.0.0 +^^^^^ +16-Jan-2018 + +* Backward incompatible API changes for OSD creation - will use ceph-volume and + no longer consume ceph-disk. +* Remove python-distribute dependency +* Use /etc/os-release as a fallback when ``linux_distribution()`` doesn't work +* Drop dmcrypt support (unsupported by ceph-volume for now) +* Allow debug modes for ceph-volume + + +1.5 +--- + +1.5.39 +^^^^^^ +1-Sep-2017 + +* Remove ``--cluster`` options, default to ``ceph`` always +* Add ``--filestore`` since ``ceph-disk`` defaults to bluestore +* Start testing against Python 3.5 +* Support Debian 9 and 10 intalls +* Better handling on package conflicts when upgrading/re-installing + + +1.5.38 +^^^^^^ +19-May-2017 + +* Allow unsigned deb packages from mirrors +* Detect systemd before sysvinit in centos +* Fix UnboundLocalError when installing in debian with custom repo flags +* gatherkeys to give mgr "allow * " permissions +* specify block.db and block.wal for bluestore +* be able to install ceph-mgr +* bootstrap mgr keys +* cleanup mds key creation +* Virtuozzo Linux support +* update osd and mds caps + + +1.5.37 +^^^^^^ +03-Jan-2017 + +* Use the ``--cluster`` flag on monitor commands (defaulting to 'ceph' if + unspecfied) +* After adding a monitor, ensure it is started regardless of init system +* Allow Oracle Linux Server to be deployed to. +* Fix issue when calling gatherkeys where a log argument was missing +* Use the new development services for installation (from chacra.ceph.com and + shaman.ceph.com URLs) +* Try to decode bytes only on Python 3 when writing files on remote hosts + + +1.5.36 +^^^^^^ +29-Aug-2016 + +* Prefer to use ``load_raw`` to avoid mangling ceph.conf content. +* Improve systemd/sysvinit detection for both CentOS and RHEL +* Gatherkeys should try to get an existing key without caps, in case they don't + match + +1.5.35 +^^^^^^ +15-Aug-2016 + +* Add compatibility for bytes/strings with Python 3 +* Fix errors in argparse default behavior (error messages, incomplete commands) +* Add Python 3.4 to tox +* Python 3 changes to workaround configparser issues +* Use the configured username when using rsync to a remote host (local repo + support) +* Install Python 3 with the bootstrap sciprt +* Bump remoto requirement to 0.0.29 +* Include admin.rst and gatherkeys.rst in the TOC index +* Handle Ceph package split in Ubuntu +* Add a ``--nogpgcheck`` option to disable checks on local repos +* Improve sysvinit/systemd checks by not including 'ceph' in the path +* Install Diamond when calling ``ceph-deploy calamari connect`` +* Zypper fixes for purging: allows removal of multiple packages + + +07-Jun-2016 +1.5.34 +^^^^^^ +07-Jun-2016 + +* Do not call partx/partprobe when zapping disks +* No longer allow using ext4 +* Default to systemd for SUSE +* Remove usage of rcceph (for SUSE) +* No longer depend on automatic ``ceph-create-keys``, use the monitors to fetch + keys. +* Use ``0.0.28`` from remoto + +1.5.33 +^^^^^^ +22-Apr-2016 + +* Default to Jewel for releases + +1.5.32 +^^^^^^ +13-Apr-2016 + +* Improve systemd detection for Ubuntu releases. +* Rename ceph-deploy log to include the cluster name +* Bluestore support +* Disable timeouts for pkg install/remove operations (they can take a long + time) +* Remove deprecated ceph.conf configuration "filestore xattr use omap = true" + +1.5.31 +^^^^^^ +04-Jan-2016 + +* Use the new remoto version (0.0.27) that fixes an error when dealing with + remote output. + +1.5.30 +^^^^^^ +11-Dec-2015 + +* Default to the "infernalis" release. +* Fix an issue when trying to destroy/stop monitors on systemd servers + +1.5.29 +^^^^^^ +2-Dec-2015 + +* Add support for ``--dev-commit `` +* Add ``--test`` option for installing ceph-test package +* Enable Ceph on ``osd create`` +* Remove bootstrap-rgw key when forgetkeys is used +* Prefer systemd over upstart in newer Ubuntu +* Use download.ceph.com directly +* Use better examples in default cephdeploy.conf file +* Cleanup functions for uninstall and purge (simplifying code) +* Use https for download.cep.com +* Fix gitbuilder hosts to avoid using https +* Do not udevadm trigger because ceph-disk does it already +* Download gpg keys from download.ceph.com +* Specify a PID location for monitors +* Fix invalid path for release keys in test +* Add timestamp to log output + +1.5.28 +^^^^^^ +26-Aug-2015 + +* Fix issue when importing GPG keys on Centos 6 introduced in 1.5.27. +* Support systemd and sysvinit on RHEL, Fedora, and CentOS, when systemd + is present in the Ceph packages. +* Simplify steps taken when adding a monitor with ``ceph-deploy mon add``. + Eliminates a 5-minute hang when moving from 1 monitor to 2. +* Make sure that Ceph is installed on a remote node before trying to enable + a Ceph daemon. + +1.5.27 +^^^^^^ +05-Aug-2015 + +* New ``repo`` top-level command for adding and removing repos. +* Ability to install subset of ceph packages based on CLI switches like + ``--cli``, ``--rgw``, etc. +* Initial support for systemd. Ceph on Fedora 22 only. +* Fixed an issue that prevented package upgrades when using DNF. +* No longer installs yum-priorities-plugin when using DNF. + +1.5.26 +^^^^^^ +20-Jul-2015 + +* Make parsing of boolean values in config file overrides work. +* Output value of all ceph-deploy options upon invocation. +* Point to git.ceph.com for GPG keys. +* Make GPG key fetching work on Debian Wheezy. +* Allow ceph-deploy to work on Mint distro. +* Improved help menu output during subcommand context. +* Point to SUSE downstream packages by default on SUSE distros since + ceph.com does not host packages for SUSE anymore.. +* Some initial groundwork for installing Ceph daemons that will no longer + run as root user. +* Add support for DNF package manager (Fedora >= 22 only). +* Echo RGW default port number after ``ceph-deploy rgw create``. + +1.5.25 +^^^^^^ +26-May-2015 + +* **CVE-2015-4053**: Make sure that the admin keyring is mode 0600 after being + pushed with the ``ceph-deploy admin`` command. +* Improved SUSE install and purge. +* Make sure that package name 'ceph-radosgw' is used everywhere for RPM systems + instead of 'radosgw'. + +1.5.24 +^^^^^^ +18-May-2015 + +* Use version 0.0.25 of ``remoto`` that fixes an issue where output would be cut + (https://github.com/alfredodeza/remoto/issues/15). +* Automatically prefix custom RGW daemon names with 'rgw.' +* Log an error message when deploying MDS in RHEL distros fails as it may not + be supported. +* More robust vendor.py script (tries ceph.com and GitHub) +* Create /var/lib/ceph/radosgw directory on remote host if not present +* Enable/start ceph-radosgw service on RPM systems instead of radosgw +* Add flags to support install of specific daemons (OSD, MON, RGW, MDS) only + Note that the packaging changes for this in upstream Ceph are still pending +* removing installation of 'calamari-minions' repo upon + 'ceph-deploy calamari connect' +* enable ceph-mds service correctly on systemd +* Check for sysvinit and custom cluster name on 'ceph-deploy new' command + +1.5.23 +^^^^^^ +07-Apr-2015 + +* Default to Hammer on install. +* Add ``rgw`` command to easily create rgw instances. +* Automatically install the radosgw package. +* Remove unimplemented subcommands from CLI and help. +* **CVE-2015-3010**: Fix an issue where keyring permissions were + world readable (thanks Owen Synge). +* Fix an issue preventing all but the first host given to + ``install --repo`` from being used. + +1.5.22 +^^^^^^ +09-Mar-2015 + +* Enable ``check_obsoletes`` in Yum priorities plugin when deploying + upstream Ceph on RPM-based distros. +* Require ``--release`` flag to install upstream Ceph on RHEL. +* Uninstall ``ceph-common`` on Fedora. + +1.5.21 +^^^^^^ +10-Dec-2014 + +* Fix distro detection for CentOS and Scientific Linux, which was + preventing installation of EPEL repo as a prerequisite. +* Default to Giant on install. +* Fix an issue where ``gatherkeys`` did not exit non-zero when + keys were not found. + +1.5.20 +^^^^^^ +13-Nov-2014 + +* log stderr and stdout in the same order as they happen remotely. + +1.5.19 +^^^^^^ +29-Oct-2014 + +* Create temporary ceph.conf files in ``/etc/ceph`` to avoid issues with + SELinux. + +1.5.18 +^^^^^^ +09-Oct-2014 + +* Fix issue for enabling the OSD service in el-like distros. +* Create a monitor keyring if it doesn't exist. + +1.5.17 +^^^^^^ +06-Oct-2014 + +* Do not ask twice for passwords when calling ``new``. +* Ensure priorities are installed and enforced for custom repositories. + +1.5.16 +^^^^^^ +30-Sep-2014 + +* Enable services on ``el`` distros when deploying Ceph daemons. +* Smarter detection of ``sudo`` need on remote nodes (prevents issues when + running ceph-deploy as ``root`` or with ``sudo``. +* Fix an issue where Debian Sid would break ceph-deploy failing Distro + detection. + +1.5.15 +^^^^^^ +12-Sep-2014 + +* If ``wget`` is installed don't try to install it regardless. + +1.5.14 +^^^^^^ +09-Sep-2014 + +* Do not override environment variables on remote hosts, preserve them and + extend the ``$PATH`` if not explicitly told not to. + +1.5.13 +^^^^^^ +03-Sep-2014 + +* Fix missing priority plugin in YUM for Fedora when installing +* Implement --public-network and --cluster-network with remote IP validation +* Fixed an issue where errors before the logger was setup would be silenced. + +1.5.12 +^^^^^^ +25-Aug-2014 + +* Better traceback reporting with logging. +* Close stderr/stdout when ceph-deploy completes operations (silences odd + tracebacks) +* Allow to re-use a ceph.conf file with ``--ceph-conf`` global flag +* Be able to concatenate and seed keyring files with ``--keyrings`` + +1.5.11 +^^^^^^ +25-Aug-2014 + +* Fix a problem where CentOS7 is not matched correctly against repos (Thanks + Tom Walsh) + +1.5.10 +^^^^^^ +31-Jul-2014 + +* Use ``ceph-disk`` with high verbosity +* Don't require ``ceph-common`` on EL distros +* Use ``ceph-disk zap`` instead of re-implementing it +* Use proper paths for ``zypper`` (Thanks Owen Synge) +* More robust ``init`` detection for Ubuntu (Thanks Joao Eduardo Luis) +* Allow to install repo files only +* Work with inconsistent repo sections for Emperor when setting priorities + +1.5.9 +^^^^^ +14-Jul-2014 + +* Allow to optionally set the ``fsid`` when calling ``new`` +* Correctly select sysvinit or systemd for Suse versions (Thanks Owen Synge) +* Use correct version of remoto (``0.0.19``) that holds the ``None`` global fix +* Fix new naming scheme for CentOS platforms that prevented CentOS 7 installs + +1.5.8 +^^^^^ +09-Jul-2014 + +* Create a flake8/pep8/linting job so that we prevent Undefined errors +* Add partprobe/partx calls when zapping disks +* Fix RHEL7 installation issues (url was using el6 incorrectly) (Thanks David Vossel) +* Warn when an executable is not found +* Fix an ``AttributeError`` in execnet (see https://github.com/alfredodeza/execnet/issues/1) + +1.5.7 +^^^^^ +01-Jul-2014 + +* Fix ``NameError`` on osd.py from an undefined variable +* Fix a calamari connect problem when installing on multiple hosts + +1.5.6 +^^^^^ +01-Jul-2014 + +* Optionally avoid vendoring libraries for upstream package maintainers. +* Fix RHEL7 installation issue that was pulling ``el6`` packages (Thanks David Vossel) + +1.5.5 +^^^^^ +10-Jun-2014 + +* Normalize repo file header calls. Fixes breakage on Calamari repos. + +1.5.4 +^^^^^ +10-Jun-2014 + +* Improve help by adding online doc link +* allow cephdeploy.conf to set priorities in repos +* install priorities plugin for yum distros +* set the right priority for ceph.repo and warn about this + +1.5.3 +^^^^^ +30-May-2014 + +* Another fix for IPV6: write correct ``mon_host`` in ceph.conf +* Support ``proxy`` settings for repo files in YUM +* Better error message when ceph.conf is not found +* Refuse to install custom cluster names on sysvinit systems (not supported) +* Remove quiet flags from package manager's install calls to avoid timing out +* Use the correct URL repo when installing for RHEL + +1.5.2 +^^^^^ +09-May-2014 + +* Remove ``--`` from the command to install packages. (Thanks Vincenzo Pii) +* Default to Firefly as the latest, stable Ceph version + +1.5.1 +^^^^^ +01-May-2014 + +* Fixes a broken ``osd`` command that had the wrong attribute in the conn + object + +1.5.0 +^^^^^ +28-Apr-2014 + +* Warn if ``requiretty`` is causing issues +* Support IPV6 host resolution (Thanks Frode Nordahl) +* Fix incorrect paths for local cephdeploy.conf +* Support subcommand overrides defined in cephdeploy.conf +* When installing on CentOS/RHEL call ``yum clean all`` +* Check OSD status when deploying to catch possible issues +* Add a ``--local-mirror`` flag for installation that syncs files +* Implement ``osd list`` to list remote osds +* Fix install issues on Suse (Thanks Owen Synge) + +1.4 +----- + +1.4.0 +^^^^^ +* uninstall ceph-release and clean cache in CentOS +* Add ability to add monitors to an existing cluster +* Deprecate use of ``--stable`` for releases, introduce ``--release`` +* Eat some tracebacks that may appear when closing remote connections +* Enable default ceph-deploy configurations for repo handling +* Fix wrong URL for rpm installs with ``--testing`` flag + +1.3 +--- + +1.3.5 +^^^^^ +* Support Debian SID for installs +* Error nicely when hosts cannot be resolved +* Return a non-zero exit status when monitors have not formed quorum +* Use the new upstream library for remote connections (execnet 1.2) +* Ensure proper read permissions for ceph.conf when pushing configs +* clean up color logging for non-tty sessions +* do not reformat configs when pushing, pushes are now as-is +* remove dry-run flag that did nothing + +1.3.4 +^^^^^ +* ``/etc/ceph`` now gets completely removed when using ``purgedata``. +* Refuse to perform ``purgedata`` if ceph is installed +* Add more details when a given platform is not supported +* Use new Ceph auth settings for ``ceph.conf`` +* Remove old journal size settings from ``ceph.conf`` +* Add a new subcommand: ``pkg`` to install/remove packages from hosts + + +1.3.3 +^^^^^ +* Add repo mirror support with ``--repo-url`` and ``--gpg-url`` +* Remove dependency on the ``which`` command +* Fix problem when removing ``/var/lib/ceph`` and OSDs are still mounted +* Make sure all tmp files are closed before moving, fixes issue when creating + keyrings and conf files +* Complete remove the lsb module + + +1.3.2 +^^^^^ +* ``ceph-deploy new`` will now attempt to copy SSH keys if necessary unless it + it disabled. +* Default to Emperor version of ceph when installing. + +1.3.1 +^^^^^ +* Use ``shutil.move`` to overwrite files from temporary ones (Thanks Mark + Kirkwood) +* Fix failure to ``wget`` GPG keys on Debian and Debian-based distros when + installing + +1.3.0 +^^^^^ +* Major refactoring for all the remote connections in ceph-deploy. With global + and granular timeouts. +* Raise the log level for missing keyrings +* Allow ``--username`` to be used for connecting over SSH +* Increase verbosity when MDS fails, include the exit code +* Do not remove ``/etc/ceph``, just the contents +* Use ``rcceph`` instead of service for SUSE +* Fix lack of ``--cluster`` usage on monitor error checks +* ensure we correctly detect Debian releases + +1.2 +--- + +1.2.7 +^^^^^ +* Ensure local calls to ceph-deploy do not attempt to ssh. +* ``mon create-initial`` command to deploy all defined mons, wait for them to + form quorum and finally to gatherkeys. +* Improve help menu for mon commands. +* Add ``--fs-type`` option to ``disk`` and ``osd`` commands (Thanks Benoit + Knecht) +* Make sure we are using ``--cluster`` for remote configs when starting ceph +* Fix broken ``mon destroy`` calls using the new hostname resolution helper +* Add a helper to catch common monitor errors (reporting the status of a mon) +* Normalize all configuration options in ceph-deploy (Thanks Andrew Woodward) +* Use a ``cuttlefish`` compatible ``mon_status`` command +* Make ``osd activate`` use the new remote connection libraries for improved + readability. +* Make ``disk zap`` also use the new remote connection libraries. +* Handle any connection errors that may came up when attempting to get into + remote hosts. + +1.2.6 +^^^^^ +* Fixes a problem witha closed connection for Debian distros when creating + a mon. + +1.2.5 +^^^^^ +* Fix yet another hanging problem when starting monitors. Closing the + connection now before we even start them. + +1.2.4 +^^^^^ +* Improve ``osd help`` menu with path information +* Really discourage the use of ``ceph-deploy new [IP]`` +* Fix hanging remote requests +* Add ``mon status`` output when creating monitors +* Fix Debian install issue (wrong parameter order) (Thanks Sayid Munawar) +* ``osd`` commands will be more verbose when deploying them +* Issue a warning when provided hosts do not match ``hostname -s`` remotely +* Create two flags for altering/not-altering source repos at install time: + ``--adjust-repos`` and ``--no-adjust-repos`` +* Do not do any ``sudo`` commands if user is root +* Use ``mon status`` for every ``mon`` deployment and detect problems with + monitors. +* Allow to specify ``host:fqdn/ip`` for all mon commands (Thanks Dmitry + Borodaenko) +* Be consistent for hostname detection (Thanks Dmitry Borodaenko) +* Fix hanging problem on remote hosts + +1.2.3 +^^^^^ +* Fix non-working ``disk list`` +* ``check_call`` utility fixes ``$PATH`` issues. +* Use proper exit codes from the ``main()`` CLI function +* Do not error when attempting to add the EPEL repos. +* Do not complain when using IP:HOST pairs +* Report nicely when ``HOST:DISK`` is not used when zapping. + +1.2.2 +^^^^^ +* Do not force usage of lsb_release, fallback to + ``platform.linux_distribution()`` +* Ease installation in CentOS/Scientific by adding the EPEL repo + before attempting to install Ceph. +* Graceful handling of pushy connection issues due to host + address resolution +* Honor the usage of ``--cluster`` when calling osd prepare. + +1.2.1 +^^^^^ +* Print the help when no arguments are passed +* Add a ``--version`` flag +* Show the version in the help menu +* Catch ``DeployError`` exceptions nicely with the logger +* Fix blocked command when calling ``mon create`` +* default to ``dumpling`` for installs +* halt execution on remote exceptions + +1.2.0 +^^^^^ +* Better logging output +* Remote logging for individual actions for ``install`` and ``mon create`` +* Install ``ca-certificates`` on all Debian-based distros +* Honor the usage of ``--cluster`` +* Do not ``rm -rf`` monitor logs when destroying +* Error out when ``ceph-deploy new [IP]`` is used +* Log the ceph version when installing diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..b841234 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- +# +# ceph-deploy documentation build configuration file, created by +# sphinx-quickstart on Mon Oct 21 09:32:42 2013. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.append(os.path.abspath('_themes')) +sys.path.insert(0, os.path.abspath('..')) +import ceph_deploy + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.intersphinx', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'contents' + +# General information about the project. +project = u'ceph-deploy' +copyright = u'2013, Inktank' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = ceph_deploy.__version__ +# The full version, including alpha/beta/rc tags. +release = ceph_deploy.__version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'ceph' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = ['_themes'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +html_use_smartypants = False + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'ceph-deploydoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ('index', 'ceph-deploy.tex', u'ceph-deploy Documentation', + u'Inktank', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'ceph-deploy', u'ceph-deploy Documentation', + [u'Inktank'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'ceph-deploy', u'ceph-deploy Documentation', + u'Inktank', 'ceph-deploy', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + + +# XXX Uncomment when we are ready to link to ceph docs +# Example configuration for intersphinx: refer to the Python standard library. +#intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/docs/source/conf.rst b/docs/source/conf.rst new file mode 100644 index 0000000..6eee392 --- /dev/null +++ b/docs/source/conf.rst @@ -0,0 +1,175 @@ +.. _conf: + +Ceph Deploy Configuration +========================= +Starting with version 1.4, ceph-deploy uses a configuration file that can be +one of: + +* ``cephdeploy.conf`` (in the current directory) +* ``$HOME/.cephdeploy.conf`` (hidden in the user's home directory) + +This configuration file allows for setting certain ceph-deploy behavior that +would be difficult to set on the command line or that it might be cumbersome to +do. + +The file itself follows the INI style of configurations which means that it +consists of sections (in brackets) that may contain any number of key/value +pairs. + +If a configuration file is not found in the current working directory nor in +the user's home dir, ceph-deploy will proceed to create one in the home +directory. + +This is how a default configuration file would look like:: + + # + # ceph-deploy configuration file + # + + [ceph-deploy-global] + # Overrides for some of ceph-deploy's global flags, like verbosity or cluster + # name + + [ceph-deploy-install] + # Overrides for some of ceph-deploy's install flags, like version of ceph to + # install + + + # + # Repositories section + # + + # yum repos: + # [myrepo] + # baseurl = https://user:pass@example.org/rhel6 + # gpgurl = https://example.org/keys/release.asc + # default = True + # extra-repos = cephrepo # will install the cephrepo file too + # + # [cephrepo] + # name=ceph repo noarch packages + # baseurl=http://ceph.com/rpm-emperor/el6/noarch + # enabled=1 + # gpgcheck=1 + # type=rpm-md + # gpgkey=https://download.ceph.com/keys/release.asc + + # apt repos: + # [myrepo] + # baseurl = https://user:pass@example.org/ + # gpgurl = https://example.org/keys/release.asc + # default = True + # extra-repos = cephrepo # will install the cephrepo file too + # + # [cephrepo] + # baseurl=http://ceph.com/rpm-emperor/el6/noarch + # gpgkey=https://download.ceph.com/keys/release.asc + +.. conf_sections: + +Sections +-------- +To work with ceph-deploy configurations, it is important to note that all +sections that relate to ceph-deploy's flags and state are prefixed with +``ceph-deploy-`` followed by the subcommand or by ``global`` if it is something +that belongs to the global flags. + +Any other section that is not prefixed with ``ceph-deploy-`` is considered +a repository. + +Repositories can be very complex to describe and most of the time (specially +for yum repositories) they can be very verbose too. + +Setting Default Flags or Values +------------------------------- +Because the configuration loading allows specifying the same flags as in the +CLI it is possible to set defaults. For example, assuming that a user always +wants to install Ceph the following way (that doesn't create/modify remote repo +files):: + + ceph-deploy install --no-adjust-repos {nodes} + +This can be the default behavior by setting it in the right section in the +configuration file, which should look like this:: + + [ceph-deploy-install] + adjust_repos = False + +The default for ``adjust_repos`` is ``True``, but because we are changing this +to ``False`` the CLI will now have this behavior changed without the need to +pass any flag. + +Repository Sections +------------------- +Keys will depend on the type of package manager that will use it. Certain keys +for yum are required (like ``baseurl``) and some others like ``gpgcheck`` are +optional. + +For both yum and apt these would be all the required keys in a repository section: + +* baseurl +* gpgkey + +If a required key is not present ceph-deploy will abort the installation +process with an error identifying the section and key what was missing. + +In yum the repository name is taken from the section, so if the section is +``[foo]``, then the name of the repository will be ``foo repo`` and the +filename written to ``/etc/yum.repos.d/`` will be ``foo.repo``. + +For apt, the same happens except the directory location changes to: +``/etc/apt/sources.list.d/`` and the file becomes ``foo.list``. + + +Optional values for yum +----------------------- +**name**: A descriptive name for the repository. If not provided ``{repo +section} repo`` is used + +**enabled**: Defaults to ``1`` + +**gpgcheck**: Defaults to ``1`` + +**type**: Defaults to ``rpm-md`` + +**gpgcheck**: Defaults to ``1`` + + +Default Repository +------------------ +For installations where a default repository is needed a key can be added to +that section to indicate it is the default one:: + + [myrepo] + default = true + +When a default repository is detected it is mentioned in the log output and +ceph will get install from that one repository at the end. + +Extra Repositories +------------------ +If other repositories need to be installed aside from the main one, a key +should be added to represent that need with a comma separated value with the +name of the sections of the other repositories (just like the example +configuration file demonstrates):: + + [myrepo] + baseurl = https://user:pass@example.org/rhel6 + gpgurl = https://example.org/keys/release.asc + default = True + extra-repos = cephrepo # will install the cephrepo file too + + [cephrepo] + name=ceph repo noarch packages + baseurl=http://ceph.com/rpm-emperor/el6/noarch + enabled=1 + gpgcheck=1 + type=rpm-md + gpgkey=https://download.ceph.com/keys/release.asc + +In this case, the repository called ``myrepo`` defines the ``extra-repos`` key +with just one extra one: ``cephrepo``. + +This extra repository must exist as a section in the configuration file. After +the main one is added all the extra ones defined will follow. Installation of +Ceph will only happen with the main repository. diff --git a/docs/source/contents.rst b/docs/source/contents.rst new file mode 100644 index 0000000..c80d8cc --- /dev/null +++ b/docs/source/contents.rst @@ -0,0 +1,18 @@ +Content Index +============= + +.. toctree:: + :maxdepth: 2 + + index.rst + new.rst + install.rst + mon.rst + rgw.rst + mds.rst + conf.rst + pkg.rst + repo.rst + changelog.rst + admin.rst + gatherkeys.rst diff --git a/docs/source/gatherkeys.rst b/docs/source/gatherkeys.rst new file mode 100644 index 0000000..6a1bdea --- /dev/null +++ b/docs/source/gatherkeys.rst @@ -0,0 +1,55 @@ +.. _gatherkeys: + +========== +gatherkeys +========== + +The ``gatherkeys`` subcommand provides an interface to get with a cluster's +cephx bootstrap keys. + +keyrings +======== +The ``gatherkeys`` subcommand retrieves the following keyrings. + +ceph.mon.keyring +---------------- +This keyring is used by all mon nodes to communicate with other mon nodes. + +ceph.client.admin.keyring +------------------------- +This keyring is ceph client commands by default to administer the ceph cluster. + +ceph.bootstrap-osd.keyring +-------------------------- +This keyring is used to generate cephx keyrings for OSD instances. + +ceph.bootstrap-mds.keyring +-------------------------- +This keyring is used to generate cephx keyrings for MDS instances. + +ceph.bootstrap-rgw.keyring +-------------------------- +This keyring is used to generate cephx keyrings for RGW instances. + +Example +======= +The ``gatherkeys`` subcommand contacts the mon and creates or retrieves existing +keyrings from the mon internal store. To run:: + + ceph-deploy gatherkeys MON [MON..] + +You can optionally add as many mon nodes to the command line as desired. The +``gatherkeys`` subcommand will succeed on the first mon to respond successfully +with all the keyrings. + +Backing up of old keyrings +========================== + +If old keyrings exist in the current working directory that do not match the +retrieved keyrings these old keyrings will be renamed with a time stamp +extention so you will not loose valuable keyrings. + +.. note:: Before version v1.5.33 ceph-deploy relied upon ``ceph-create-keys`` + and did not backup existing keys. Using ``ceph-create-keys`` produced + a side effect of deploying all bootstrap keys on the mon node so + making all mon nodes admin nodes. diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..3ba0f99 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,315 @@ +======================================================== + ceph-deploy -- Deploy Ceph with minimal infrastructure +======================================================== + +``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to +the servers, ``sudo``, and some Python. It runs fully on your +workstation, requiring no servers, databases, or anything like that. + +If you set up and tear down Ceph clusters a lot, and want minimal +extra bureaucracy, this is for you. + +.. _what this tool is not: + +What this tool is not +--------------------- +It is not a generic deployment system, it is only for Ceph, and is designed +for users who want to quickly get Ceph running with sensible initial settings +without the overhead of installing Chef, Puppet or Juju. + +It does not handle client configuration beyond pushing the Ceph config file +and users who want fine-control over security settings, partitions or directory +locations should use a tool such as Chef or Puppet. + + +Installation +============ +Depending on what type of usage you are going to have with ``ceph-deploy`` you +might want to look into the different ways to install it. For automation, you +might want to ``bootstrap`` directly. Regular users of ``ceph-deploy`` would +probably install from the OS packages or from the Python Package Index. + +Python Package Index +-------------------- +If you are familiar with Python install tools (like ``pip`` and +``easy_install``) you can easily install ``ceph-deploy`` like:: + + pip install ceph-deploy + + +It should grab all the dependencies for you and install into the current user's +environment. + +We highly recommend using ``virtualenv`` and installing dependencies in +a contained way. + + +DEB +--- +All new releases of ``ceph-deploy`` are pushed to all ``ceph`` DEB release +repos. + +The DEB release repos are found at:: + + http://ceph.com/debian-{release} + http://ceph.com/debian-testing + +This means, for example, that installing ``ceph-deploy`` from +http://ceph.com/debian-giant will install the same version as from +http://ceph.com/debian-firefly or http://ceph.com/debian-testing. + +RPM +--- +All new releases of ``ceph-deploy`` are pushed to all ``ceph`` RPM release +repos. + +The RPM release repos are found at:: + + http://ceph.com/rpm-{release} + http://ceph.com/rpm-testing + +Make sure you add the proper one for your distribution (i.e. el7 vs rhel7). + +This means, for example, that installing ``ceph-deploy`` from +http://ceph.com/rpm-giant will install the same version as from +http://ceph.com/rpm-firefly or http://ceph.com/rpm-testing. + +bootstrapping +------------- +To get the source tree ready for use, run this once:: + + ./bootstrap + +You can symlink the ``ceph-deploy`` script in this somewhere +convenient (like ``~/bin``), or add the current directory to ``PATH``, +or just always type the full path to ``ceph-deploy``. + + +SSH and Remote Connections +========================== +``ceph-deploy`` will attempt to connect via SSH to hosts when the hostnames do +not match the current host's hostname. For example, if you are connecting to +host ``node1`` it will attempt an SSH connection as long as the current host's +hostname is *not* ``node1``. + +ceph-deploy at a minimum requires that the machine from which the script is +being run can ssh as root without password into each Ceph node. + +To enable this generate a new ssh keypair for the root user with no passphrase +and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in:: + + /root/.ssh/authorized_keys + +and ensure that the following lines are in the sshd config:: + + PermitRootLogin yes + PermitEmptyPasswords yes + +The machine running ceph-deploy does not need to have the Ceph packages +installed unless it needs to admin the cluster directly using the ``ceph`` +command line tool. + + +usernames +--------- +When not specified the connection will be done with the same username as the +one executing ``ceph-deploy``. This is useful if the same username is shared in +all the nodes but can be cumbersome if that is not the case. + +A way to avoid this is to define the correct usernames to connect with in the +SSH config, but you can also use the ``--username`` flag as well:: + + ceph-deploy --username ceph install node1 + +``ceph-deploy`` then in turn would use ``ceph@node1`` to connect to that host. + +This would be the same expectation for any action that warrants a connection to +a remote host. + + +Managing an existing cluster +============================ + +You can use ceph-deploy to provision nodes for an existing cluster. +To grab a copy of the cluster configuration file (normally +``ceph.conf``):: + + ceph-deploy config pull HOST + +You will usually also want to gather the encryption keys used for that +cluster:: + + ceph-deploy gatherkeys MONHOST + +At this point you can skip the steps below that create a new cluster +(you already have one) and optionally skip installation and/or monitor +creation, depending on what you are trying to accomplish. + + +Installing packages +=================== +For detailed information on installation instructions refer to the :ref:`install` +section. + +Proxy or Firewall Installs +-------------------------- +If attempting to install behind a firewall or through a proxy you can +use the ``--no-adjust-repos`` that will tell ceph-deploy to skip any changes +to the distro's repository in order to install the packages and it will go +straight to package installation. + +That will allow an environment without internet access to point to *its own +repositories*. This means that those repositories will need to be properly +setup (and mirrored with all the necessary dependencies) before attempting an +install. + +Another alternative is to set the `wget` env variables to point to the right +hosts, for example:: + + http_proxy=http://host:port + ftp_proxy=http://host:port + https_proxy=http://host:port + + +Creating a new configuration +============================ + +To create a new configuration file and secret key, decide what hosts +will run ``ceph-mon``, and run:: + + ceph-deploy new MON [MON..] + +For detailed information on new instructions refer to the :ref:`new` +section. + +For detailed information on ``new`` subcommand refer to the +:ref:`mon` section. + +Deploying monitors +================== + +To actually deploy ``ceph-mon`` to the hosts you chose, run:: + + ceph-deploy mon create HOST [HOST..] + +Without explicit hosts listed, hosts in ``mon_initial_members`` in the +config file are deployed. That is, the hosts you passed to +``ceph-deploy new`` are the default value here. + +For detailed information on ``mon`` subcommand refer to the +:ref:`mon` section. + +Gather keys +=========== + +To gather authentication keys (for administering the cluster and +bootstrapping new nodes) to the local directory, run:: + + ceph-deploy gatherkeys HOST [HOST...] + +where ``HOST`` is one of the monitor hosts. + +Once these keys are in the local directory, you can provision new OSDs etc. + +For detailed information on ``gatherkeys`` subcommand refer to the +:ref:`gatherkeys` section. + +Admin hosts +=========== + +To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring`` +keyring so that it can administer the cluster, run:: + + ceph-deploy admin HOST [HOST ...] + +Older versions of ceph-deploy automatically added the admin keyring to +all mon nodes making them admin nodes. For detailed information on the +admin command refer to the :ref:`admin` section. + +For detailed information on ``admin`` subcommand refer to the +:ref:`admin` section. + +Deploying OSDs +============== + +To create an OSD on a remote node, run:: + + ceph-deploy osd create HOST --data /path/to/device + +Alternatively, ``--data`` can accept a logical volume in the format of +``vg/lv`` + +After that, the hosts will be running OSDs for the given data disks or logical +volumes. For other OSD devices like journals (when using ``--filestore``) or +``block.db``, and ``block.wal``, these need to be logical volumes or GPT +partitions. + +.. note:: Partitions aren't created by this tool, they must be created + beforehand + + +Forget keys +=========== + +The ``new`` and ``gatherkeys`` put some Ceph authentication keys in keyrings in +the local directory. If you are worried about them being there for security +reasons, run:: + + ceph-deploy forgetkeys + +and they will be removed. If you need them again later to deploy additional +nodes, simply re-run:: + + ceph-deploy gatherkeys HOST [HOST...] + +and they will be retrieved from an existing monitor node. + +Multiple clusters +================= + +All of the above commands take a ``--cluster=NAME`` option, allowing +you to manage multiple clusters conveniently from one workstation. +For example:: + + ceph-deploy --cluster=us-west new + vi us-west.conf + ceph-deploy --cluster=us-west mon + +FAQ +=== + +Before anything +--------------- +Make sure you have the latest version of ``ceph-deploy``. It is actively +developed and releases are coming weekly (on average). The most recent versions +of ``ceph-deploy`` will have a ``--version`` flag you can use, otherwise check +with your package manager and update if there is anything new. + +Why is feature X not implemented? +--------------------------------- +Usually, features are added when/if it is sensible for someone that wants to +get started with ceph and said feature would make sense in that context. If +you believe this is the case and you've read "`what this tool is not`_" and +still think feature ``X`` should exist in ceph-deploy, open a feature request +in the ceph tracker: http://tracker.ceph.com/projects/ceph-deploy/issues + +A command gave me an error, what is going on? +--------------------------------------------- +Most of the commands for ``ceph-deploy`` are meant to be run remotely in a host +that you have configured when creating the initial config. If a given command +is not working as expected try to run the command that failed in the remote +host and assert the behavior there. + +If the behavior in the remote host is the same, then it is probably not +something wrong with ``ceph-deploy`` per-se. Make sure you capture the output +of both the ``ceph-deploy`` output and the output of the command in the remote +host. + +Issues with monitors +-------------------- +If your monitors are not starting, make sure that the ``{hostname}`` you used +when you ran ``ceph-deploy mon create {hostname}`` match the actual ``hostname -s`` +in the remote host. + +Newer versions of ``ceph-deploy`` should warn you if the results are different +but that might prevent the monitors from reaching quorum. diff --git a/docs/source/install.rst b/docs/source/install.rst new file mode 100644 index 0000000..8291f88 --- /dev/null +++ b/docs/source/install.rst @@ -0,0 +1,219 @@ + +.. _install: + +install +=========== +A few different distributions are supported with some flags to allow some +customization for installing ceph on remote nodes. + +Supported distributions: + +* Ubuntu +* Debian +* Fedora +* RedHat +* CentOS +* Suse +* Scientific Linux +* Arch Linux + +Before any action is taken, a platform detection call is done to make sure that +the platform that will get ceph installed is the correct one. If the platform +is not supported no further actions will proceed and an error message will be +displayed, similar to:: + + [ceph_deploy][ERROR ] UnsupportedPlatform: Platform is not supported: Mandriva + + +.. _install-stable-releases: + + +.. _note: + Although ceph-deploy installs some extra dependencies, do note that those + are not going to be uninstalled. For example librbd1 and librados which + qemu-kvm depends on, and removing it would cause issues for qemu-kvm. + +Distribution Notes +------------------ + +RPMs +^^^^ +On RPM-based distributions, ``yum-plugin-priorities`` is installed to make sure +that upstream ceph.com repos have a higher priority than distro repos. + +Because of packaging splits that are present in downstream repos that may not +be present in ceph.com repos, ``ceph-deploy`` enables the ``check_obsoletes`` +flag for the Yum priorities plugin. + +.. versionchanged:: 1.5.22 + Enable ``check_obsoletes`` by default + +RHEL +^^^^ +When installing packages on systems running Red Hat Enterprise Linux (RHEL), +``ceph-deploy`` will not install the latest upstream release by default. On other +distros, running ``ceph-deploy install`` without the ``--release`` flag will +install the latest upstream release by default (i.e. firefly, giant, etc). On +RHEL, the ``--release`` flag *must* be used if you wish to use the upstream +packages hosted on http://ceph.com. + +.. versionchanged:: 1.5.22 + Require ``--release`` flag to get upstream packages on RHEL + +Specific Releases +----------------- +By default the *latest* release is assumed. This value changes when +newer versions are available. If you are automating deployments it is better to +specify exactly what release you need:: + + ceph-deploy install --release emperor {host} + + +Note that the ``--stable`` flag for specifying a Ceph release is deprecated and +should no longer be used starting from version 1.3.6. + +.. versionadded:: 1.4.0 + +.. _install-unstable-releases: + +Unstable releases +----------------- +If you need to test cutting edge releases or a specific feature of ceph that +has yet to make it to a stable release you can specify this as well with +ceph-deploy with a couple of flags. + +To get the latest development release:: + + ceph-deploy install --testing {host} + +For a far more granular approach, you may want to specify a branch or a tag +from the repository, if none specified it fall backs to the latest commit in +master:: + + ceph-deploy install --dev {branch or tag} {host} + + +.. _install-behind-firewall: + +Behind Firewall +--------------- +For restrictive environments there are a couple of options to be able to +install ceph. + +If hosts have had some customizations with custom repositories and all is +needed is to proceed with a install of ceph, we can skip altering the source +repositories like:: + + ceph-deploy install --no-adjust-repos {host} + +Note that you will need to have working repositories that have all the +dependencies that ceph needs. In some distributions, other repos (besides the +ceph repos) will be added, like EPEL for CentOS. + +However, if there is a ceph repo mirror already set up you can point to it +before installation proceeds. For this specific action you will need two +arguments passed in (or optionally use environment variables). + +The repository URL and the GPG URL can be specified like this:: + + ceph-deploy install --repo-url {http mirror} --gpg-url {http gpg url} {host} + +Optionally, you can use the following environment variables: + +* ``CEPH_DEPLOY_REPO_URL`` +* ``CEPH_DEPLOY_GPG_URL`` + +Those values will be used to write to the ceph ``sources.list`` (in Debian and +Debian-based distros) or the ``yum.repos`` file for RPM distros and will skip +trying to compose the right URL for the release being installed. + +.. note:: + It is currently not possible to specify what version/release is to be + installed when ``--repo-url`` is used. + +It is strongly suggested that both flags be provided. However, the +``--gpg-url`` will default to the current one in the ceph repository:: + + https://download.ceph.com/keys/release.asc + +.. versionadded:: 1.3.3 + + +Local Mirrors +------------- +``ceph-deploy`` supports local mirror installation by syncing a repository to +remote servers and configuring correctly the remote hosts to install directly +from those local paths (as opposed to going through the network). + +The one requirement for this option to work is to have a ``release.asc`` at the +top of the directory that holds the repository files. + +That file is used by Ceph as the key for its signed packages and it is usually +retrieved from:: + + https://download.ceph.com/keys/release.asc + +This is how it would look the process to get Ceph installed from a local +repository in an admin host:: + + $ ceph-deploy install --local-mirror ~/tmp/rpm-mirror/ceph.com/rpm-emperor/el6 node2 + [ceph_deploy.cli][INFO ] Invoked (1.4.1): /bin/ceph-deploy install --local-mirror /Users/alfredo/tmp/rpm-mirror/ceph.com/rpm-emperor/el6 node2 + [ceph_deploy.install][DEBUG ] Installing stable version emperor on cluster ceph hosts node2 + [ceph_deploy.install][DEBUG ] Detecting platform for host node2 ... + [node2][DEBUG ] connected to host: node2 + [node2][DEBUG ] detect platform information from remote host + [node2][DEBUG ] detect machine type + [ceph_deploy.install][INFO ] Distro info: CentOS 6.4 Final + [node2][INFO ] installing ceph on node2 + [node2][INFO ] syncing file: noarch/ceph-deploy-1.3-0.noarch.rpm + [node2][INFO ] syncing file: noarch/ceph-deploy-1.3.1-0.noarch.rpm + [node2][INFO ] syncing file: noarch/ceph-deploy-1.3.2-0.noarch.rpm + [node2][INFO ] syncing file: noarch/ceph-release-1-0.el6.noarch.rpm + [node2][INFO ] syncing file: noarch/index.html + [node2][INFO ] syncing file: noarch/index.html?C=D;O=A + [node2][INFO ] syncing file: noarch/index.html?C=D;O=D + [node2][INFO ] syncing file: noarch/index.html?C=M;O=A + ... + [node2][DEBUG ] + [node2][DEBUG ] Installed: + [node2][DEBUG ] ceph.x86_64 0:0.72.1-0.el6 + [node2][DEBUG ] + [node2][DEBUG ] Complete! + [node2][INFO ] Running command: sudo ceph --version + [node2][DEBUG ] ceph version 0.72.1 + (4d923861868f6a15dcb33fef7f50f674997322de) + +.. versionadded:: 1.5.0 + + +Repo file only +-------------- +The ``install`` command has a flag that offers flexibility for installing +"repo files" only, avoiding installation of ceph and its dependencies. + +These "repo files" are the configuration files for package managers ("yum" or +"apt" for example) that point to the right repository information so that +certain packages become available. + +For APT these files would be `list files` and for YUM they would be `repo +files`. Regardless of the package manager, ceph-deploy is able to install this +file correctly so that the Ceph packages are available. This is useful in +a situation where a massive upgrade is needed and ``ceph-deploy`` would be too +slow to install sequentially in every host. + +Repositories are specified in the ``cephdeploy.conf`` (or +``$HOME/.cephdeploy.conf``) file. If a specific repository section is needed, +it can be specified with the ``--release`` flag:: + + ceph-deploy install --repo --release firefly {HOSTS} + +The above command would install the ``firefly`` repo file in every ``{HOST}`` +specified. + +If a repository section exists with the ``default = True`` flag, there is no +need to specify anything else and the repo file can be installed simply by +passing in the hosts:: + + ceph-deploy install --repo {HOSTS} + +.. versionadded:: 1.5.10 diff --git a/docs/source/mds.rst b/docs/source/mds.rst new file mode 100644 index 0000000..c7b1b10 --- /dev/null +++ b/docs/source/mds.rst @@ -0,0 +1,20 @@ +.. _mds: + +mds +======= +The ``mds`` subcommand provides an interface to interact with a cluster's +CephFS Metadata servers. + +create +---------- +Deploy MDS instances by specifying directly like:: + + ceph-deploy mds create node1 node2 node3 + +This will create an MDS on the given node(s) and start the +corresponding service. + +The MDS instances will default to having a name corresponding to the hostname +where it runs. For example, ``mds.node1``. + +.. note:: Removing MDS instances is not yet supported diff --git a/docs/source/mon.rst b/docs/source/mon.rst new file mode 100644 index 0000000..7942500 --- /dev/null +++ b/docs/source/mon.rst @@ -0,0 +1,106 @@ +.. _mon: + +mon +======= +The ``mon`` subcommand provides an interface to interact with a cluster's +monitors. The tool makes a few assumptions that are needed to implement the +most common scenarios. Monitors are usually very particular in what they need +to work correctly. + +.. note:: Before version v1.5.33 ceph-deploy relied upon ``ceph-create-keys``. + Using ``ceph-create-keys`` produced a side effect of deploying all + bootstrap keys on the mon node so making all mon nodes admin nodes. + This can be recreated by running the admin command on all mon nodes + see :ref:`admin` section. + +create-initial +------------------ +Will deploy for monitors defined in ``mon initial members``, wait until +they form quorum and then ``gatherkeys``, reporting the monitor status along +the process. If monitors don't form quorum the command will eventually +time out. + +This is the *preferred* way of initially deploying monitors since it will +compound a few of the steps needed together while looking for possible issues +along the way. + +:: + + ceph-deploy mon create-initial + + +create +---------- +Deploy monitors by specifying directly like:: + + ceph-deploy mon create node1 node2 node3 + +If no hosts are passed it will default to use the `mon initial members` +defined in the configuration. + +Please note that if this is an initial monitor deployment, the preferred way +is to use ``create-initial``. + + +add +------- +Add a monitor to an existing cluster:: + + ceph-deploy mon add node1 + +Since monitor hosts can have different network interfaces, this command allows +you to specify the interface IP in a few different ways. + +**``--address``**: this will explicitly override any configured address for +that host. Usage:: + + ceph-deploy mon add node1 --address 192.168.1.10 + + +**ceph.conf**: If a section for the node that is being added exists and it +defines a ``mon addr`` key. For example:: + + [mon.node1] + mon addr = 192.168.1.10 + +**resolving/dns**: if the monitor address is not defined in the configuration file +nor overridden in the command-line it will fall-back to resolving the address +of the provided host. + +.. warning:: If the monitor host has multiple addresses you should specify + the address directly to ensure the right IP is used. Please + note, only one node can be added at a time. + +.. versionadded:: 1.4.0 + + +destroy +----------- +Completely remove monitors on a remote host. Requires hostname(s) as +arguments:: + + ceph-deploy mon destroy node1 node2 node3 + + +--keyrings +-------------- +Both ``create`` and ``create-initial`` subcommands can be used with the +``--keyrings`` flag that accepts a path to search for keyring files. + +When this flag is used it will then look into the passed in path for files that +end with ``.keyring`` and will proceed to concatenate them in memory and seed +them to the monitor being created in the remote mode. + +This is useful when having several different keyring files that are needed at +initial setup, but normally, ceph-deploy will only use the +``$cluster.mon.keyring`` file for initial seeding. + +To keep things in order, create a directory and use that directory to store all +the keyring files that are needed. This is how the commands would look like for +a directory called ``keyrings``:: + + ceph-deploy mon --keyrings keyrings create-initial + +Or for the ``create`` sub-command:: + + ceph-deploy mon --keyrings keyrings create {nodes} diff --git a/docs/source/new.rst b/docs/source/new.rst new file mode 100644 index 0000000..e71d4dd --- /dev/null +++ b/docs/source/new.rst @@ -0,0 +1,75 @@ +.. _new: + +new +======= +This subcommand is used to generate a working ``ceph.conf`` file that will +contain important information for provisioning nodes and/or adding them to +a cluster. + + +SSH Keys +-------- +Ideally, all nodes will be pre-configured to have their passwordless access +from the machine executing ``ceph-deploy`` but you can also take advantage of +automatic detection of this when calling the ``new`` subcommand. + +Once called, it will try to establish an SSH connection to the hosts passed +into the ``new`` subcommand, and determine if it can (or cannot) connect +without a password prompt. + +If it can't proceed, it will try to copy *existing* keys to the remote host, if +those do not exist, then passwordless ``rsa`` keys will be generated for the +current user and those will get used. + +This feature can be overridden in the ``new`` subcommand like:: + + ceph-deploy new --no-ssh-copykey + +.. versionadded:: 1.3.2 + + +Creating a new configuration +---------------------------- + +To create a new configuration file and secret key, decide what hosts +will run ``ceph-mon``, and run:: + + ceph-deploy new MON [MON..] + +listing the hostnames of the monitors. Each ``MON`` can be + + * a simple hostname. It must be DNS resolvable without the fully + qualified domain name. + * a fully qualified domain name. The hostname is assumed to be the + leading component up to the first ``.``. + * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified + domain name or IP address. For example, ``foo``, + ``foo.example.com``, ``foo:something.example.com``, and + ``foo:1.2.3.4`` are all valid. Note, however, that the hostname + should match that configured on the host ``foo``. + +The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your +current directory. + + +Edit initial cluster configuration +---------------------------------- + +You want to review the generated ``ceph.conf`` file and make sure that +the ``mon_host`` setting contains the IP addresses you would like the +monitors to bind to. These are the IPs that clients will initially +contact to authenticate to the cluster, and they need to be reachable +both by external client-facing hosts and internal cluster daemons. + + +--cluster-network --public-network +---------------------------------- +Are used to provide subnets so that nodes can communicate within that +network. If passed, validation will occur by looking at the remote IP addresses +and making sure that at least one of those addresses is valid for the given +subnet. + +Those values will also be added to the generated ``ceph.conf``. If IPs are not +correct (or not in the subnets specified) an error will be raised. + +.. versionadded:: 1.5.13 diff --git a/docs/source/pkg.rst b/docs/source/pkg.rst new file mode 100644 index 0000000..2f70d08 --- /dev/null +++ b/docs/source/pkg.rst @@ -0,0 +1,58 @@ + +.. _pkg: + +pkg +======= +Provides a simple interface to install or remove packages on a remote host (or +a number of remote hosts). + +Packages to install or remove *must* be comma separated when there are more +than one package in the argument. + +.. note:: + This feature only supports installing on same distributions. You cannot + install a given package on different distributions at the same time. + + +.. _pkg-install: + +--install +------------- +This flag will use the package (or packages) passed in to perform an installation using +the distribution package manager in a non-interactive way. Package managers +that tend to ask for confirmation will not prompt. + +An example call to install a few packages on 2 hosts (with hostnames like +``node1`` and ``node2``) would look like:: + + ceph-deploy pkg --install vim,zsh node1 node2 + [ceph_deploy.cli][INFO ] Invoked (1.3.3): /bin/ceph-deploy pkg --install vim,zsh node1 node2 + [node1][DEBUG ] connected to host: node1 + [node1][DEBUG ] detect platform information from remote host + [node1][DEBUG ] detect machine type + [ceph_deploy.pkg][INFO ] Distro info: Ubuntu 12.04 precise + [node1][INFO ] installing packages on node1 + [node1][INFO ] Running command: sudo env DEBIAN_FRONTEND=noninteractive apt-get -q install --assume-yes vim zsh + ... + + +.. _pkg-remove: + +--remove +------------ +This flag will use the package (or packages) passed in to remove them using +the distribution package manager in a non-interactive way. Package managers +that tend to ask for confirmation will not prompt. + +An example call to remove a few packages on 2 hosts (with hostnames like +``node1`` and ``node2``) would look like:: + + + [ceph_deploy.cli][INFO ] Invoked (1.3.3): /bin/ceph-deploy pkg --remove vim,zsh node1 node2 + [node1][DEBUG ] connected to host: node1 + [node1][DEBUG ] detect platform information from remote host + [node1][DEBUG ] detect machine type + [ceph_deploy.pkg][INFO ] Distro info: Ubuntu 12.04 precise + [node1][INFO ] removing packages from node1 + [node1][INFO ] Running command: sudo apt-get -q remove -f -y --force-yes -- vim zsh + ... diff --git a/docs/source/repo.rst b/docs/source/repo.rst new file mode 100644 index 0000000..e862048 --- /dev/null +++ b/docs/source/repo.rst @@ -0,0 +1,77 @@ +.. _repo: + +repo +===== +Provides a simple interface for installing or removing new Apt or RPM repo files. + +Apt repo files are added in ``/etc/apt/sources.list.d``, while RPM repo files +are added in ``/etc/yum.repos.d``. + +.. _repo-install: + +Installing repos +---------------- + +Repos can be defined through CLI arguments, or they can be defined in cephdeploy.conf +and referenced by name. + +The general format for adding a repo is:: + + ceph-deploy repo --repo-url --gpg-url [host [host ...]] + +As an example of adding the Ceph rpm-hammer repo for EL7:: + + ceph-deploy repo --repo-url http://ceph.com/rpm-hammer/el7/x86_64/ --gpg-url 'https://download.ceph.com/keys/release.asc' ceph HOST1 + +In this example, the repo-name is ``ceph``, and the file ``/etc/yum.repos.d/ceph.repo`` +will be created. Because ``--gpg-url`` was passed, the repo will have ``gpgcheck=1`` +and will reference the given GPG key. + +For APT, the equivalent example would be:: + + ceph-deploy repo --repo-url http://ceph.com/debian-hammer --gpg-url 'https://download.ceph.com/keys/release.asc' ceph HOST1 + +If a repo was defined in cephdeploy.conf, like the following:: + + [ceph-mon] + name=Ceph-MON + baseurl=https://cephmirror.com/hammer/el7/x86_64 + gpgkey=https://cephmirror.com/release.asc + gpgcheck=1 + proxy=_none_ + +This could be installed with this command:: + + ceph-deploy repo ceph-mon HOST1 + +``ceph-deploy repo`` will always check to see if a matching repo name exists in +cephdeploy.conf first. + +It is possible that repos may be password protected, and a URL may be structured like so:: + + https://:@host.com/... + +In this case, Apt repositories will be created with mode ``0600`` to make +sure the password is not world-readable. You can also use the +``CEPH_DEPLOY_REPO_URL`` and ``CEPH_DEPLOY_GPG_URL`` environment variables in lieu +of ``--repo-url`` and ``--gpg-url`` to avoid placing sensitive credentials on the +command line (and thus visible in the process table). + +.. note:: + The writing of a repo file as mode 0600 when a password is present is only done + for Apt repos currently. + +.. _repo-remove: + +Removing +-------- + +Repos are simply removed by name. The general format for adding a repo is:: + + ceph-deploy repo --remove [host [host...]] + +To remove a repo at ``/etc/yum.repos.d/ceph.repo``, do:: + + ceph-deploy repo --remove ceph HOST1 + +.. versionadded:: 1.5.27 diff --git a/docs/source/rgw.rst b/docs/source/rgw.rst new file mode 100644 index 0000000..4a060c5 --- /dev/null +++ b/docs/source/rgw.rst @@ -0,0 +1,36 @@ +.. _rgw: + +rgw +======= +The ``rgw`` subcommand provides an interface to interact with a cluster's +RADOS Gateway instances. + +create +---------- +Deploy RGW instances by specifying directly like:: + + ceph-deploy rgw create node1 node2 node3 + +This will create an instance of RGW on the given node(s) and start the +corresponding service. The daemon will listen on the default port of 7480. + +The RGW instances will default to having a name corresponding to the hostname +where it runs. For example, ``rgw.node1``. + +If a custom name is desired for the RGW daemon, it can be specific like:: + + ceph-deploy rgw create node1:foo + +Custom names are automatically prefixed with "rgw.", so the resulting daemon +name would be "rgw.foo". + +.. note:: If an error is presented about the ``bootstrap-rgw`` keyring not being + found, that is because the ``bootstrap-rgw`` only been auto-created on + new clusters starting with the Hammer release. + +.. versionadded:: 1.5.23 + +.. note:: Removing RGW instances is not yet supported + +.. note:: Changing the port on which RGW will listen at deployment time is not yet + supported. diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..7d7303e --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,3 @@ +pytest >=2.1.3 +tox >=1.2 +mock >=1.0.1 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ + diff --git a/scripts/build-debian.sh b/scripts/build-debian.sh new file mode 100755 index 0000000..5b59670 --- /dev/null +++ b/scripts/build-debian.sh @@ -0,0 +1,87 @@ +#! /bin/sh + +# Tag tree and update version number in change log and +# in setup.py before building. + +REPO=debian-repo +COMPONENT=main +KEYID=${KEYID:-03C3951A} # default is autobuild keyid +DEB_DIST="sid wheezy squeeze quantal precise oneiric natty raring" +DEB_BUILD=$(lsb_release -s -c) +RELEASE=0 + +if [ X"$1" = X"--release" ] ; then + echo "Release Build" + RELEASE=1 +fi + +if [ ! -d debian ] ; then + echo "Are we in the right directory" + exit 1 +fi + +if gpg --list-keys 2>/dev/null | grep -q ${KEYID} ; then + echo "Signing packages and repo with ${KEYID}" +else + echo "Package signing key (${KEYID}) not found" + echo "Have you set \$GNUPGHOME ? " + exit 3 +fi + +# Clean up any leftover builds +rm -f ../ceph-deploy*.dsc ../ceph-deploy*.changes ../ceph-deploy*.deb ../ceph-deploy.tgz +rm -rf ./debian-repo + +# Apply backport tag if release build +# I am going to jump out the window if this is not fixed and removed from the source +# of this package. There is absolutely **NO** reason why we need to hard code the +# DEBEMAIL like this. +if [ $RELEASE -eq 1 ] ; then + DEB_VERSION=$(dpkg-parsechangelog | sed -rne 's,^Version: (.*),\1, p') + BP_VERSION=${DEB_VERSION}${BPTAG} + DEBEMAIL="alfredo.deza@inktank.com" dch -D $DIST --force-distribution -b -v "$BP_VERSION" "$comment" + dpkg-source -b . +fi + +# Build Package +echo "Building for dist: $DEB_BUILD" +dpkg-buildpackage -k$KEYID +if [ $? -ne 0 ] ; then + echo "Build failed" + exit 2 +fi + +# Build Repo +PKG=../ceph-deploy*.changes +mkdir -p $REPO/conf +if [ -e $REPO/conf/distributions ] ; then + rm -f $REPO/conf/distributions +fi + +for DIST in $DEB_DIST ; do + cat <> $REPO/conf/distributions +Codename: $DIST +Suite: stable +Components: $COMPONENT +Architectures: amd64 armhf i386 source +Origin: Inktank +Description: Ceph distributed file system +DebIndices: Packages Release . .gz .bz2 +DscIndices: Sources Release .gz .bz2 +Contents: .gz .bz2 +SignWith: $KEYID + +EOF +done + +echo "Adding package to repo, dist: $DEB_BUILD ($PKG)" +reprepro --ask-passphrase -b $REPO -C $COMPONENT --ignore=undefinedtarget --ignore=wrongdistribution include $DEB_BUILD $PKG + +#for DIST in $DEB_DIST +#do +# [ "$DIST" = "$DEB_BUILD" ] && continue +# echo "Copying package to dist: $DIST" +# reprepro -b $REPO --ignore=undefinedtarget --ignore=wrongdistribution copy $DIST $DEB_BUILD ceph-deploy +#done + +echo "Done" diff --git a/scripts/build-rpm.sh b/scripts/build-rpm.sh new file mode 100755 index 0000000..9b330e4 --- /dev/null +++ b/scripts/build-rpm.sh @@ -0,0 +1,59 @@ +#! /bin/sh + +# Tag tree and update version number in change log and +# in setup.py before building. + +REPO=rpm-repo +KEYID=${KEYID:-03C3951A} # Default is autobuild-key +BUILDAREA=./rpmbuild +DIST=el6 +RPM_BUILD=$(lsb_release -s -c) + +if [ ! -e setup.py ] ; then + echo "Are we in the right directory" + exit 1 +fi + +if gpg --list-keys 2>/dev/null | grep -q ${KEYID} ; then + echo "Signing packages and repo with ${KEYID}" +else + echo "Package signing key (${KEYID}) not found" + echo "Have you set \$GNUPGHOME ? " + exit 3 +fi + +if ! CREATEREPO=`which createrepo` ; then + echo "Please install the createrepo package" + exit 4 +fi + +# Create Tarball +python setup.py sdist --formats=bztar + +# Build RPM +mkdir -p rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} +BUILDAREA=`readlink -fn ${BUILDAREA}` ### rpm wants absolute path +cp ceph-deploy.spec ${BUILDAREA}/SPECS +cp dist/*.tar.bz2 ${BUILDAREA}/SOURCES +echo "buildarea is: ${BUILDAREA}" +rpmbuild -ba --define "_topdir ${BUILDAREA}" --define "_unpackaged_files_terminate_build 0" ${BUILDAREA}/SPECS/ceph-deploy.spec + +# create repo +DEST=${REPO}/${DIST} +mkdir -p ${REPO}/${DIST} +cp -r ${BUILDAREA}/*RPMS ${DEST} + +# Sign all the RPMs for this release +rpm_list=`find ${REPO} -name "*.rpm" -print` +rpm --addsign --define "_gpg_name ${KEYID}" $rpm_list + +# Construct repodata +for dir in ${DEST}/SRPMS ${DEST}/RPMS/* +do + if [ -d $dir ] ; then + createrepo $dir + gpg --detach-sign --armor -u ${KEYID} $dir/repodata/repomd.xml + fi +done + +exit 0 diff --git a/scripts/ceph-deploy b/scripts/ceph-deploy new file mode 100755 index 0000000..cc8dd62 --- /dev/null +++ b/scripts/ceph-deploy @@ -0,0 +1,21 @@ +#!/usr/bin/env python +import os +import platform +import sys +""" +ceph-deploy - admin tool for ceph +""" + +if os.path.exists('/usr/share/pyshared/ceph_deploy'): + sys.path.insert(0,'/usr/share/pyshared/ceph_deploy') +elif os.path.exists('/usr/share/ceph-deploy'): + sys.path.insert(0,'/usr/share/ceph-deploy') +elif os.path.exists('/usr/share/pyshared/ceph-deploy'): + sys.path.insert(0,'/usr/share/pyshared/ceph-deploy') +elif os.path.exists('/usr/lib/python2.6/site-packages/ceph_deploy'): + sys.path.insert(0,'/usr/lib/python2.6/site-packages/ceph_deploy') + +from ceph_deploy.cli import main + +if __name__ == '__main__': + sys.exit(main()) diff --git a/scripts/jenkins-build b/scripts/jenkins-build new file mode 100755 index 0000000..9d0d212 --- /dev/null +++ b/scripts/jenkins-build @@ -0,0 +1,53 @@ +#!/bin/sh + +# This is the script that runs inside Jenkins. +# http://jenkins.ceph.com/job/ceph-deploy/ + +set -x +set -e + +# Jenkins will set $RELEASE as a parameter in the job configuration. +if $RELEASE ; then + # This is a formal release. Sign it with the release key. + export GNUPGHOME=/home/jenkins-build/build/gnupg.ceph-release/ + export KEYID=17ED316D +else + # This is an automatic build. Sign it with the autobuild key. + export GNUPGHOME=/home/jenkins-build/build/gnupg.autobuild/ + export KEYID=03C3951A +fi + +HOST=$(hostname --short) +echo "Building on ${HOST}" +echo " DIST=${DIST}" +echo " BPTAG=${BPTAG}" +echo " KEYID=${KEYID}" +echo " WS=$WORKSPACE" +echo " PWD=$(pwd)" +echo " BRANCH=$BRANCH" + +case $HOST in +gitbuilder-*-rpm*) + rm -rf rpm-repo dist/* build/rpmbuild + ./scripts/build-rpm.sh --release + if [ $? -eq 0 ] ; then + cd $WORKSPACE + mkdir -p dist + fi + ;; +gitbuilder-cdep-deb* | tala* | mira*) + rm -rf debian-repo + rm -rf dist + rm -f ../*.changes ../*.dsc ../*.gz ../*.diff + ./scripts/build-debian.sh --release + if [ $? -eq 0 ] ; then + cd $WORKSPACE + mkdir -p dist + mv ../*.changes ../*.dsc ../*.deb ../*.tar.gz dist/. + fi + ;; +*) + echo "Can't determine build host type" + exit 4 + ;; +esac diff --git a/scripts/jenkins-pull-requests-build b/scripts/jenkins-pull-requests-build new file mode 100644 index 0000000..24f2867 --- /dev/null +++ b/scripts/jenkins-pull-requests-build @@ -0,0 +1,14 @@ +#!/bin/sh + +set -x +set -e + +# This is the script that runs inside Jenkins for each pull request. +# http://jenkins.ceph.com/job/ceph-deploy-pull-requests/ + +virtualenv --version +virtualenv venv +. venv/bin/activate +#venv/bin/python setup.py develop +venv/bin/pip install tox +venv/bin/tox -rv diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..b9f9db4 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[tool:pytest] +norecursedirs = .* _* virtualenv diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..1a465b9 --- /dev/null +++ b/setup.py @@ -0,0 +1,68 @@ +from setuptools import setup, find_packages +import os +import sys +import ceph_deploy + + +def read(fname): + path = os.path.join(os.path.dirname(__file__), fname) + f = open(path) + return f.read() + + +install_requires = ['remoto'] +pyversion = sys.version_info[:2] +if pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1): + install_requires.append('argparse') + + +setup( + name='ceph-deploy', + version=ceph_deploy.__version__, + packages=find_packages(), + + author='Inktank', + author_email='ceph-devel@vger.kernel.org', + description='Deploy Ceph with minimal infrastructure', + long_description=read('README.rst'), + license='MIT', + keywords='ceph deploy', + url="https://github.com/ceph/ceph-deploy", + + install_requires=[ + 'setuptools', + ] + install_requires, + + tests_require=[ + 'pytest >=2.1.3', + 'mock >=1.0b1', + ], + + entry_points={ + + 'console_scripts': [ + 'ceph-deploy = ceph_deploy.cli:main', + ], + + 'ceph_deploy.cli': [ + 'new = ceph_deploy.new:make', + 'install = ceph_deploy.install:make', + 'uninstall = ceph_deploy.install:make_uninstall', + 'purge = ceph_deploy.install:make_purge', + 'purgedata = ceph_deploy.install:make_purge_data', + 'mon = ceph_deploy.mon:make', + 'gatherkeys = ceph_deploy.gatherkeys:make', + 'osd = ceph_deploy.osd:make', + 'disk = ceph_deploy.osd:make_disk', + 'mds = ceph_deploy.mds:make', + 'mgr = ceph_deploy.mgr:make', + 'forgetkeys = ceph_deploy.forgetkeys:make', + 'config = ceph_deploy.config:make', + 'admin = ceph_deploy.admin:make', + 'pkg = ceph_deploy.pkg:make', + 'rgw = ceph_deploy.rgw:make', + 'repo = ceph_deploy.repo:make', + ], + + }, + ) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..6ec796f --- /dev/null +++ b/tox.ini @@ -0,0 +1,33 @@ +[tox] +envlist = py27, py35, py36, flake8 +skip_missing_interpreters = true + +[testenv] +deps= + pytest + mock==1.0.1 +setenv = + CEPH_DEPLOY_TEST = 1 +commands=py.test -v {posargs:ceph_deploy/tests} + +[testenv:docs] +basepython=python +changedir=docs/source +deps=sphinx +commands= + sphinx-build -W -b html -d {envtmpdir}/doctrees . {envtmpdir}/html + +[testenv:flake8] +deps=flake8 +commands=flake8 --select=F,E9 --exclude=vendor {posargs:ceph_deploy} + +# Note that ``remoto`` is not added as a dependency here as it is assumed +# that the tester will have the distro version of remoto installed + +[testenv:py26-novendor] +sitepackages=True +deps= + +[testenv:py27-novendor] +sitepackages=True +deps= diff --git a/vendor.py b/vendor.py new file mode 100644 index 0000000..9d49efa --- /dev/null +++ b/vendor.py @@ -0,0 +1,112 @@ +from __future__ import print_function + +import subprocess +import os +from os import path +import re +import traceback +import sys + + +error_msg = """ +This library depends on sources fetched when packaging that failed to be +retrieved. + +This means that it will *not* work as expected. Errors encountered: +""" + + +def run(cmd): + print('[vendoring] Running command: %s' % ' '.join(cmd)) + try: + result = subprocess.Popen( + cmd, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE + ) + except Exception: + # if building with python2.5 this makes it compatible + _, error, _ = sys.exc_info() + print_error([], traceback.format_exc(error).split('\n')) + raise SystemExit(1) + + if result.wait(): + print_error(result.stdout.readlines(), result.stderr.readlines()) + + return result.returncode + + +def print_error(stdout, stderr): + print('*'*80) + print(error_msg) + for line in stdout: + print(line) + for line in stderr: + print(line) + print('*'*80) + + +def vendor_library(name, version, cmd=None): + this_dir = path.dirname(path.abspath(__file__)) + vendor_dest = path.join(this_dir, 'ceph_deploy/lib/vendor/%s' % name) + vendor_init = path.join(vendor_dest, '__init__.py') + vendor_src = path.join(this_dir, name) + vendor_module = path.join(vendor_src, name) + current_dir = os.getcwd() + + if path.exists(vendor_src): + run(['rm', '-rf', vendor_src]) + + if path.exists(vendor_init): + # The following read/regex is done so that we can parse module metadata without the need + # to import it. Module metadata is defined as variables with double underscores. We are + # particularly insteresting in the version string, so we look into single or double quoted + # values, like: __version__ = '1.0' + module_file = open(vendor_init).read() + metadata = dict(re.findall(r"__([a-z]+)__\s*=\s*['\"]([^'\"]*)['\"]", module_file)) + if metadata.get('version') != version: + run(['rm', '-rf', vendor_dest]) + + if not path.exists(vendor_dest): + rc = run(['git', 'clone', 'git://git.ceph.com/%s' % name]) + if rc: + print("%s: git clone failed using ceph.com url with rc %s, trying github.com" % (path.basename(__file__), rc)) + run(['git', 'clone', 'https://github.com/ceph/%s.git' % name]) + os.chdir(vendor_src) + run(['git', 'checkout', version]) + if cmd: + run(cmd) + run(['mv', vendor_module, vendor_dest]) + os.chdir(current_dir) + + +def clean_vendor(name): + """ + Ensure that vendored code/dirs are removed, possibly when packaging when + the environment flag is set to avoid vendoring. + """ + this_dir = path.dirname(path.abspath(__file__)) + vendor_dest = path.join(this_dir, 'ceph_deploy/lib/vendor/%s' % name) + run(['rm', '-rf', vendor_dest]) + + +def vendorize(vendor_requirements): + """ + This is the main entry point for vendorizing requirements. It expects + a list of tuples that should contain the name of the library and the + version. + + For example, a library ``foo`` with version ``0.0.1`` would look like:: + + vendor_requirements = [ + ('foo', '0.0.1'), + ] + """ + + for library in vendor_requirements: + if len(library) == 2: + name, version = library + cmd = None + elif len(library) == 3: # a possible cmd we need to run + name, version, cmd = library + vendor_library(name, version, cmd)