]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-deploy.git/commitdiff
remove remote_compile decorator
authorAlfredo Deza <alfredo.deza@inktank.com>
Mon, 14 Oct 2013 19:30:25 +0000 (15:30 -0400)
committerAlfredo Deza <alfredo.deza@inktank.com>
Tue, 15 Oct 2013 12:51:58 +0000 (08:51 -0400)
Signed-off-by: Alfredo Deza <alfredo.deza@inktank.com>
93 files changed:
.gitignore [new file with mode: 0644]
CHANGELOG.rst [new file with mode: 0644]
LICENSE [new file with mode: 0644]
MANIFEST.in [new file with mode: 0644]
README.rst [new file with mode: 0644]
bootstrap [new file with mode: 0755]
ceph-deploy.spec [new file with mode: 0644]
ceph_deploy/__init__.py [new file with mode: 0644]
ceph_deploy/admin.py [new file with mode: 0644]
ceph_deploy/cli.py [new file with mode: 0644]
ceph_deploy/cliutil.py [new file with mode: 0644]
ceph_deploy/conf.py [new file with mode: 0644]
ceph_deploy/config.py [new file with mode: 0644]
ceph_deploy/connection.py [new file with mode: 0644]
ceph_deploy/exc.py [new file with mode: 0644]
ceph_deploy/forgetkeys.py [new file with mode: 0644]
ceph_deploy/gatherkeys.py [new file with mode: 0644]
ceph_deploy/hosts/__init__.py [new file with mode: 0644]
ceph_deploy/hosts/centos/__init__.py [new file with mode: 0644]
ceph_deploy/hosts/centos/install.py [new file with mode: 0644]
ceph_deploy/hosts/centos/mon/__init__.py [new file with mode: 0644]
ceph_deploy/hosts/centos/mon/create.py [new file with mode: 0644]
ceph_deploy/hosts/centos/uninstall.py [new file with mode: 0644]
ceph_deploy/hosts/common.py [new file with mode: 0644]
ceph_deploy/hosts/debian/__init__.py [new file with mode: 0644]
ceph_deploy/hosts/debian/install.py [new file with mode: 0644]
ceph_deploy/hosts/debian/mon/__init__.py [new file with mode: 0644]
ceph_deploy/hosts/debian/mon/create.py [new file with mode: 0644]
ceph_deploy/hosts/debian/uninstall.py [new file with mode: 0644]
ceph_deploy/hosts/fedora/__init__.py [new file with mode: 0644]
ceph_deploy/hosts/fedora/install.py [new file with mode: 0644]
ceph_deploy/hosts/fedora/mon/__init__.py [new file with mode: 0644]
ceph_deploy/hosts/fedora/mon/create.py [new file with mode: 0644]
ceph_deploy/hosts/fedora/uninstall.py [new file with mode: 0644]
ceph_deploy/hosts/remotes.py [new file with mode: 0644]
ceph_deploy/hosts/suse/__init__.py [new file with mode: 0644]
ceph_deploy/hosts/suse/install.py [new file with mode: 0644]
ceph_deploy/hosts/suse/mon/__init__.py [new file with mode: 0644]
ceph_deploy/hosts/suse/mon/create.py [new file with mode: 0644]
ceph_deploy/hosts/suse/uninstall.py [new file with mode: 0644]
ceph_deploy/install.py [new file with mode: 0644]
ceph_deploy/lib/__init__.py [new file with mode: 0644]
ceph_deploy/lsb.py [new file with mode: 0644]
ceph_deploy/mds.py [new file with mode: 0644]
ceph_deploy/memoize.py [new file with mode: 0644]
ceph_deploy/misc.py [new file with mode: 0644]
ceph_deploy/mon.py [new file with mode: 0644]
ceph_deploy/new.py [new file with mode: 0644]
ceph_deploy/osd.py [new file with mode: 0644]
ceph_deploy/sudo_pushy.py [new file with mode: 0644]
ceph_deploy/tests/__init__.py [new file with mode: 0644]
ceph_deploy/tests/conftest.py [new file with mode: 0644]
ceph_deploy/tests/directory.py [new file with mode: 0644]
ceph_deploy/tests/fakes.py [new file with mode: 0644]
ceph_deploy/tests/test_cli.py [new file with mode: 0644]
ceph_deploy/tests/test_cli_install.py [new file with mode: 0644]
ceph_deploy/tests/test_cli_mon.py [new file with mode: 0644]
ceph_deploy/tests/test_cli_new.py [new file with mode: 0644]
ceph_deploy/tests/test_cli_osd.py [new file with mode: 0644]
ceph_deploy/tests/test_conf.py [new file with mode: 0644]
ceph_deploy/tests/test_mon.py [new file with mode: 0644]
ceph_deploy/tests/unit/hosts/test_hosts.py [new file with mode: 0644]
ceph_deploy/tests/unit/test_mon.py [new file with mode: 0644]
ceph_deploy/tests/unit/util/test_arg_validators.py [new file with mode: 0644]
ceph_deploy/tests/unit/util/test_constants.py [new file with mode: 0644]
ceph_deploy/tests/unit/util/test_paths.py [new file with mode: 0644]
ceph_deploy/tests/unit/util/test_pkg_managers.py [new file with mode: 0644]
ceph_deploy/util/__init__.py [new file with mode: 0644]
ceph_deploy/util/arg_validators.py [new file with mode: 0644]
ceph_deploy/util/constants.py [new file with mode: 0644]
ceph_deploy/util/decorators.py [new file with mode: 0644]
ceph_deploy/util/log.py [new file with mode: 0644]
ceph_deploy/util/paths/__init__.py [new file with mode: 0644]
ceph_deploy/util/paths/mon.py [new file with mode: 0644]
ceph_deploy/util/pkg_managers.py [new file with mode: 0644]
ceph_deploy/util/wrappers.py [new file with mode: 0644]
ceph_deploy/validate.py [new file with mode: 0644]
debian/ceph-deploy.install [new file with mode: 0644]
debian/changelog [new file with mode: 0644]
debian/compat [new file with mode: 0644]
debian/control [new file with mode: 0644]
debian/copyright [new file with mode: 0644]
debian/rules [new file with mode: 0755]
debian/source/format [new file with mode: 0644]
requirements-dev.txt [new file with mode: 0644]
requirements.txt [new file with mode: 0644]
scripts/build-debian.sh [new file with mode: 0755]
scripts/build-rpm.sh [new file with mode: 0755]
scripts/ceph-deploy [new file with mode: 0755]
setup.cfg [new file with mode: 0644]
setup.py [new file with mode: 0644]
tox.ini [new file with mode: 0644]
vendor.py [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..9594060
--- /dev/null
@@ -0,0 +1,18 @@
+*~
+.#*
+## the next line needs to start with a backslash to avoid looking like
+## a comment
+\#*#
+.*.swp
+
+*.pyc
+*.pyo
+*.egg-info
+/build
+/dist
+
+/virtualenv
+/.tox
+
+/ceph-deploy
+/*.conf
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644 (file)
index 0000000..4449fee
--- /dev/null
@@ -0,0 +1,88 @@
+
+1.2.7
+-----
+* Ensure local calls to ceph-deploy do not attempt to ssh.
+* ``mon create-initial`` command to deploy all defined mons, wait for them to
+  form quorum and finally to gatherkeys.
+* Improve help menu for mon commands.
+* Add ``--fs-type`` option to ``disk`` and ``osd`` commands (Thanks Benoit
+  Knecht)
+* Make sure we are using ``--cluster`` for remote configs when starting ceph
+* Fix broken ``mon destroy`` calls using the new hostname resolution helper
+* Add a helper to catch common monitor errors (reporting the status of a mon)
+* Normalize all configuration options in ceph-deploy (Thanks Andrew Woodward)
+* Use a ``cuttlefish`` compatible ``mon_status`` command
+* Make ``osd activate`` use the new remote connection libraries for improved
+  readability.
+* Make ``disk zap`` also use the new remote connection libraries.
+* Handle any connection errors that may came up when attempting to get into
+  remote hosts.
+
+1.2.6
+-----
+* Fixes a problem witha closed connection for Debian distros when creating
+  a mon.
+
+1.2.5
+-----
+* Fix yet another hanging problem when starting monitors. Closing the
+  connection now before we even start them.
+
+1.2.4
+-----
+* Improve ``osd help`` menu with path information
+* Really discourage the use of ``ceph-deploy new [IP]``
+* Fix hanging remote requests
+* Add ``mon status`` output when creating monitors
+* Fix Debian install issue (wrong parameter order) (Thanks Sayid Munawar)
+* ``osd`` commands will be more verbose when deploying them
+* Issue a warning when provided hosts do not match ``hostname -s`` remotely
+* Create two flags for altering/not-altering source repos at install time:
+  ``--adjust-repos`` and ``--no-adjust-repos``
+* Do not do any ``sudo`` commands if user is root
+* Use ``mon status`` for every ``mon`` deployment and detect problems with
+  monitors.
+* Allow to specify ``host:fqdn/ip`` for all mon commands (Thanks Dmitry
+  Borodaenko)
+* Be consistent for hostname detection (Thanks Dmitry Borodaenko)
+* Fix hanging problem on remote hosts
+
+1.2.3
+-----
+* Fix non-working ``disk list``
+* ``check_call`` utility fixes ``$PATH`` issues.
+* Use proper exit codes from the ``main()`` CLI function
+* Do not error when attempting to add the EPEL repos.
+* Do not complain when using IP:HOST pairs
+* Report nicely when ``HOST:DISK`` is not used when zapping.
+
+1.2.2
+-----
+* Do not force usage of lsb_release, fallback to
+  ``platform.linux_distribution()``
+* Ease installation in CentOS/Scientific by adding the EPEL repo
+  before attempting to install Ceph.
+* Graceful handling of pushy connection issues due to host
+  address resolution
+* Honor the usage of ``--cluster`` when calling osd prepare.
+
+1.2.1
+-----
+* Print the help when no arguments are passed
+* Add a ``--version`` flag
+* Show the version in the help menu
+* Catch ``DeployError`` exceptions nicely with the logger
+* Fix blocked command when calling ``mon create``
+* default to ``dumpling`` for installs
+* halt execution on remote exceptions
+
+
+1.2
+---
+* Better logging output
+* Remote logging for individual actions for ``install`` and ``mon create``
+* Install ``ca-certificates`` on all Debian-based distros
+* Honor the usage of ``--cluster``
+* Do not ``rm -rf`` monitor logs when destroying
+* Error out when ``ceph-deploy new [IP]`` is used
+* Log the ceph version when installing
diff --git a/LICENSE b/LICENSE
new file mode 100644 (file)
index 0000000..26624cf
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2012 Inktank Storage, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644 (file)
index 0000000..22710c1
--- /dev/null
@@ -0,0 +1,5 @@
+include *.rst
+include LICENSE
+include scripts/ceph-deploy
+include vendor.py
+prune ceph_deploy/test
diff --git a/README.rst b/README.rst
new file mode 100644 (file)
index 0000000..9ae83f4
--- /dev/null
@@ -0,0 +1,311 @@
+========================================================
+ ceph-deploy -- Deploy Ceph with minimal infrastructure
+========================================================
+
+``ceph-deploy`` is a way to deploy Ceph relying on just SSH access to
+the servers, ``sudo``, and some Python. It runs fully on your
+workstation, requiring no servers, databases, or anything like that.
+
+If you set up and tear down Ceph clusters a lot, and want minimal
+extra bureaucracy, this is for you.
+
+.. _what this tool is not:
+
+What this tool is not
+---------------------
+It is not a generic deployment system, it is only for Ceph, and is designed
+for users who want to quickly get Ceph running with sensible initial settings
+without the overhead of installing Chef, Puppet or Juju.
+
+It does not handle client configuration beyond pushing the Ceph config file
+and users who want fine-control over security settings, partitions or directory
+locations should use a tool such as Chef or Puppet.
+
+
+Installation
+============
+Depending on what type of usage you are going to have with ``ceph-deploy`` you
+might want to look into the different ways to install it. For automation, you
+might want to ``bootstrap`` directly. Regular users of ``ceph-deploy`` would
+probably install from the OS packages or from the Python Package Index.
+
+Python Package Index
+--------------------
+If you are familiar with Python install tools (like ``pip`` and
+``easy_install``) you can easily install ``ceph-deploy`` like::
+
+    pip install ceph-deploy
+
+or::
+
+    easy_install ceph-deploy
+
+
+It should grab all the dependencies for you and install into the current user's
+environment.
+
+We highly recommend using ``virtualenv`` and installing dependencies in
+a contained way.
+
+
+DEB
+---
+The DEB repo can be found at http://ceph.com/packages/ceph-extras/debian/
+
+But they can also be found for ``ceph`` releases in the ``ceph`` repos like::
+
+     ceph.com/debian-{release}
+     ceph.com/debian-testing
+
+RPM
+---
+The RPM repos can be found at http://ceph.com/packages/ceph-extras/rpm/
+
+Make sure you add the proper one for your distribution.
+
+But they can also be found for ``ceph`` releases in the ``ceph`` repos like::
+
+     ceph.com/rpm-{release}
+     ceph.com/rpm-testing
+
+
+bootstraping
+------------
+To get the source tree ready for use, run this once::
+
+  ./bootstrap
+
+You can symlink the ``ceph-deploy`` script in this somewhere
+convenient (like ``~/bin``), or add the current directory to ``PATH``,
+or just always type the full path to ``ceph-deploy``.
+
+ceph-deploy at a minimum requires that the machine from which the script is
+being run can ssh as root without password into each Ceph node.
+
+To enable this generate a new ssh keypair for the root user with no passphrase
+and place the public key (``id_rsa.pub`` or ``id_dsa.pub``) in::
+
+    /root/.ssh/authorized_keys
+
+and ensure that the following lines are in the sshd config::
+
+    PermitRootLogin yes
+    PermitEmptyPasswords yes
+
+The machine running ceph-deploy does not need to have the Ceph packages installed
+unless it needs to admin the cluster directly using the ``ceph`` command line tool.
+
+Managing an existing cluster
+============================
+
+You can use ceph-deploy to provision nodes for an existing cluster.
+To grab a copy of the cluster configuration file (normally
+``ceph.conf``)::
+
+ ceph-deploy config pull HOST
+
+You will usually also want to gather the encryption keys used for that
+cluster::
+
+    ceph-deploy gatherkeys MONHOST
+
+At this point you can skip the steps below that create a new cluster
+(you already have one) and optionally skip instalation and/or monitor
+creation, depending on what you are trying to accomplish.
+
+
+Creating a new cluster
+======================
+
+Creating a new configuration
+----------------------------
+
+To create a new configuration file and secret key, decide what hosts
+will run ``ceph-mon``, and run::
+
+  ceph-deploy new MON [MON..]
+
+listing the hostnames of the monitors.  Each ``MON`` can be
+
+ * a simple hostname.  It must be DNS resolvable without the fully
+   qualified domain name.
+ * a fully qualified domain name.  The hostname is assumed to be the
+   leading component up to the first ``.``.
+ * a ``HOST:FQDN`` pair, of both the hostname and a fully qualified
+   domain name or IP address.  For example, ``foo``,
+   ``foo.example.com``, ``foo:something.example.com``, and
+   ``foo:1.2.3.4`` are all valid.  Note, however, that the hostname
+   should match that configured on the host ``foo``.
+
+The above will create a ``ceph.conf`` and ``ceph.mon.keyring`` in your
+current directory.
+
+
+Edit initial cluster configuration
+----------------------------------
+
+You want to review the generated ``ceph.conf`` file and make sure that
+the ``mon_host`` setting contains the IP addresses you would like the
+monitors to bind to.  These are the IPs that clients will initially
+contact to authenticate to the cluster, and they need to be reachable
+both by external client-facing hosts and internal cluster daemons.
+
+Installing packages
+===================
+
+To install the Ceph software on the servers, run::
+
+  ceph-deploy install HOST [HOST..]
+
+This installs the current default *stable* release. You can choose a
+different release track with command line options, for example to use
+a release candidate::
+
+  ceph-deploy install --testing HOST
+
+Or to test a development branch::
+
+  ceph-deploy install --dev=wip-mds-now-works-no-kidding HOST [HOST..]
+
+
+Proxy or Firewall Installs
+--------------------------
+If attempting to install behind a firewall or through a proxy you can
+use the ``--no-adjust-repos`` that will tell ceph-deploy to skip any changes
+to the distro's repository in order to install the packages and it will go
+straight to package installation.
+
+That will allow an environment without internet access to point to *its own
+repositories*. This means that those repositories will need to be properly
+setup (and mirrored with all the necessary dependencies) before attempting an
+install.
+
+Another alternative is to set the `wget` env variables to point to the right
+hosts, for example::
+
+    http_proxy=http://host:port
+    ftp_proxy=http://host:port
+    https_proxy=http://host:port
+
+
+
+Deploying monitors
+==================
+
+To actually deploy ``ceph-mon`` to the hosts you chose, run::
+
+  ceph-deploy mon create HOST [HOST..]
+
+Without explicit hosts listed, hosts in ``mon_initial_members`` in the
+config file are deployed. That is, the hosts you passed to
+``ceph-deploy new`` are the default value here.
+
+Gather keys
+===========
+
+To gather authenticate keys (for administering the cluster and
+bootstrapping new nodes) to the local directory, run::
+
+  ceph-deploy gatherkeys HOST [HOST...]
+
+where ``HOST`` is one of the monitor hosts.
+
+Once these keys are in the local directory, you can provision new OSDs etc.
+
+
+Deploying OSDs
+==============
+
+To prepare a node for running OSDs, run::
+
+  ceph-deploy osd create HOST:DISK[:JOURNAL] [HOST:DISK[:JOURNAL] ...]
+
+After that, the hosts will be running OSDs for the given data disks.
+If you specify a raw disk (e.g., ``/dev/sdb``), partitions will be
+created and GPT labels will be used to mark and automatically activate
+OSD volumes.  If an existing partition is specified, the partition
+table will not be modified.  If you want to destroy the existing
+partition table on DISK first, you can include the ``--zap-disk``
+option.
+
+If there is already a prepared disk or directory that is ready to become an
+OSD, you can also do::
+
+    ceph-deploy osd activate HOST:DIR[:JOURNAL] [...]
+
+This is useful when you are managing the mounting of volumes yourself.
+
+
+Admin hosts
+===========
+
+To prepare a host with a ``ceph.conf`` and ``ceph.client.admin.keyring``
+keyring so that it can administer the cluster, run::
+
+  ceph-deploy admin HOST [HOST ...]
+
+Forget keys
+===========
+
+The ``new`` and ``gatherkeys`` put some Ceph authentication keys in keyrings in
+the local directory.  If you are worried about them being there for security
+reasons, run::
+
+  ceph-deploy forgetkeys
+
+and they will be removed.  If you need them again later to deploy additional
+nodes, simply re-run::
+
+  ceph-deploy gatherkeys HOST [HOST...]
+
+and they will be retrieved from an existing monitor node.
+
+Multiple clusters
+=================
+
+All of the above commands take a ``--cluster=NAME`` option, allowing
+you to manage multiple clusters conveniently from one workstation.
+For example::
+
+  ceph-deploy --cluster=us-west new
+  vi us-west.conf
+  ceph-deploy --cluster=us-west mon
+
+FAQ
+===
+
+Before anything
+---------------
+Make sure you have the latest version of ``ceph-deploy``. It is actively
+developed and releases are coming weekly (on average). The most recent versions
+of ``ceph-deploy`` will have a ``--version`` flag you can use, otherwise check
+with your package manager and update if there is anything new.
+
+Why is feature X not implemented?
+---------------------------------
+Usually, features are added when/if it is sensible for someone that wants to
+get started with ceph and said feature would make sense in that context.  If
+you believe this is the case and you've read "`what this tool is not`_" and
+still think feature ``X`` should exist in ceph-deploy, open a feature request
+in the ceph tracker: http://tracker.ceph.com/projects/devops/issues
+
+A command gave me an error, what is going on?
+---------------------------------------------
+Most of the commands for ``ceph-deploy`` are meant to be run remotely in a host
+that you have configured when creating the initial config. If a given command
+is not working as expected try to run the command that failed in the remote
+host and assert the behavior there.
+
+If the behavior in the remote host is the same, then it is probably not
+something wrong with ``ceph-deploy`` per-se. Make sure you capture the output
+of both the ``ceph-deploy`` output and the output of the command in the remote
+host.
+
+Issues with monitors
+--------------------
+If your monitors are not starting, make sure that the ``{hostname}`` you used
+when you ran ``ceph-deploy mon create {hostname}`` match the actual ``hostname -s``
+in the remote host.
+
+Newer versions of ``ceph-deploy`` should warn you if the results are different
+but that might prevent the monitors from reaching quorum.
diff --git a/bootstrap b/bootstrap
new file mode 100755 (executable)
index 0000000..55def90
--- /dev/null
+++ b/bootstrap
@@ -0,0 +1,58 @@
+#!/bin/sh
+set -e
+
+if command -v lsb_release >/dev/null 2>&1; then
+    case "$(lsb_release --id --short)" in
+       Ubuntu|Debian)
+           for package in python-virtualenv; do
+               if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then
+                    # add a space after old values
+                   missing="${missing:+$missing }$package"
+               fi
+           done
+           if [ -n "$missing" ]; then
+                       echo "$0: missing required packages, please install them:" 1>&2
+                       echo "  sudo apt-get install $missing"
+                       exit 1
+           fi
+           ;;
+    esac
+
+    case "$(lsb_release --id --short | awk '{print $1}')" in
+       openSUSE|SUSE)
+       for package in python-virtualenv; do
+               if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then
+                       missing="${missing:+$missing }$package"
+               fi
+       done
+       if [ -n "$missing" ]; then
+               echo "$0: missing required packages, please install them:" 1>&2
+               echo "  sudo zypper install $missing"
+               exit 1
+       fi
+       ;;
+    esac
+
+else
+       if [ -f /etc/redhat-release ]; then
+               case "$(cat /etc/redhat-release | awk '{print $1}')" in
+                       CentOS)
+                               for package in python-virtualenv; do
+                               if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then
+                                       missing="${missing:+$missing }$package"
+                               fi
+                               done
+                               if [ -n "$missing" ]; then
+                                       echo "$0: missing required packages, please install them:" 1>&2
+                                       echo "  sudo yum install $missing"
+                               exit 1
+                               fi
+                               ;;
+               esac
+       fi
+fi
+
+test -d virtualenv || virtualenv virtualenv
+./virtualenv/bin/python setup.py develop
+./virtualenv/bin/pip install -r requirements.txt -r requirements-dev.txt
+test -e ceph-deploy || ln -s virtualenv/bin/ceph-deploy .
diff --git a/ceph-deploy.spec b/ceph-deploy.spec
new file mode 100644 (file)
index 0000000..f79f826
--- /dev/null
@@ -0,0 +1,80 @@
+#
+# spec file for package ceph-deploy
+#
+
+%if ! (0%{?fedora} > 12 || 0%{?rhel} > 5)
+%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
+%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
+%endif
+
+#################################################################################
+# common
+#################################################################################
+Name:          ceph-deploy
+Version:       1.2.7
+Release:       0
+Summary:       Admin and deploy tool for Ceph
+License:       MIT
+Group:         System/Filesystems
+URL:           http://ceph.com/
+Source0:       %{name}-%{version}.tar.bz2
+BuildRoot:      %{_tmppath}/%{name}-%{version}-build
+BuildRequires:  python-devel
+BuildRequires:  python-distribute
+BuildRequires: python-setuptools
+BuildRequires: python-virtualenv
+BuildRequires:  python-mock
+BuildRequires:  python-tox
+%if 0%{?suse_version}
+BuildRequires: python-pytest
+%else
+BuildRequires: pytest
+%endif
+BuildRequires:  git
+Requires:       python-argparse
+Requires:       pushy >= 0.5.3
+Requires:       python-distribute
+#Requires:      lsb-release
+#Requires:      ceph
+%if 0%{?suse_version} && 0%{?suse_version} <= 1110
+%{!?python_sitelib: %global python_sitelib %(python -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
+%else
+BuildArch:      noarch
+%endif
+
+#################################################################################
+# specific
+#################################################################################
+%if 0%{defined suse_version}
+%py_requires
+%endif
+
+%if 0%{?rhel}
+BuildRequires:         python >= %{pyver}
+Requires:      python >= %{pyver}
+%endif
+
+%description
+An easy to use admin tool for deploy ceph storage clusters.
+
+%prep
+#%setup -q -n %{name}
+%setup -q
+
+%build
+#python setup.py build
+
+%install
+python setup.py install --prefix=%{_prefix} --root=%{buildroot}
+install -m 0755 -D scripts/ceph-deploy $RPM_BUILD_ROOT/usr/bin
+
+%clean
+[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf "$RPM_BUILD_ROOT"
+
+%files
+%defattr(-,root,root)
+%doc LICENSE README.rst 
+%{_bindir}/ceph-deploy
+%{python_sitelib}/*
+
+%changelog
diff --git a/ceph_deploy/__init__.py b/ceph_deploy/__init__.py
new file mode 100644 (file)
index 0000000..35d767a
--- /dev/null
@@ -0,0 +1,3 @@
+
+__version__ = '1.2.7'
+
diff --git a/ceph_deploy/admin.py b/ceph_deploy/admin.py
new file mode 100644 (file)
index 0000000..36d2a4d
--- /dev/null
@@ -0,0 +1,66 @@
+import logging
+
+from cStringIO import StringIO
+
+from . import exc
+from . import conf
+from .cliutil import priority
+from . import hosts
+
+LOG = logging.getLogger(__name__)
+
+
+def admin(args):
+    cfg = conf.load(args)
+    conf_data = StringIO()
+    cfg.write(conf_data)
+
+    try:
+        with file('%s.client.admin.keyring' % args.cluster, 'rb') as f:
+            keyring = f.read()
+    except:
+        raise RuntimeError('%s.client.admin.keyring not found' %
+                           args.cluster)
+
+    errors = 0
+    for hostname in args.client:
+        LOG.debug('Pushing admin keys and conf to %s', hostname)
+        try:
+            distro = hosts.get(hostname)
+            hostname = distro.conn.remote_module.shortname()
+
+            distro.conn.remote_module.write_conf(
+                args.cluster,
+                conf_data.getvalue(),
+                args.overwrite_conf,
+            )
+
+            distro.conn.remote_module.write_file(
+                '/etc/ceph/%s.client.admin.keyring' % args.cluster,
+                keyring
+            )
+
+            distro.conn.exit()
+
+        except RuntimeError as e:
+            LOG.error(e)
+            errors += 1
+
+    if errors:
+        raise exc.GenericError('Failed to configure %d admin hosts' % errors)
+
+
+@priority(70)
+def make(parser):
+    """
+    Push configuration and client.admin key to a remote host.
+    """
+    parser.add_argument(
+        'client',
+        metavar='HOST',
+        nargs='*',
+        help='host to configure for ceph administration',
+        )
+    parser.set_defaults(
+        func=admin,
+        )
diff --git a/ceph_deploy/cli.py b/ceph_deploy/cli.py
new file mode 100644 (file)
index 0000000..54d9426
--- /dev/null
@@ -0,0 +1,134 @@
+import pkg_resources
+import argparse
+import logging
+import textwrap
+import sys
+from string import join
+
+import ceph_deploy
+from . import exc
+from . import validate
+from .util import log
+from .util.decorators import catches
+
+LOG = logging.getLogger(__name__)
+
+
+__header__ = textwrap.dedent("""
+    -^-
+   /   \\
+   |O o|  ceph-deploy v%s
+   ).-.(
+  '/|||\`
+  | '|` |
+    '|`
+""" % ceph_deploy.__version__)
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        description='Easy Ceph deployment\n\n%s' % __header__,
+        )
+    verbosity = parser.add_mutually_exclusive_group(required=False)
+    verbosity.add_argument(
+        '-v', '--verbose',
+        action='store_true', dest='verbose', default=False,
+        help='be more verbose',
+        )
+    verbosity.add_argument(
+        '-q', '--quiet',
+        action='store_true', dest='quiet',
+        help='be less verbose',
+        )
+    parser.add_argument(
+        '-n', '--dry-run',
+        action='store_true', dest='dry_run',
+        help='do not perform any action, but report what would be done',
+        )
+    parser.add_argument(
+        '--version',
+        action='version',
+        version='%s' % ceph_deploy.__version__,
+        help='the current installed version of ceph-deploy',
+        )
+    parser.add_argument(
+        '--overwrite-conf',
+        action='store_true',
+        help='overwrite an existing conf file on remote host (if present)',
+        )
+    parser.add_argument(
+        '--cluster',
+        metavar='NAME',
+        help='name of the cluster',
+        type=validate.alphanumeric,
+        )
+    sub = parser.add_subparsers(
+        title='commands',
+        metavar='COMMAND',
+        help='description',
+        )
+    entry_points = [
+        (ep.name, ep.load())
+        for ep in pkg_resources.iter_entry_points('ceph_deploy.cli')
+        ]
+    entry_points.sort(
+        key=lambda (name, fn): getattr(fn, 'priority', 100),
+        )
+    for (name, fn) in entry_points:
+        p = sub.add_parser(
+            name,
+            description=fn.__doc__,
+            help=fn.__doc__,
+            )
+        # ugly kludge but i really want to have a nice way to access
+        # the program name, with subcommand, later
+        p.set_defaults(prog=p.prog)
+        fn(p)
+    parser.set_defaults(
+        # we want to hold on to this, for later
+        prog=parser.prog,
+        cluster='ceph',
+        )
+    return parser
+
+
+@catches((KeyboardInterrupt, RuntimeError, exc.DeployError,))
+def main(args=None, namespace=None):
+    parser = get_parser()
+
+    if len(sys.argv) < 2:
+        parser.print_help()
+        sys.exit()
+    else:
+        args = parser.parse_args(args=args, namespace=namespace)
+
+    console_loglevel = logging.DEBUG  # start at DEBUG for now
+    if args.quiet:
+        console_loglevel = logging.WARNING
+    if args.verbose:
+        console_loglevel = logging.DEBUG
+
+    # Console Logger
+    sh = logging.StreamHandler()
+    sh.setFormatter(log.color_format())
+    sh.setLevel(console_loglevel)
+
+    # File Logger
+    fh = logging.FileHandler('{cluster}.log'.format(cluster=args.cluster))
+    fh.setLevel(logging.DEBUG)
+    fh.setFormatter(logging.Formatter(log.BASE_FORMAT))
+
+    # because we're in a module already, __name__ is not the ancestor of
+    # the rest of the package; use the root as the logger for everyone
+    root_logger = logging.getLogger()
+
+    # allow all levels at root_logger, handlers control individual levels
+    root_logger.setLevel(logging.DEBUG)
+
+    root_logger.addHandler(sh)
+    root_logger.addHandler(fh)
+
+    LOG.info("Invoked (%s): %s" %(ceph_deploy.__version__,
+                                  join(sys.argv, " ")))
+    return args.func(args)
diff --git a/ceph_deploy/cliutil.py b/ceph_deploy/cliutil.py
new file mode 100644 (file)
index 0000000..d273f31
--- /dev/null
@@ -0,0 +1,8 @@
+def priority(num):
+    """
+    Decorator to add a `priority` attribute to the function.
+    """
+    def add_priority(fn):
+        fn.priority = num
+        return fn
+    return add_priority
diff --git a/ceph_deploy/conf.py b/ceph_deploy/conf.py
new file mode 100644 (file)
index 0000000..63ea0ba
--- /dev/null
@@ -0,0 +1,71 @@
+import ConfigParser
+import contextlib
+
+from . import exc
+
+
+class _TrimIndentFile(object):
+    def __init__(self, fp):
+        self.fp = fp
+
+    def readline(self):
+        line = self.fp.readline()
+        return line.lstrip(' \t')
+
+
+class CephConf(ConfigParser.RawConfigParser):
+    def optionxform(self, s):
+        s = s.replace('_', ' ')
+        s = '_'.join(s.split())
+        return s
+
+    def safe_get(self, section, key):
+        """
+        Attempt to get a configuration value from a certain section
+        in a ``cfg`` object but returning None if not found. Avoids the need
+        to be doing try/except {ConfigParser Exceptions} every time.
+        """
+        try:
+            #Use full parent function so we can replace it in the class 
+            # if desired
+            return ConfigParser.RawConfigParser.get(self, section, key)
+        except (ConfigParser.NoSectionError,
+                ConfigParser.NoOptionError):
+            return None
+
+
+def parse(fp):
+    cfg = CephConf()
+    ifp = _TrimIndentFile(fp)
+    cfg.readfp(ifp)
+    return cfg
+
+
+def load(args):
+    path = '{cluster}.conf'.format(cluster=args.cluster)
+    try:
+        f = file(path)
+    except IOError as e:
+        raise exc.ConfigError(e)
+    else:
+        with contextlib.closing(f):
+            return parse(f)
+
+
+def write_conf(cluster, conf, overwrite):
+    """ write cluster configuration to /etc/ceph/{cluster}.conf """
+    import os
+
+    path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster)
+    tmp = '{path}.{pid}.tmp'.format(path=path, pid=os.getpid())
+
+    if os.path.exists(path):
+        with file(path, 'rb') as f:
+            old = f.read()
+            if old != conf and not overwrite:
+                raise RuntimeError('config file %s exists with different content; use --overwrite-conf to overwrite' % path)
+    with file(tmp, 'w') as f:
+        f.write(conf)
+        f.flush()
+        os.fsync(f)
+    os.rename(tmp, path)
diff --git a/ceph_deploy/config.py b/ceph_deploy/config.py
new file mode 100644 (file)
index 0000000..697891c
--- /dev/null
@@ -0,0 +1,105 @@
+import logging
+from cStringIO import StringIO
+import os.path
+
+from . import exc
+from . import conf
+from .cliutil import priority
+from . import hosts
+
+LOG = logging.getLogger(__name__)
+
+
+def config_push(args):
+    cfg = conf.load(args)
+    conf_data = StringIO()
+    cfg.write(conf_data)
+
+    errors = 0
+    for hostname in args.client:
+        LOG.debug('Pushing config to %s', hostname)
+        try:
+            distro = hosts.get(hostname)
+
+            distro.conn.remote_module.write_conf(
+                args.cluster,
+                conf_data.getvalue(),
+                args.overwrite_conf,
+            )
+
+            distro.conn.exit()
+
+        except RuntimeError as e:
+            LOG.error(e)
+            errors += 1
+
+    if errors:
+        raise exc.GenericError('Failed to config %d hosts' % errors)
+
+
+def config_pull(args):
+
+    topath = '{cluster}.conf'.format(cluster=args.cluster)
+    frompath = '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster)
+
+    errors = 0
+    for hostname in args.client:
+        try:
+            LOG.debug('Checking %s for %s', hostname, frompath)
+            distro = hosts.get(hostname)
+            conf_file_contents = distro.conn.remote_module.get_file(frompath)
+
+            if conf_file_contents is not None:
+                LOG.debug('Got %s from %s', frompath, hostname)
+                if os.path.exists(topath):
+                    with file(topath, 'rb') as f:
+                        existing = f.read()
+                        if existing != conf_file_contents and not args.overwrite_conf:
+                            LOG.error('local config file %s exists with different content; use --overwrite-conf to overwrite' % topath)
+                            raise
+
+                with file(topath, 'w') as f:
+                    f.write(conf_file_contents)
+                return
+            distro.conn.exit()
+            LOG.debug('Empty or missing %s on %s', frompath, hostname)
+        except:
+            LOG.error('Unable to pull %s from %s', frompath, hostname)
+        finally:
+            errors += 1
+
+    raise exc.GenericError('Failed to fetch config from %d hosts' % errors)
+
+
+def config(args):
+    if args.subcommand == 'push':
+        config_push(args)
+    elif args.subcommand == 'pull':
+        config_pull(args)
+    else:
+        LOG.error('subcommand %s not implemented', args.subcommand)
+
+
+@priority(70)
+def make(parser):
+    """
+    Push configuration file to a remote host.
+    """
+    parser.add_argument(
+        'subcommand',
+        metavar='SUBCOMMAND',
+        choices=[
+            'push',
+            'pull',
+            ],
+        help='push or pull',
+        )
+    parser.add_argument(
+        'client',
+        metavar='HOST',
+        nargs='*',
+        help='host to push/pull the config to/from',
+        )
+    parser.set_defaults(
+        func=config,
+        )
diff --git a/ceph_deploy/connection.py b/ceph_deploy/connection.py
new file mode 100644 (file)
index 0000000..d380cbe
--- /dev/null
@@ -0,0 +1,32 @@
+import getpass
+from ceph_deploy.lib.remoto import Connection
+
+
+def get_connection(hostname, logger, threads=5):
+    """
+    A very simple helper, meant to return a connection
+    that will know about the need to use sudo.
+    """
+    try:
+        conn = Connection(
+            hostname,
+            logger=logger,
+            sudo=needs_sudo(),
+            threads=threads,
+        )
+
+        # Set a timeout value in seconds to disconnect and move on
+        # if no data is sent back.
+        conn.global_timeout = 300
+        return conn
+
+    except Exception as error:
+        msg = "connecting to host: %s " % hostname
+        errors = "resulted in errors: %s %s" % (error.__class__.__name__, error)
+        raise RuntimeError(msg + errors)
+
+
+def needs_sudo():
+    if getpass.getuser() == 'root':
+        return False
+    return True
diff --git a/ceph_deploy/exc.py b/ceph_deploy/exc.py
new file mode 100644 (file)
index 0000000..62e0eda
--- /dev/null
@@ -0,0 +1,74 @@
+class DeployError(Exception):
+    """
+    Unknown deploy error
+    """
+
+    def __str__(self):
+        doc = self.__doc__.strip()
+        return ': '.join([doc] + [str(a) for a in self.args])
+
+
+class UnableToResolveError(DeployError):
+    """
+    Unable to resolve host
+    """
+class ClusterExistsError(DeployError):
+    """
+    Cluster config exists already
+    """
+
+
+class ConfigError(DeployError):
+    """
+    Cannot load config
+    """
+
+
+class NeedHostError(DeployError):
+    """
+    No hosts specified to deploy to.
+    """
+
+
+class NeedMonError(DeployError):
+    """
+    Cannot find nodes with ceph-mon.
+    """
+
+class NeedDiskError(DeployError):
+    """
+    Must supply disk/path argument
+    """
+
+class UnsupportedPlatform(DeployError):
+    """
+    Platform is not supported
+    """
+    def __init__(self, distro, codename):
+        self.distro = distro
+        self.codename = codename
+
+    def __str__(self):
+        return '{doc}: {distro} {codename}'.format(
+            doc=self.__doc__.strip(),
+            distro=self.distro,
+            codename=self.codename,
+            )
+
+class MissingPackageError(DeployError):
+    """
+    A required package or command is missing
+    """
+    def __init__(self, message):
+        self.message = message
+
+    def __str__(self):
+        return self.message
+
+
+class GenericError(DeployError):
+    def __init__(self, message):
+        self.message = message
+
+    def __str__(self):
+        return self.message
diff --git a/ceph_deploy/forgetkeys.py b/ceph_deploy/forgetkeys.py
new file mode 100644 (file)
index 0000000..86bedbe
--- /dev/null
@@ -0,0 +1,36 @@
+import logging
+import errno
+
+from .cliutil import priority
+
+
+LOG = logging.getLogger(__name__)
+
+
+def forgetkeys(args):
+    import os
+    for f in [
+        'mon',
+        'client.admin',
+        'bootstrap-osd',
+        'bootstrap-mds',
+        ]:
+        try:
+            os.unlink('{cluster}.{what}.keyring'.format(
+                    cluster=args.cluster,
+                    what=f,
+                    ))
+        except OSError, e:
+            if e.errno == errno.ENOENT:
+                pass
+            else:
+                raise
+
+@priority(100)
+def make(parser):
+    """
+    Remove authentication keys from the local directory.
+    """
+    parser.set_defaults(
+        func=forgetkeys,
+        )
diff --git a/ceph_deploy/gatherkeys.py b/ceph_deploy/gatherkeys.py
new file mode 100644 (file)
index 0000000..9710f3a
--- /dev/null
@@ -0,0 +1,89 @@
+import os.path
+import logging
+
+from .cliutil import priority
+from . import hosts
+
+
+LOG = logging.getLogger(__name__)
+
+
+def fetch_file(args, frompath, topath, _hosts):
+    if os.path.exists(topath):
+        LOG.debug('Have %s', topath)
+        return True
+    else:
+        for hostname in _hosts:
+            LOG.debug('Checking %s for %s', hostname, frompath)
+            distro = hosts.get(hostname)
+            key = distro.conn.remote_module.get_file(
+                frompath.format(hostname=hostname)
+            )
+
+            if key is not None:
+                LOG.debug('Got %s key from %s.', topath, hostname)
+                with file(topath, 'w') as f:
+                    f.write(key)
+                    return True
+            distro.conn.exit()
+    LOG.warning('Unable to find %s on %s', frompath, _hosts)
+    return False
+
+
+def gatherkeys(args):
+    ret = 0
+
+    # client.admin
+    r = fetch_file(
+        args=args,
+        frompath='/etc/ceph/{cluster}.client.admin.keyring'.format(
+            cluster=args.cluster),
+        topath='{cluster}.client.admin.keyring'.format(
+            cluster=args.cluster),
+        _hosts=args.mon,
+        )
+    if not r:
+        ret = 1
+
+    # mon.
+    r = fetch_file(
+        args=args,
+        frompath='/var/lib/ceph/mon/%s-{hostname}/keyring' % args.cluster,
+        topath='{cluster}.mon.keyring'.format(cluster=args.cluster),
+        _hosts=args.mon,
+        )
+    if not r:
+        ret = 1
+
+    # bootstrap
+    for what in ['osd', 'mds']:
+        r = fetch_file(
+            args=args,
+            frompath='/var/lib/ceph/bootstrap-{what}/{cluster}.keyring'.format(
+                cluster=args.cluster,
+                what=what),
+            topath='{cluster}.bootstrap-{what}.keyring'.format(
+                cluster=args.cluster,
+                what=what),
+            _hosts=args.mon,
+            )
+        if not r:
+            ret = 1
+
+    return ret
+
+
+@priority(40)
+def make(parser):
+    """
+    Gather authentication keys for provisioning new nodes.
+    """
+    parser.add_argument(
+        'mon',
+        metavar='HOST',
+        nargs='+',
+        help='monitor host to pull keys from',
+        )
+    parser.set_defaults(
+        func=gatherkeys,
+        )
diff --git a/ceph_deploy/hosts/__init__.py b/ceph_deploy/hosts/__init__.py
new file mode 100644 (file)
index 0000000..470956c
--- /dev/null
@@ -0,0 +1,73 @@
+"""
+We deal (mostly) with remote hosts. To avoid special casing each different
+commands (e.g. using `yum` as opposed to `apt`) we can make a one time call to
+that remote host and set all the special cases for running commands depending
+on the type of distribution/version we are dealing with.
+"""
+import logging
+from ceph_deploy import exc, lsb
+from ceph_deploy.hosts import debian, centos, fedora, suse, remotes
+from ceph_deploy.connection import get_connection
+
+logger = logging.getLogger()
+
+
+def get(hostname, fallback=None):
+    """
+    Retrieve the module that matches the distribution of a ``hostname``. This
+    function will connect to that host and retrieve the distribution
+    informaiton, then return the appropriate module and slap a few attributes
+    to that module defining the information it found from the hostname.
+
+    For example, if host ``node1.example.com`` is an Ubuntu server, the
+    ``debian`` module would be returned and the following would be set::
+
+        module.name = 'ubuntu'
+        module.release = '12.04'
+        module.codename = 'precise'
+
+    :param hostname: A hostname that is reachable/resolvable over the network
+    :param fallback: Optional fallback to use if no supported distro is found
+    """
+    conn = get_connection(hostname, logger=logging.getLogger(hostname))
+    conn.import_module(remotes)
+    distro_name, release, codename = conn.remote_module.platform_information()
+    machine_type = conn.remote_module.machine_type()
+
+    module = _get_distro(distro_name)
+    module.name = distro_name
+    module.release = release
+    module.codename = codename
+    module.conn = conn
+    module.machine_type = machine_type
+    module.init = lsb.choose_init(distro_name, codename)
+
+    return module
+
+
+def _get_distro(distro, fallback=None):
+    distro = _normalized_distro_name(distro)
+    distributions = {
+        'debian': debian,
+        'ubuntu': debian,
+        'centos': centos,
+        'scientific': centos,
+        'redhat': centos,
+        'fedora': fedora,
+        'suse': suse,
+        }
+    try:
+        return distributions[distro]
+    except KeyError:
+        if fallback:
+            return _get_distro(fallback)
+        raise exc.UnsupportedPlatform(distro=distro, codename='')
+
+
+def _normalized_distro_name(distro):
+    distro = distro.lower()
+    if distro.startswith('redhat'):
+        return 'redhat'
+    elif distro.startswith('suse'):
+        return 'suse'
+    return distro
diff --git a/ceph_deploy/hosts/centos/__init__.py b/ceph_deploy/hosts/centos/__init__.py
new file mode 100644 (file)
index 0000000..3d105e7
--- /dev/null
@@ -0,0 +1,10 @@
+import mon
+from install import install
+from uninstall import uninstall
+
+# Allow to set some information about this distro
+#
+
+distro = None
+release = None
+codename = None
diff --git a/ceph_deploy/hosts/centos/install.py b/ceph_deploy/hosts/centos/install.py
new file mode 100644 (file)
index 0000000..8f4e9a0
--- /dev/null
@@ -0,0 +1,92 @@
+from ceph_deploy.util import pkg_managers
+from ceph_deploy.lib.remoto import process
+
+
+def install(distro, version_kind, version, adjust_repos):
+    release = distro.release
+    machine = distro.machine_type
+
+    # Get EPEL installed before we continue:
+    if adjust_repos:
+        install_epel(distro)
+    if version_kind in ['stable', 'testing']:
+        key = 'release'
+    else:
+        key = 'autobuild'
+
+    if adjust_repos:
+        process.run(
+            distro.conn,
+            [
+                "su",
+                "-c",
+                "'rpm --import \"https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc\"'".format(key=key),
+            ],
+        )
+
+        if version_kind == 'stable':
+            url = 'http://ceph.com/rpm-{version}/el6/'.format(
+                version=version,
+                )
+        elif version_kind == 'testing':
+            url = 'http://ceph.com/rpm-testing/'
+        elif version_kind == 'dev':
+            url = 'http://gitbuilder.ceph.com/ceph-rpm-centos{release}-{machine}-basic/ref/{version}/'.format(
+                release=release.split(".",1)[0],
+                machine=machine,
+                version=version,
+                )
+
+        process.run(
+            distro.conn,
+            [
+                'rpm',
+                '-Uvh',
+                '--replacepkgs',
+                '{url}noarch/ceph-release-1-0.el6.noarch.rpm'.format(url=url),
+            ],
+        )
+
+    process.run(
+        distro.conn,
+        [
+            'yum',
+            '-y',
+            '-q',
+            'install',
+            'ceph',
+        ],
+    )
+
+
+def install_epel(distro):
+    """
+    CentOS and Scientific need the EPEL repo, otherwise Ceph cannot be
+    installed.
+    """
+    if distro.name.lower() in ['centos', 'scientific']:
+        distro.conn.logger.info('adding EPEL repository')
+        if float(distro.release) >= 6:
+            process.run(
+                distro.conn,
+                ['wget', 'http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm'],
+            )
+            pkg_managers.rpm(
+                distro.conn,
+                [
+                    '--replacepkgs',
+                    'epel-release-6*.rpm',
+                ],
+            )
+        else:
+            process.run(
+                distro.conn,
+                ['wget', 'wget http://dl.fedoraproject.org/pub/epel/5/x86_64/epel-release-5-4.noarch.rpm'],
+            )
+            pkg_managers.rpm(
+                distro.conn,
+                [
+                    '--replacepkgs',
+                    'epel-release-5*.rpm'
+                ],
+            )
diff --git a/ceph_deploy/hosts/centos/mon/__init__.py b/ceph_deploy/hosts/centos/mon/__init__.py
new file mode 100644 (file)
index 0000000..fca0e0d
--- /dev/null
@@ -0,0 +1 @@
+from create import create
diff --git a/ceph_deploy/hosts/centos/mon/create.py b/ceph_deploy/hosts/centos/mon/create.py
new file mode 100644 (file)
index 0000000..16b9f22
--- /dev/null
@@ -0,0 +1,21 @@
+from ceph_deploy.hosts import common
+from ceph_deploy.lib.remoto import process
+
+
+def create(distro, args, monitor_keyring):
+    hostname = distro.conn.remote_module.shortname()
+    common.mon_create(distro, args, monitor_keyring, hostname)
+    service = distro.conn.remote_module.which_service()
+
+    process.run(
+        distro.conn,
+        [
+            service,
+            'ceph',
+            '-c',
+            '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster),
+            'start',
+            'mon.{hostname}'.format(hostname=hostname)
+        ],
+        timeout=7,
+    )
diff --git a/ceph_deploy/hosts/centos/uninstall.py b/ceph_deploy/hosts/centos/uninstall.py
new file mode 100644 (file)
index 0000000..4d17133
--- /dev/null
@@ -0,0 +1,12 @@
+from ceph_deploy.util import pkg_managers
+
+
+def uninstall(conn, purge=False):
+    packages = [
+        'ceph',
+        ]
+
+    pkg_managers.yum_remove(
+        conn,
+        packages,
+    )
diff --git a/ceph_deploy/hosts/common.py b/ceph_deploy/hosts/common.py
new file mode 100644 (file)
index 0000000..673cd30
--- /dev/null
@@ -0,0 +1,67 @@
+from ceph_deploy.util import paths
+from ceph_deploy import conf
+from ceph_deploy.lib.remoto import process
+from StringIO import StringIO
+
+
+def ceph_version(conn):
+    """
+    Log the remote ceph-version by calling `ceph --version`
+    """
+    return process.run(conn, ['ceph', '--version'])
+
+
+def mon_create(distro, args, monitor_keyring, hostname):
+    logger = distro.conn.logger
+    logger.debug('remote hostname: %s' % hostname)
+    path = paths.mon.path(args.cluster, hostname)
+    done_path = paths.mon.done(args.cluster, hostname)
+    init_path = paths.mon.init(args.cluster, hostname, distro.init)
+
+    configuration = conf.load(args)
+    conf_data = StringIO()
+    configuration.write(conf_data)
+
+    # write the configuration file
+    distro.conn.remote_module.write_conf(
+        args.cluster,
+        conf_data.getvalue(),
+        args.overwrite_conf,
+    )
+
+    # if the mon path does not exist, create it
+    distro.conn.remote_module.create_mon_path(path)
+
+    logger.debug('checking for done path: %s' % done_path)
+    if not distro.conn.remote_module.path_exists(done_path):
+        logger.debug('done path does not exist: %s' % done_path)
+        if not distro.conn.remote_module.path_exists(paths.mon.constants.tmp_path):
+            logger.info('creating tmp path: %s' % paths.mon.constants.tmp_path)
+            distro.conn.remote_module.makedir(paths.mon.constants.tmp_path)
+        keyring = paths.mon.keyring(args.cluster, hostname)
+
+        logger.info('creating keyring file: %s' % keyring)
+        distro.conn.remote_module.write_monitor_keyring(
+            keyring,
+            monitor_keyring,
+        )
+
+        process.run(
+            distro.conn,
+            [
+                'ceph-mon',
+                '--cluster', args.cluster,
+                '--mkfs',
+                '-i', hostname,
+                '--keyring', keyring,
+            ],
+        )
+
+        logger.info('unlinking keyring file %s' % keyring)
+        distro.conn.remote_module.unlink(keyring)
+
+    # create the done file
+    distro.conn.remote_module.create_done_path(done_path)
+
+    # create init path
+    distro.conn.remote_module.create_init_path(init_path)
diff --git a/ceph_deploy/hosts/debian/__init__.py b/ceph_deploy/hosts/debian/__init__.py
new file mode 100644 (file)
index 0000000..3d105e7
--- /dev/null
@@ -0,0 +1,10 @@
+import mon
+from install import install
+from uninstall import uninstall
+
+# Allow to set some information about this distro
+#
+
+distro = None
+release = None
+codename = None
diff --git a/ceph_deploy/hosts/debian/install.py b/ceph_deploy/hosts/debian/install.py
new file mode 100644 (file)
index 0000000..5125d8f
--- /dev/null
@@ -0,0 +1,83 @@
+from ceph_deploy.lib.remoto import process
+
+
+def install(distro, version_kind, version, adjust_repos):
+    codename = distro.codename
+    machine = distro.machine_type
+
+    if version_kind in ['stable', 'testing']:
+        key = 'release'
+    else:
+        key = 'autobuild'
+
+    # Make sure ca-certificates is installed
+    process.run(
+        distro.conn,
+        [
+            'env',
+            'DEBIAN_FRONTEND=noninteractive',
+            'apt-get',
+            '-q',
+            'install',
+            '--assume-yes',
+            'ca-certificates',
+        ]
+    )
+
+    if adjust_repos:
+        process.run(
+            distro.conn,
+            [
+                'wget',
+                '-q',
+                '-O-',
+                "'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc'".format(key=key),
+                "| apt-key add -",
+            ]
+        )
+
+        if version_kind == 'stable':
+            url = 'http://ceph.com/debian-{version}/'.format(
+                version=version,
+                )
+        elif version_kind == 'testing':
+            url = 'http://ceph.com/debian-testing/'
+        elif version_kind == 'dev':
+            url = 'http://gitbuilder.ceph.com/ceph-deb-{codename}-{machine}-basic/ref/{version}'.format(
+                codename=codename,
+                machine=machine,
+                version=version,
+                )
+        else:
+            raise RuntimeError('Unknown version kind: %r' % version_kind)
+
+        distro.conn.remote_module.write_sources_list(url, codename)
+
+    process.run(
+        distro.conn,
+        ['apt-get', '-q', 'update'],
+        )
+
+    # TODO this does not downgrade -- should it?
+    process.run(
+        distro.conn,
+        [
+            'env',
+            'DEBIAN_FRONTEND=noninteractive',
+            'DEBIAN_PRIORITY=critical',
+            'apt-get',
+            '-q',
+            '-o', 'Dpkg::Options::=--force-confnew',
+            '--no-install-recommends',
+            '--assume-yes',
+            'install',
+            '--',
+            'ceph',
+            'ceph-mds',
+            'ceph-common',
+            'ceph-fs-common',
+            # ceph only recommends gdisk, make sure we actually have
+            # it; only really needed for osds, but minimal collateral
+            'gdisk',
+            ],
+        )
diff --git a/ceph_deploy/hosts/debian/mon/__init__.py b/ceph_deploy/hosts/debian/mon/__init__.py
new file mode 100644 (file)
index 0000000..fca0e0d
--- /dev/null
@@ -0,0 +1 @@
+from create import create
diff --git a/ceph_deploy/hosts/debian/mon/create.py b/ceph_deploy/hosts/debian/mon/create.py
new file mode 100644 (file)
index 0000000..29fa49c
--- /dev/null
@@ -0,0 +1,42 @@
+from ceph_deploy.hosts import common
+from ceph_deploy.lib.remoto import process
+
+
+def create(distro, args, monitor_keyring):
+    logger = distro.conn.logger
+    hostname = distro.conn.remote_module.shortname()
+    common.mon_create(distro, args, monitor_keyring, hostname)
+    service = distro.conn.remote_module.which_service()
+
+    if not service:
+        logger.warning('could not find `service` executable')
+
+    if distro.init == 'upstart':  # Ubuntu uses upstart
+        process.run(
+            distro.conn,
+            [
+                'initctl',
+                'emit',
+                'ceph-mon',
+                'cluster={cluster}'.format(cluster=args.cluster),
+                'id={hostname}'.format(hostname=hostname),
+            ],
+            timeout=7,
+        )
+
+    elif distro.init == 'sysvinit':  # Debian uses sysvinit
+
+        process.run(
+            distro.conn,
+            [
+                service,
+                'ceph',
+                '-c',
+                '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster),
+                'start',
+                'mon.{hostname}'.format(hostname=hostname)
+            ],
+            timeout=7,
+        )
+    else:
+        raise RuntimeError('create cannot use init %s' % distro.init)
diff --git a/ceph_deploy/hosts/debian/uninstall.py b/ceph_deploy/hosts/debian/uninstall.py
new file mode 100644 (file)
index 0000000..8ffdca1
--- /dev/null
@@ -0,0 +1,16 @@
+from ceph_deploy.util import pkg_managers
+from ceph_deploy.lib.remoto import process
+
+
+def uninstall(conn, purge=False):
+    packages = [
+        'ceph',
+        'ceph-mds',
+        'ceph-common',
+        'ceph-fs-common',
+        ]
+    pkg_managers.apt_remove(
+        conn,
+        packages,
+        purge=purge,
+    )
diff --git a/ceph_deploy/hosts/fedora/__init__.py b/ceph_deploy/hosts/fedora/__init__.py
new file mode 100644 (file)
index 0000000..3d105e7
--- /dev/null
@@ -0,0 +1,10 @@
+import mon
+from install import install
+from uninstall import uninstall
+
+# Allow to set some information about this distro
+#
+
+distro = None
+release = None
+codename = None
diff --git a/ceph_deploy/hosts/fedora/install.py b/ceph_deploy/hosts/fedora/install.py
new file mode 100644 (file)
index 0000000..880f911
--- /dev/null
@@ -0,0 +1,63 @@
+from ceph_deploy.lib.remoto import process
+
+
+def install(distro, version_kind, version, adjust_repos):
+    release = distro.release
+    machine = distro.machine_type
+
+    if version_kind in ['stable', 'testing']:
+        key = 'release'
+    else:
+        key = 'autobuild'
+
+    if adjust_repos:
+        process.run(
+            distro.conn,
+            [
+                "su",
+                "-c",
+                "'rpm --import \"https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc\"'".format(key=key),
+            ],
+        )
+
+        if version_kind == 'stable':
+            url = 'http://ceph.com/rpm-{version}/fc{release}/'.format(
+                version=version,
+                release=release,
+                )
+        elif version_kind == 'testing':
+            url = 'http://ceph.com/rpm-testing/fc{release}'.format(
+                release=release,
+                )
+        elif version_kind == 'dev':
+            url = 'http://gitbuilder.ceph.com/ceph-rpm-fc{release}-{machine}-basic/ref/{version}/'.format(
+                release=release.split(".", 1)[0],
+                machine=machine,
+                version=version,
+                )
+
+        process.run(
+            distro.conn,
+            args=[
+                'rpm',
+                '-Uvh',
+                '--replacepkgs',
+                '--force',
+                '--quiet',
+                '{url}noarch/ceph-release-1-0.fc{release}.noarch.rpm'.format(
+                    url=url,
+                    release=release,
+                    ),
+                ]
+            )
+
+    process.run(
+        distro.conn,
+        args=[
+            'yum',
+            '-y',
+            '-q',
+            'install',
+            'ceph',
+            ],
+        )
diff --git a/ceph_deploy/hosts/fedora/mon/__init__.py b/ceph_deploy/hosts/fedora/mon/__init__.py
new file mode 100644 (file)
index 0000000..fca0e0d
--- /dev/null
@@ -0,0 +1 @@
+from create import create
diff --git a/ceph_deploy/hosts/fedora/mon/create.py b/ceph_deploy/hosts/fedora/mon/create.py
new file mode 100644 (file)
index 0000000..16b9f22
--- /dev/null
@@ -0,0 +1,21 @@
+from ceph_deploy.hosts import common
+from ceph_deploy.lib.remoto import process
+
+
+def create(distro, args, monitor_keyring):
+    hostname = distro.conn.remote_module.shortname()
+    common.mon_create(distro, args, monitor_keyring, hostname)
+    service = distro.conn.remote_module.which_service()
+
+    process.run(
+        distro.conn,
+        [
+            service,
+            'ceph',
+            '-c',
+            '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster),
+            'start',
+            'mon.{hostname}'.format(hostname=hostname)
+        ],
+        timeout=7,
+    )
diff --git a/ceph_deploy/hosts/fedora/uninstall.py b/ceph_deploy/hosts/fedora/uninstall.py
new file mode 100644 (file)
index 0000000..4c80827
--- /dev/null
@@ -0,0 +1,13 @@
+from ceph_deploy.util import pkg_managers
+
+
+def uninstall(conn, purge=False):
+    packages = [
+        'ceph',
+        ]
+
+    pkg_managers.yum_remove(
+        conn,
+        packages,
+    )
+
diff --git a/ceph_deploy/hosts/remotes.py b/ceph_deploy/hosts/remotes.py
new file mode 100644 (file)
index 0000000..19c8038
--- /dev/null
@@ -0,0 +1,163 @@
+import errno
+import socket
+import os
+import tempfile
+import platform
+
+
+def platform_information():
+    """ detect platform information from remote host """
+    distro, release, codename = platform.linux_distribution()
+    return (
+        str(distro).rstrip(),
+        str(release).rstrip(),
+        str(codename).rstrip()
+    )
+
+
+def machine_type():
+    """ detect machine type """
+    return platform.machine()
+
+
+def write_sources_list(url, codename):
+    """add ceph deb repo to sources.list"""
+    with file('/etc/apt/sources.list.d/ceph.list', 'w') as f:
+        f.write('deb {url} {codename} main\n'.format(
+                url=url,
+                codename=codename,
+                ))
+
+
+def write_conf(cluster, conf, overwrite):
+    """ write cluster configuration to /etc/ceph/{cluster}.conf """
+    path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster)
+    tmp_file = tempfile.NamedTemporaryFile(delete=False)
+    err_msg = 'config file %s exists with different content; use --overwrite-conf to overwrite' % path
+
+    if os.path.exists(path):
+        with file(path, 'rb') as f:
+            old = f.read()
+            if old != conf and not overwrite:
+                raise RuntimeError(err_msg)
+    tmp_file.write(conf)
+    os.rename(tmp_file.name, path)
+
+
+def write_keyring(path, key):
+    """ create a keyring file """
+    tmp_file = tempfile.NamedTemporaryFile(delete=False)
+    tmp_file.write(key)
+    os.rename(tmp_file.name, path)
+
+
+def create_mon_path(path):
+    """create the mon path if it does not exist"""
+    if not os.path.exists(path):
+        os.makedirs(path)
+
+
+def create_done_path(done_path):
+    """create a done file to avoid re-doing the mon deployment"""
+    with file(done_path, 'w'):
+        pass
+
+
+def create_init_path(init_path):
+    """create the init path if it does not exist"""
+    if not os.path.exists(init_path):
+        with file(init_path, 'w'):
+            pass
+
+
+def path_exists(path):
+    return os.path.exists(path)
+
+
+def makedir(path):
+    os.makedirs(path)
+
+
+def unlink(_file):
+    os.unlink(_file)
+
+
+def write_monitor_keyring(keyring, monitor_keyring):
+    """create the monitor keyring file"""
+    write_file(keyring, monitor_keyring)
+
+
+def write_file(path, content):
+    with file(path, 'w') as f:
+        f.write(content)
+
+
+def touch_file(path):
+    with file(path, 'wb') as f:  # noqa
+        pass
+
+
+def get_file(path):
+    """ fetch remote file """
+    try:
+        with file(path, 'rb') as f:
+            return f.read()
+    except IOError:
+        pass
+
+
+def shortname():
+    """get remote short hostname"""
+    return socket.gethostname().split('.', 1)[0]
+
+
+def which_service():
+    """ locating the `service` executable... """
+    locations = ['/sbin/service', '/usr/sbin/service']
+    for location in locations:
+        if os.path.exists(location):
+            return location
+
+
+def make_mon_removed_dir(path, file_name):
+    """ move old monitor data """
+    try:
+        os.makedirs('/var/lib/ceph/mon-removed')
+    except OSError, e:
+        if e.errno != errno.EEXIST:
+            raise
+    os.rename(path, os.path.join('/var/lib/ceph/mon-removed/', file_name))
+
+
+def safe_mkdir(path):
+    """ create path if it doesn't exist """
+    try:
+        os.mkdir(path)
+    except OSError, e:
+        if e.errno == errno.EEXIST:
+            pass
+        else:
+            raise
+
+
+def zeroing(dev):
+    """ zeroing last few blocks of device """
+    # this kills the crab
+    #
+    # sgdisk will wipe out the main copy of the GPT partition
+    # table (sorry), but it doesn't remove the backup copies, and
+    # subsequent commands will continue to complain and fail when
+    # they see those.  zeroing the last few blocks of the device
+    # appears to do the trick.
+    lba_size = 4096
+    size = 33 * lba_size
+    return True
+    with file(dev, 'wb') as f:
+        f.seek(-size, os.SEEK_END)
+        f.write(size*'\0')
+
+
+# remoto magic, needed to execute these functions remotely
+if __name__ == '__channelexec__':
+    for item in channel:  # noqa
+        channel.send(eval(item))  # noqa
diff --git a/ceph_deploy/hosts/suse/__init__.py b/ceph_deploy/hosts/suse/__init__.py
new file mode 100644 (file)
index 0000000..3d105e7
--- /dev/null
@@ -0,0 +1,10 @@
+import mon
+from install import install
+from uninstall import uninstall
+
+# Allow to set some information about this distro
+#
+
+distro = None
+release = None
+codename = None
diff --git a/ceph_deploy/hosts/suse/install.py b/ceph_deploy/hosts/suse/install.py
new file mode 100644 (file)
index 0000000..5c8a52a
--- /dev/null
@@ -0,0 +1,66 @@
+from ceph_deploy.lib.remoto import process
+
+
+def install(distro, version_kind, version, adjust_repos):
+    release = distro.release
+    machine = distro.machine_type
+
+    if version_kind in ['stable', 'testing']:
+        key = 'release'
+    else:
+        key = 'autobuild'
+
+    if distro.codename == 'Mantis':
+        distro = 'opensuse12'
+    else:
+        distro = 'sles-11sp2'
+
+    if adjust_repos:
+        process.run(
+            distro.conn,
+            [
+                "su",
+                "-c",
+                "'rpm --import \"https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/{key}.asc\"'".format(key=key),
+            ],
+        )
+
+        if version_kind == 'stable':
+            url = 'http://ceph.com/rpm-{version}/{distro}/'.format(
+                version=version,
+                distro=distro,
+                )
+        elif version_kind == 'testing':
+            url = 'http://ceph.com/rpm-testing/{distro}'.format(distro=distro)
+        elif version_kind == 'dev':
+            url = 'http://gitbuilder.ceph.com/ceph-rpm-{distro}{release}-{machine}-basic/ref/{version}/'.format(
+                distro=distro,
+                release=release.split(".", 1)[0],
+                machine=machine,
+                version=version,
+                )
+
+        process.run(
+            distro.conn,
+            [
+                'rpm',
+                '-Uvh',
+                '--replacepkgs',
+                '--force',
+                '--quiet',
+                '{url}noarch/ceph-release-1-0.noarch.rpm'.format(
+                    url=url,
+                    ),
+                ]
+            )
+
+    process.run(
+        distro.conn,
+        [
+            'zypper',
+            '--non-interactive',
+            '--quiet',
+            'install',
+            'ceph',
+            ],
+        )
diff --git a/ceph_deploy/hosts/suse/mon/__init__.py b/ceph_deploy/hosts/suse/mon/__init__.py
new file mode 100644 (file)
index 0000000..fca0e0d
--- /dev/null
@@ -0,0 +1 @@
+from create import create
diff --git a/ceph_deploy/hosts/suse/mon/create.py b/ceph_deploy/hosts/suse/mon/create.py
new file mode 100644 (file)
index 0000000..0c1316c
--- /dev/null
@@ -0,0 +1,20 @@
+from ceph_deploy.hosts import common
+from ceph_deploy.lib.remoto import process
+
+
+def create(distro, args, monitor_keyring):
+    hostname = distro.conn.remote_module.shortname()
+    common.mon_create(distro, args, monitor_keyring, hostname)
+    service = distro.conn.remote_module.which_service()
+
+    process.run(
+        distro.conn,
+        [
+            'rcceph',
+            '-c',
+            '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster),
+            'start',
+            'mon.{hostname}'.format(hostname=hostname)
+        ],
+        timeout=7,
+    )
diff --git a/ceph_deploy/hosts/suse/uninstall.py b/ceph_deploy/hosts/suse/uninstall.py
new file mode 100644 (file)
index 0000000..740f6f3
--- /dev/null
@@ -0,0 +1,19 @@
+from ceph_deploy.lib.remoto import process
+
+
+def uninstall(conn, purge=False):
+    packages = [
+        'ceph',
+        'libcephfs1',
+        'librados2',
+        'librbd1',
+        ]
+    cmd = [
+        'zypper',
+        '--non-interactive',
+        '--quiet',
+        'remove',
+        ]
+
+    cmd.extend(packages)
+    process.run(conn, cmd)
diff --git a/ceph_deploy/install.py b/ceph_deploy/install.py
new file mode 100644 (file)
index 0000000..d7416ae
--- /dev/null
@@ -0,0 +1,281 @@
+import argparse
+import logging
+from distutils.util import strtobool
+
+from . import hosts
+from .cliutil import priority
+from .lib.remoto import process
+
+
+LOG = logging.getLogger(__name__)
+
+
+def ceph_is_installed(conn):
+    """
+    Check if the ceph packages are installed by looking for the
+    presence of the ceph command.
+    """
+    stdout, stderr, return_code = process.check(
+        conn,
+        ['which', 'ceph'],
+    )
+    return not return_code
+
+
+def install(args):
+    version = getattr(args, args.version_kind)
+    version_str = args.version_kind
+    if version:
+        version_str += ' version {version}'.format(version=version)
+    LOG.debug(
+        'Installing %s on cluster %s hosts %s',
+        version_str,
+        args.cluster,
+        ' '.join(args.host),
+        )
+    for hostname in args.host:
+        # TODO username
+        LOG.debug('Detecting platform for host %s ...', hostname)
+        distro = hosts.get(hostname)
+        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
+        rlogger = logging.getLogger(hostname)
+        rlogger.info('installing ceph on %s' % hostname)
+        distro.install(distro, args.version_kind, version, args.adjust_repos)
+        # Check the ceph version we just installed
+        hosts.common.ceph_version(distro.conn)
+        distro.conn.exit()
+
+
+def uninstall(args):
+    LOG.debug(
+        'Uninstalling on cluster %s hosts %s',
+        args.cluster,
+        ' '.join(args.host),
+        )
+
+    for hostname in args.host:
+        LOG.debug('Detecting platform for host %s ...', hostname)
+
+        # TODO username
+        distro = hosts.get(hostname)
+        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
+        rlogger = logging.getLogger(hostname)
+        rlogger.info('uninstalling ceph on %s' % hostname)
+        distro.uninstall(distro.conn)
+        distro.conn.exit()
+
+
+def purge(args):
+    LOG.debug(
+        'Purging from cluster %s hosts %s',
+        args.cluster,
+        ' '.join(args.host),
+        )
+
+    for hostname in args.host:
+        LOG.debug('Detecting platform for host %s ...', hostname)
+
+        # TODO username
+        distro = hosts.get(hostname)
+        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
+        rlogger = logging.getLogger(hostname)
+        rlogger.info('purging host ... %s' % hostname)
+        distro.uninstall(distro.conn, purge=True)
+        distro.conn.exit()
+
+
+def purge_data(args):
+    LOG.debug(
+        'Purging data from cluster %s hosts %s',
+        args.cluster,
+        ' '.join(args.host),
+        )
+
+    installed_hosts = []
+    for hostname in args.host:
+        distro = hosts.get(hostname)
+        if ceph_is_installed(distro.conn):
+            installed_hosts.append(hostname)
+        distro.conn.exit()
+
+    if installed_hosts:
+        print "ceph is still installed on: ", installed_hosts
+        answer = raw_input("Continue (y/n)")
+        if not strtobool(answer):
+            return
+
+    for hostname in args.host:
+        # TODO username
+        distro = hosts.get(hostname)
+        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
+        rlogger = logging.getLogger(hostname)
+        rlogger.info('purging data on %s' % hostname)
+
+        process.run(
+            distro.conn,
+            [
+                'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
+            ]
+        )
+        if distro.conn.remote_module.path_exists('/var/lib/ceph'):
+            process.run(
+                distro.conn,
+                [
+                    'find', '/var/lib/ceph',
+                    '-mindepth', '1',
+                    '-maxdepth', '2',
+                    '-type', 'd',
+                    '-exec', 'umount', '{}', ';',
+                ]
+            )
+            process.run(
+                distro.conn,
+                [
+                    'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
+                ]
+            )
+
+        process.run(
+            distro.conn,
+            [
+                'rm', '-rf', '--one-file-system', '--', '/etc/ceph/*',
+            ]
+        )
+
+        distro.conn.exit()
+
+
+class StoreVersion(argparse.Action):
+    """
+    Like ``"store"`` but also remember which one of the exclusive
+    options was set.
+
+    There are three kinds of versions: stable, testing and dev.
+    This sets ``version_kind`` to be the right one of the above.
+
+    This kludge essentially lets us differentiate explicitly set
+    values from defaults.
+    """
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, values)
+        namespace.version_kind = self.dest
+
+
+@priority(20)
+def make(parser):
+    """
+    Install Ceph packages on remote hosts.
+    """
+
+    version = parser.add_mutually_exclusive_group()
+
+    version.add_argument(
+        '--stable',
+        nargs='?',
+        action=StoreVersion,
+        choices=[
+            'bobtail',
+            'cuttlefish',
+            'dumpling',
+            'emperor',
+            ],
+        metavar='CODENAME',
+        help='install a release known as CODENAME (done by default) (default: %(default)s)',
+    )
+
+    version.add_argument(
+        '--testing',
+        nargs=0,
+        action=StoreVersion,
+        help='install the latest development release',
+    )
+
+    version.add_argument(
+        '--dev',
+        nargs='?',
+        action=StoreVersion,
+        const='master',
+        metavar='BRANCH_OR_TAG',
+        help='install a bleeding edge build from Git branch or tag (default: %(default)s)',
+    )
+
+    version.add_argument(
+        '--adjust-repos',
+        dest='adjust_repos',
+        action='store_true',
+        help='install packages modifying source repos',
+    )
+
+    version.add_argument(
+        '--no-adjust-repos',
+        dest='adjust_repos',
+        action='store_false',
+        help='install packages without modifying source repos',
+    )
+
+    version.set_defaults(
+        func=install,
+        stable='dumpling',
+        dev='master',
+        version_kind='stable',
+        adjust_repos=True,
+    )
+
+    parser.add_argument(
+        'host',
+        metavar='HOST',
+        nargs='+',
+        help='hosts to install on',
+    )
+
+    parser.set_defaults(
+        func=install,
+    )
+
+
+@priority(80)
+def make_uninstall(parser):
+    """
+    Remove Ceph packages from remote hosts.
+    """
+    parser.add_argument(
+        'host',
+        metavar='HOST',
+        nargs='+',
+        help='hosts to uninstall Ceph from',
+        )
+    parser.set_defaults(
+        func=uninstall,
+        )
+
+
+@priority(80)
+def make_purge(parser):
+    """
+    Remove Ceph packages from remote hosts and purge all data.
+    """
+    parser.add_argument(
+        'host',
+        metavar='HOST',
+        nargs='+',
+        help='hosts to purge Ceph from',
+        )
+    parser.set_defaults(
+        func=purge,
+        )
+
+
+@priority(80)
+def make_purge_data(parser):
+    """
+    Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph
+    """
+    parser.add_argument(
+        'host',
+        metavar='HOST',
+        nargs='+',
+        help='hosts to purge Ceph data from',
+        )
+    parser.set_defaults(
+        func=purge_data,
+        )
diff --git a/ceph_deploy/lib/__init__.py b/ceph_deploy/lib/__init__.py
new file mode 100644 (file)
index 0000000..c741c0b
--- /dev/null
@@ -0,0 +1,10 @@
+"""
+This module is meant for vendorizing Python libraries. Most libraries will need
+to have some ``sys.path`` alterations done unless they are doing relative
+imports.
+
+Do **not** add anything to this module that does not represent a vendorized
+library.
+"""
+
+import remoto
diff --git a/ceph_deploy/lsb.py b/ceph_deploy/lsb.py
new file mode 100644 (file)
index 0000000..b7f3e73
--- /dev/null
@@ -0,0 +1,121 @@
+import logging
+from . import exc
+
+
+logger = logging.getLogger(__name__)
+
+
+def check_lsb_release():
+    """
+    Verify if lsb_release command is available
+    """
+    import subprocess
+
+    args = [ 'which', 'lsb_release', ]
+    process = subprocess.Popen(
+        args=args,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        )
+    lsb_release_path, _ = process.communicate()
+    ret = process.wait()
+    if ret != 0:
+        raise RuntimeError('The lsb_release command was not found on remote host.  Please install the lsb-release package.')
+
+
+def lsb_fallback(conn):
+    """
+    This fallback will attempt to detect the distro, release and codename for
+    a given remote host when lsb fails. It uses the
+    ``platform.linux_distribution`` module that should be fairly robust and
+    would prevent us from adding repositories and installing a package just to
+    detect a platform.
+    """
+    distro, release, codename = conn.modules.platform.linux_distribution()
+    return (
+        str(distro).rstrip(),
+        str(release).rstrip(),
+        str(codename).rstrip()
+    )
+
+
+def lsb_release():
+    """
+    Get LSB release information from lsb_release.
+
+    Returns truple with distro, release and codename. Otherwise
+    the function raises an error (subprocess.CalledProcessError or
+    RuntimeError).
+    """
+    import subprocess
+
+    args = [ 'lsb_release', '-s', '-i' ]
+    process = subprocess.Popen(
+        args=args,
+        stdout=subprocess.PIPE,
+        )
+    distro, _ = process.communicate()
+    ret = process.wait()
+    if ret != 0:
+        raise subprocess.CalledProcessError(ret, args, output=distro)
+    if distro == '':
+        raise RuntimeError('lsb_release gave invalid output for distro')
+
+    args = [ 'lsb_release', '-s', '-r', ]
+    process = subprocess.Popen(
+        args=args,
+        stdout=subprocess.PIPE,
+        )
+    release, _ = process.communicate()
+    ret = process.wait()
+    if ret != 0:
+        raise subprocess.CalledProcessError(ret, args, output=release)
+    if release == '':
+        raise RuntimeError('lsb_release gave invalid output for release')
+
+    args = [ 'lsb_release', '-s', '-c', ]
+    process = subprocess.Popen(
+        args=args,
+        stdout=subprocess.PIPE,
+        )
+    codename, _ = process.communicate()
+    ret = process.wait()
+    if ret != 0:
+        raise subprocess.CalledProcessError(ret, args, output=codename)
+    if codename == '':
+        raise RuntimeError('lsb_release gave invalid output for codename')
+
+    return (str(distro).rstrip(), str(release).rstrip(), str(codename).rstrip())
+
+
+def get_lsb_release(sudo):
+    """
+    Get LSB release information from lsb_release.
+
+    Check if lsb_release is installed on the remote host and issue
+    a message if not.
+
+    Returns truple with distro, release and codename. Otherwise
+    the function raises an error (subprocess.CalledProcessError or
+    RuntimeError).
+    """
+    try:
+        check_lsb_release_r = sudo.compile(check_lsb_release)
+        status = check_lsb_release_r()
+    except RuntimeError as e:
+        logger.warning('lsb_release was not found - inferring OS details')
+        return lsb_fallback(sudo)
+
+    lsb_release_r = sudo.compile(lsb_release)
+    return lsb_release_r()
+
+
+def choose_init(distro, codename):
+    """
+    Select a init system for a given distribution.
+
+    Returns the name of a init system (upstart, sysvinit ...).
+    """
+    if distro == 'Ubuntu':
+        return 'upstart'
+    return 'sysvinit'
diff --git a/ceph_deploy/mds.py b/ceph_deploy/mds.py
new file mode 100644 (file)
index 0000000..571f317
--- /dev/null
@@ -0,0 +1,202 @@
+from cStringIO import StringIO
+import errno
+import logging
+import os
+
+from . import conf
+from . import exc
+from . import hosts
+from .lib.remoto import process
+from .cliutil import priority
+
+
+LOG = logging.getLogger(__name__)
+
+
+def get_bootstrap_mds_key(cluster):
+    """
+    Read the bootstrap-mds key for `cluster`.
+    """
+    path = '{cluster}.bootstrap-mds.keyring'.format(cluster=cluster)
+    try:
+        with file(path, 'rb') as f:
+            return f.read()
+    except IOError:
+        raise RuntimeError('bootstrap-mds keyring not found; run \'gatherkeys\'')
+
+
+def create_mds(conn, name, cluster, init):
+
+    path = '/var/lib/ceph/mds/{cluster}-{name}'.format(
+        cluster=cluster,
+        name=name
+        )
+
+    conn.remote_module.safe_mkdir(path)
+
+    bootstrap_keyring = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
+        cluster=cluster
+        )
+
+    keypath = os.path.join(path, 'keyring')
+
+    stdout, stderr, returncode = process.check(
+        conn,
+        [
+            'ceph',
+            '--cluster', cluster,
+            '--name', 'client.bootstrap-mds',
+            '--keyring', bootstrap_keyring,
+            'auth', 'get-or-create', 'mds.{name}'.format(name=name),
+            'osd', 'allow rwx',
+            'mds', 'allow',
+            'mon', 'allow profile mds',
+            '-o',
+            os.path.join(keypath),
+        ]
+    )
+    if returncode != errno.EACCES:
+        for line in stderr:
+            conn.logger.error(line)
+        raise RuntimeError('could not create mds')
+
+        process.check(
+            conn,
+            [
+                'ceph',
+                '--cluster', cluster,
+                '--name', 'client.bootstrap-mds',
+                '--keyring', bootstrap_keyring,
+                'auth', 'get-or-create', 'mds.{name}'.format(name=name),
+                'osd', 'allow *',
+                'mds', 'allow',
+                'mon', 'allow rwx',
+                '-o',
+                os.path.join(keypath),
+            ]
+        )
+
+    conn.remote_module.touch_file(os.path.join(path, 'done'))
+    conn.remote_module.touch_file(os.path.join(path, init))
+
+    if init == 'upstart':
+        process.run(
+            conn,
+            [
+                'initctl',
+                'emit',
+                'ceph-mds',
+                'cluster={cluster}'.format(cluster=cluster),
+                'id={name}'.format(name=name),
+            ],
+            timeout=7
+        )
+    elif init == 'sysvinit':
+        process.run(
+            conn,
+            [
+                'service',
+                'ceph',
+                'start',
+                'mds.{name}'.format(name=name),
+            ],
+            timeout=7
+        )
+
+
+def mds_create(args):
+    cfg = conf.load(args)
+    LOG.debug(
+        'Deploying mds, cluster %s hosts %s',
+        args.cluster,
+        ' '.join(':'.join(x or '' for x in t) for t in args.mds),
+        )
+
+    if not args.mds:
+        raise exc.NeedHostError()
+
+    key = get_bootstrap_mds_key(cluster=args.cluster)
+
+    bootstrapped = set()
+    errors = 0
+    for hostname, name in args.mds:
+        try:
+            # TODO username
+            distro = hosts.get(hostname)
+            rlogger = distro.conn.logger
+            LOG.info(
+                'Distro info: %s %s %s',
+                distro.name,
+                distro.release,
+                distro.codename
+            )
+            LOG.debug('remote host will use %s', distro.init)
+
+            if hostname not in bootstrapped:
+                bootstrapped.add(hostname)
+                LOG.debug('deploying mds bootstrap to %s', hostname)
+                conf_data = StringIO()
+                cfg.write(conf_data)
+                distro.conn.remote_module.write_conf(
+                    args.cluster,
+                    conf_data.getvalue(),
+                    args.overwrite_conf,
+                )
+
+                path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
+                    cluster=args.cluster,
+                )
+
+                if not distro.conn.remote_module.path_exists(path):
+                    rlogger.info('mds keyring does not exist yet, creating one')
+                    distro.conn.remote_module.write_keyring(path, key)
+
+            create_mds(distro.conn, name, args.cluster, distro.init)
+            distro.conn.exit()
+        except RuntimeError as e:
+            LOG.error(e)
+            errors += 1
+
+    if errors:
+        raise exc.GenericError('Failed to create %d MDSs' % errors)
+
+
+def mds(args):
+    if args.subcommand == 'create':
+        mds_create(args)
+    else:
+        LOG.error('subcommand %s not implemented', args.subcommand)
+
+
+def colon_separated(s):
+    host = s
+    name = s
+    if s.count(':') == 1:
+        (host, name) = s.split(':')
+    return (host, name)
+
+
+@priority(30)
+def make(parser):
+    """
+    Deploy ceph MDS on remote hosts.
+    """
+    parser.add_argument(
+        'subcommand',
+        metavar='SUBCOMMAND',
+        choices=[
+            'create',
+            'destroy',
+            ],
+        help='create or destroy',
+        )
+    parser.add_argument(
+        'mds',
+        metavar='HOST[:NAME]',
+        nargs='*',
+        type=colon_separated,
+        help='host (and optionally the daemon name) to deploy on',
+        )
+    parser.set_defaults(
+        func=mds,
+        )
diff --git a/ceph_deploy/memoize.py b/ceph_deploy/memoize.py
new file mode 100644 (file)
index 0000000..fd344a0
--- /dev/null
@@ -0,0 +1,26 @@
+import functools
+
+
+class NotFound(object):
+    """
+    Sentinel object to say call was not memoized.
+
+    Supposed to be faster than throwing exceptions on cache miss.
+    """
+    def __str__(self):
+        return self.__class__.__name__
+
+NotFound = NotFound()
+
+
+def memoize(f):
+    cache = {}
+
+    @functools.wraps(f)
+    def wrapper(*args, **kwargs):
+        key = (args, tuple(sorted(kwargs.iteritems())))
+        val = cache.get(key, NotFound)
+        if val is NotFound:
+            val = cache[key] = f(*args, **kwargs)
+        return val
+    return wrapper
diff --git a/ceph_deploy/misc.py b/ceph_deploy/misc.py
new file mode 100644 (file)
index 0000000..1620e1f
--- /dev/null
@@ -0,0 +1,22 @@
+
+def mon_hosts(mons):
+    """
+    Iterate through list of MON hosts, return tuples of (name, host).
+    """
+    for m in mons:
+        if m.count(':'):
+            (name, host) = m.split(':')
+        else:
+            name = m
+            host = m
+            if name.count('.') > 0:
+                name = name.split('.')[0]
+        yield (name, host)
+
+def remote_shortname(socket):
+    """
+    Obtains remote hostname of the socket and cuts off the domain part
+    of its FQDN.
+    """
+    return socket.gethostname().split('.', 1)[0]
+
diff --git a/ceph_deploy/mon.py b/ceph_deploy/mon.py
new file mode 100644 (file)
index 0000000..59f3baf
--- /dev/null
@@ -0,0 +1,404 @@
+import argparse
+import json
+import logging
+import re
+import os
+from textwrap import dedent
+import time
+
+from . import conf, exc
+from .cliutil import priority
+from .util import paths
+from .lib.remoto import process
+from . import hosts
+from .misc import mon_hosts
+from .connection import get_connection
+from . import gatherkeys
+
+
+LOG = logging.getLogger(__name__)
+
+
+def mon_status_check(conn, logger, hostname):
+    """
+    A direct check for JSON output on the monitor status.
+
+    For newer versions of Ceph (dumpling and newer) a new mon_status command
+    was added ( `ceph daemon mon mon_status` ) and should be revisited if the
+    output changes as this check depends on that availability.
+
+    WARNING: this function requires the new connection object
+    """
+    mon = 'mon.%s' % hostname
+
+    out, err, code = process.check(
+        conn,
+        [
+            'ceph',
+            '--admin-daemon',
+            '/var/run/ceph/ceph-%s.asok' % mon,
+            'mon_status',
+        ],
+    )
+
+    for line in err:
+        logger.error(line)
+
+    try:
+        return json.loads(''.join(out))
+    except ValueError:
+        return {}
+
+
+def catch_mon_errors(conn, logger, hostname, cfg):
+    """
+    Make sure we are able to catch up common mishaps with monitors
+    and use that state of a monitor to determine what is missing
+    and warn apropriately about it.
+    """
+    monmap = mon_status_check(conn, logger, hostname).get('monmap', {})
+    mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
+    public_addr = cfg.safe_get('global', 'public_addr')
+    public_network = cfg.safe_get('global', 'public_network')
+    mon_in_monmap = [
+        mon.get('name')
+        for mon in monmap.get('mons', [{}])
+        if mon.get('name') == hostname
+    ]
+    if mon_initial_members is None or not hostname in mon_initial_members:
+            logger.warning('%s is not defined in `mon initial members`', hostname)
+    if not mon_in_monmap:
+        logger.warning('monitor %s does not exist in monmap', hostname)
+        if not public_addr and not public_network:
+            logger.warning('neither `public_addr` nor `public_network` keys are defined for monitors')
+            logger.warning('monitors may not be able to form quorum')
+
+
+def mon_status(conn, logger, hostname, silent=False):
+    """
+    run ``ceph daemon mon.`hostname` mon_status`` on the remote end and provide
+    not only the output, but be able to return a boolean status of what is
+    going on.
+    ``False`` represents a monitor that is not doing OK even if it is up and
+    running, while ``True`` would mean the monitor is up and running correctly.
+    """
+    mon = 'mon.%s' % hostname
+
+    try:
+        out = mon_status_check(conn, logger, hostname)
+        if not out:
+            logger.warning('monitor: %s, might not be running yet' % mon)
+            return False
+
+        if not silent:
+            logger.debug('*'*80)
+            logger.debug('status for monitor: %s' % mon)
+            for line in json.dumps(out, indent=2, sort_keys=True).split('\n'):
+                logger.debug(line)
+            logger.debug('*'*80)
+        if out['rank'] >= 0:
+            logger.info('monitor: %s is running' % mon)
+            return True
+        logger.info('monitor: %s is not running' % mon)
+        return False
+    except RuntimeError:
+        logger.info('monitor: %s is not running' % mon)
+        return False
+
+
+def mon_create(args):
+
+    cfg = conf.load(args)
+    if not args.mon:
+        mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
+        args.mon = re.split(r'[,\s]+', mon_initial_members)
+
+    if not args.mon:
+        raise exc.NeedHostError()
+
+    try:
+        with file('{cluster}.mon.keyring'.format(cluster=args.cluster),
+                  'rb') as f:
+            monitor_keyring = f.read()
+    except IOError:
+        raise RuntimeError('mon keyring not found; run \'new\' to create a new cluster')
+
+    LOG.debug(
+        'Deploying mon, cluster %s hosts %s',
+        args.cluster,
+        ' '.join(args.mon),
+        )
+
+    errors = 0
+    for (name, host) in mon_hosts(args.mon):
+        try:
+            # TODO username
+            # TODO add_bootstrap_peer_hint
+            LOG.debug('detecting platform for host %s ...', name)
+            distro = hosts.get(host)
+            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
+            rlogger = logging.getLogger(name)
+
+            # ensure remote hostname is good to go
+            hostname_is_compatible(distro.conn, rlogger, name)
+            rlogger.debug('deploying mon to %s', name)
+            distro.mon.create(distro, args, monitor_keyring)
+
+            # tell me the status of the deployed mon
+            time.sleep(2)  # give some room to start
+            mon_status(distro.conn, rlogger, name)
+            catch_mon_errors(distro.conn, rlogger, name, cfg)
+            distro.conn.exit()
+
+        except RuntimeError as e:
+            LOG.error(e)
+            errors += 1
+
+    if errors:
+        raise exc.GenericError('Failed to create %d monitors' % errors)
+
+
+def hostname_is_compatible(conn, logger, provided_hostname):
+    """
+    Make sure that the host that we are connecting to has the same value as the
+    `hostname` in the remote host, otherwise mons can fail not reaching quorum.
+    """
+    logger.debug('determining if provided host has same hostname in remote')
+    remote_hostname = conn.remote_module.shortname()
+    if remote_hostname == provided_hostname:
+        return
+    logger.warning('*'*80)
+    logger.warning('provided hostname must match remote hostname')
+    logger.warning('provided hostname: %s' % provided_hostname)
+    logger.warning('remote hostname: %s' % remote_hostname)
+    logger.warning('monitors may not reach quorum and create-keys will not complete')
+    logger.warning('*'*80)
+
+
+def destroy_mon(conn, cluster, hostname):
+    import datetime
+    import time
+    retries = 5
+
+    path = paths.mon.path(cluster, hostname)
+
+    if conn.remote_module.path_exists(path):
+        # remove from cluster
+        process.run(
+            conn,
+            [
+                'ceph',
+                '--cluster={cluster}'.format(cluster=cluster),
+                '-n', 'mon.',
+                '-k', '{path}/keyring'.format(path=path),
+                'mon',
+                'remove',
+                hostname,
+            ],
+            timeout=7,
+        )
+
+        # stop
+        if conn.remote_module.path_exists(os.path.join(path, 'upstart')):
+            status_args = [
+                'initctl',
+                'status',
+                'ceph-mon',
+                'cluster={cluster}'.format(cluster=cluster),
+                'id={hostname}'.format(hostname=hostname),
+            ]
+
+        elif conn.remote_module.path_exists(os.path.join(path, 'sysvinit')):
+            status_args = [
+                'service',
+                'ceph',
+                'status',
+                'mon.{hostname}'.format(hostname=hostname),
+            ]
+
+        while retries:
+            conn.logger.info('polling the daemon to verify it stopped')
+            if is_running(conn, status_args):
+                time.sleep(5)
+                retries -= 1
+                if retries <= 0:
+                    raise RuntimeError('ceph-mon deamon did not stop')
+            else:
+                break
+
+        # archive old monitor directory
+        fn = '{cluster}-{hostname}-{stamp}'.format(
+            hostname=hostname,
+            cluster=cluster,
+            stamp=datetime.datetime.utcnow().strftime("%Y-%m-%dZ%H:%M:%S"),
+            )
+
+        process.run(
+            conn,
+            [
+                'mkdir',
+                '-p',
+                '/var/lib/ceph/mon-removed',
+            ],
+        )
+
+        conn.remote_module.make_mon_removed_dir(path, fn)
+
+
+def mon_destroy(args):
+    errors = 0
+    for (name, host) in mon_hosts(args.mon):
+        try:
+            LOG.debug('Removing mon from %s', name)
+
+            # TODO username
+            distro = hosts.get(host)
+            hostname = distro.conn.remote_module.shortname()
+
+            destroy_mon(
+                distro.conn,
+                args.cluster,
+                hostname,
+            )
+            distro.conn.exit()
+
+        except RuntimeError as e:
+            LOG.error(e)
+            errors += 1
+
+    if errors:
+        raise exc.GenericError('Failed to destroy %d monitors' % errors)
+
+
+def mon_create_initial(args):
+    cfg = conf.load(args)
+    cfg_initial_members = cfg.safe_get('global', 'mon_initial_members')
+    if cfg_initial_members is None:
+        raise RuntimeError('No `mon initial members` defined in config')
+    mon_initial_members = re.split(r'[,\s]+', cfg_initial_members)
+
+    # create them normally through mon_create
+    mon_create(args)
+
+    # make the sets to be able to compare late
+    mon_in_quorum = set([])
+    mon_members = set([host for host in mon_initial_members])
+
+    for host in mon_initial_members:
+        mon_name = 'mon.%s' % host
+        LOG.info('processing monitor %s', mon_name)
+        sleeps = [20, 20, 15, 10, 10, 5]
+        tries = 5
+        rlogger = logging.getLogger(host)
+        rconn = get_connection(host, logger=rlogger)
+        while tries:
+            status = mon_status_check(rconn, rlogger, host)
+            has_reached_quorum = status.get('state', '') in ['peon', 'leader']
+            if not has_reached_quorum:
+                LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries))
+                tries -= 1
+                sleep_seconds = sleeps.pop()
+                LOG.warning('waiting %s seconds before retrying', sleep_seconds)
+                time.sleep(sleep_seconds)  # Magic number
+            else:
+                mon_in_quorum.add(host)
+                LOG.info('%s monitor has reached quorum!', mon_name)
+                break
+        rconn.exit()
+
+    if mon_in_quorum == mon_members:
+        LOG.info('all initial monitors are running and have formed quorum')
+        LOG.info('Running gatherkeys...')
+        gatherkeys.gatherkeys(args)
+    else:
+        LOG.error('Some monitors have still not reached quorum:')
+        for host in mon_members - mon_in_quorum:
+            LOG.error('%s', host)
+
+
+def mon(args):
+    if args.subcommand == 'create':
+        mon_create(args)
+    elif args.subcommand == 'destroy':
+        mon_destroy(args)
+    elif args.subcommand == 'create-initial':
+        mon_create_initial(args)
+    else:
+        LOG.error('subcommand %s not implemented', args.subcommand)
+
+
+@priority(30)
+def make(parser):
+    """
+    Deploy ceph monitor on remote hosts.
+    """
+    sub_command_help = dedent("""
+    Subcommands:
+
+    create-initial
+      Will deploy for monitors defined in `mon initial members`, wait until
+      they form quorum and then gatherkeys, reporting the monitor status along
+      the process. If monitors don't form quorum the command will eventually
+      time out.
+
+    create
+      Deploy monitors by specifying them like:
+
+        ceph-deploy mon create node1 node2 node3
+
+      If no hosts are passed it will default to use the `mon initial members`
+      defined in the configuration.
+
+    destroy
+      Completely remove monitors on a remote host. Requires hostname(s) as
+      arguments.
+    """)
+    parser.formatter_class = argparse.RawDescriptionHelpFormatter
+    parser.description = sub_command_help
+
+    parser.add_argument(
+        'subcommand',
+        choices=[
+            'create',
+            'create-initial',
+            'destroy',
+            ],
+        )
+    parser.add_argument(
+        'mon',
+        nargs='*',
+        )
+    parser.set_defaults(
+        func=mon,
+        )
+
+#
+# Helpers
+#
+
+
+def is_running(conn, args):
+    """
+    Run a command to check the status of a mon, return a boolean.
+
+    We heavily depend on the format of the output, if that ever changes
+    we need to modify this.
+    Check daemon status for 3 times
+    output of the status should be similar to::
+
+        mon.mira094: running {"version":"0.61.5"}
+
+    or when it fails::
+
+        mon.mira094: dead {"version":"0.61.5"}
+        mon.mira094: not running {"version":"0.61.5"}
+    """
+    stdout, stderr, _ = process.check(
+        conn,
+        args
+    )
+    result_string = ' '.join(stdout)
+    for run_check in [': running', ' start/running']:
+        if run_check in result_string:
+            return True
+    return False
diff --git a/ceph_deploy/new.py b/ceph_deploy/new.py
new file mode 100644 (file)
index 0000000..33d2b6e
--- /dev/null
@@ -0,0 +1,133 @@
+import errno
+import logging
+import os
+import uuid
+import struct
+import time
+import base64
+import socket
+
+from . import exc
+from .cliutil import priority
+from .conf import CephConf
+from .util import arg_validators
+from .misc import mon_hosts
+
+
+LOG = logging.getLogger(__name__)
+
+
+def generate_auth_key():
+    key = os.urandom(16)
+    header = struct.pack('<hiih',
+                1,               # le16 type: CEPH_CRYPTO_AES
+                int(time.time()),  # le32 created: seconds
+                0,               # le32 created: nanoseconds,
+                len(key),        # le16: len(key)
+                )
+    return base64.b64encode(header + key)
+
+def get_nonlocal_ip(host):
+    """
+    Search result of getaddrinfo() for a non-localhost-net address
+    """
+    ailist = socket.getaddrinfo(host, None)
+    for ai in ailist:
+        # an ai is a 5-tuple; the last element is (ip, port)
+        ip = ai[4][0]
+        if not ip.startswith('127.'):
+            return ip
+    raise exc.UnableToResolveError(host)
+
+
+def new(args):
+    LOG.debug('Creating new cluster named %s', args.cluster)
+    cfg = CephConf()
+    cfg.add_section('global')
+
+    fsid = uuid.uuid4()
+    cfg.set('global', 'fsid', str(fsid))
+
+    mon_initial_members = []
+    mon_host = []
+
+    for (name, host) in mon_hosts(args.mon):
+        LOG.debug('Resolving host %s', host)
+        ip = None
+        ip = get_nonlocal_ip(host)
+        LOG.debug('Monitor %s at %s', name, ip)
+        mon_initial_members.append(name)
+        mon_host.append(ip)
+
+    LOG.debug('Monitor initial members are %s', mon_initial_members)
+    LOG.debug('Monitor addrs are %s', mon_host)
+
+    cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
+    # no spaces here, see http://tracker.newdream.net/issues/3145
+    cfg.set('global', 'mon host', ','.join(mon_host))
+
+    # override undesirable defaults, needed until bobtail
+
+    # http://tracker.newdream.net/issues/3136
+    cfg.set('global', 'auth supported', 'cephx')
+
+    # http://tracker.newdream.net/issues/3137
+    cfg.set('global', 'osd journal size', '1024')
+
+    # http://tracker.newdream.net/issues/3138
+    cfg.set('global', 'filestore xattr use omap', 'true')
+
+    path = '{name}.conf'.format(
+        name=args.cluster,
+        )
+
+    # FIXME: create a random key
+    LOG.debug('Creating a random mon key...')
+    mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key()
+
+    keypath = '{name}.mon.keyring'.format(
+        name=args.cluster,
+        )
+
+    LOG.debug('Writing initial config to %s...', path)
+    if not args.dry_run:
+        tmp = '%s.tmp' % path
+        with file(tmp, 'w') as f:
+            cfg.write(f)
+        try:
+            os.rename(tmp, path)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                raise exc.ClusterExistsError(path)
+            else:
+                raise
+
+    LOG.debug('Writing monitor keyring to %s...', keypath)
+    if not args.dry_run:
+        tmp = '%s.tmp' % keypath
+        with file(tmp, 'w') as f:
+            f.write(mon_keyring)
+        try:
+            os.rename(tmp, keypath)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                raise exc.ClusterExistsError(keypath)
+            else:
+                raise
+
+
+@priority(10)
+def make(parser):
+    """
+    Start deploying a new cluster, and write a CLUSTER.conf and keyring for it.
+    """
+    parser.add_argument(
+        'mon',
+        metavar='MON',
+        nargs='+',
+        help='initial monitor hostname, fqdn, or hostname:fqdn pair',
+        type=arg_validators.Hostname(),
+        )
+    parser.set_defaults(
+        func=new,
+        )
diff --git a/ceph_deploy/osd.py b/ceph_deploy/osd.py
new file mode 100644 (file)
index 0000000..9b939ce
--- /dev/null
@@ -0,0 +1,438 @@
+import argparse
+import logging
+import os
+import sys
+from textwrap import dedent
+
+from cStringIO import StringIO
+
+from . import conf
+from . import exc
+from . import hosts
+from .cliutil import priority
+from .lib.remoto import process
+
+
+LOG = logging.getLogger(__name__)
+
+
+def get_bootstrap_osd_key(cluster):
+    """
+    Read the bootstrap-osd key for `cluster`.
+    """
+    path = '{cluster}.bootstrap-osd.keyring'.format(cluster=cluster)
+    try:
+        with file(path, 'rb') as f:
+            return f.read()
+    except IOError:
+        raise RuntimeError('bootstrap-osd keyring not found; run \'gatherkeys\'')
+
+
+def create_osd(conn, cluster, key):
+    """
+    Run on osd node, writes the bootstrap key if not there yet.
+    """
+    logger = conn.logger
+    path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring'.format(
+        cluster=cluster,
+        )
+    if not conn.remote_module.path_exists(path):
+        logger.info('osd keyring does not exist yet, creating one')
+        conn.remote_module.write_keyring(path, key)
+
+    return process.run(
+        conn,
+        [
+            'udevadm',
+            'trigger',
+            '--subsystem-match=block',
+            '--action=add',
+        ],
+    )
+
+
+def prepare_disk(
+        conn,
+        cluster,
+        disk,
+        journal,
+        activate_prepared_disk,
+        zap,
+        fs_type,
+        dmcrypt,
+        dmcrypt_dir):
+    """
+    Run on osd node, prepares a data disk for use.
+    """
+    args = [
+        'ceph-disk-prepare',
+        ]
+    if zap:
+        args.append('--zap-disk')
+    if fs_type:
+        if fs_type not in ('btrfs', 'ext4', 'xfs'):
+            raise argparse.ArgumentTypeError(
+                "FS_TYPE must be one of 'btrfs', 'ext4' or 'xfs'")
+        args.extend(['--fs-type', fs_type])
+    if dmcrypt:
+        args.append('--dmcrypt')
+        if dmcrypt_dir is not None:
+            args.append('--dmcrypt-key-dir')
+            args.append(dmcrypt_dir)
+    args.extend([
+        '--cluster',
+        cluster,
+        '--',
+        disk,
+    ])
+
+    if journal is not None:
+        args.append(journal)
+
+    process.run(
+        conn,
+        args
+    )
+
+    if activate_prepared_disk:
+        return process.run(
+            conn,
+            [
+                'udevadm',
+                'trigger',
+                '--subsystem-match=block',
+                '--action=add',
+            ],
+        )
+
+
+def prepare(args, cfg, activate_prepared_disk):
+    LOG.debug(
+        'Preparing cluster %s disks %s',
+        args.cluster,
+        ' '.join(':'.join(x or '' for x in t) for t in args.disk),
+        )
+
+    key = get_bootstrap_osd_key(cluster=args.cluster)
+
+    bootstrapped = set()
+    errors = 0
+    for hostname, disk, journal in args.disk:
+        try:
+            if disk is None:
+                raise exc.NeedDiskError(hostname)
+
+            # TODO username
+            distro = hosts.get(hostname)
+            LOG.info(
+                'Distro info: %s %s %s',
+                distro.name,
+                distro.release,
+                distro.codename
+            )
+
+            if hostname not in bootstrapped:
+                bootstrapped.add(hostname)
+                LOG.debug('Deploying osd to %s', hostname)
+
+                conf_data = StringIO()
+                cfg.write(conf_data)
+                distro.conn.remote_module.write_conf(
+                    args.cluster,
+                    conf_data.getvalue(),
+                    args.overwrite_conf
+                )
+
+                create_osd(distro.conn, args.cluster, key)
+
+            LOG.debug('Preparing host %s disk %s journal %s activate %s',
+                      hostname, disk, journal, activate_prepared_disk)
+
+            prepare_disk(
+                distro.conn,
+                cluster=args.cluster,
+                disk=disk,
+                journal=journal,
+                activate_prepared_disk=activate_prepared_disk,
+                zap=args.zap_disk,
+                fs_type=args.fs_type,
+                dmcrypt=args.dmcrypt,
+                dmcrypt_dir=args.dmcrypt_key_dir,
+            )
+
+            LOG.debug('Host %s is now ready for osd use.', hostname)
+            distro.conn.exit()
+
+        except RuntimeError as e:
+            LOG.error(e)
+            errors += 1
+
+    if errors:
+        raise exc.GenericError('Failed to create %d OSDs' % errors)
+
+
+def activate(args, cfg):
+    LOG.debug(
+        'Activating cluster %s disks %s',
+        args.cluster,
+        # join elements of t with ':', t's with ' '
+        # allow None in elements of t; print as empty
+        ' '.join(':'.join((s or '') for s in t) for t in args.disk),
+        )
+
+    for hostname, disk, journal in args.disk:
+
+        # TODO username
+        distro = hosts.get(hostname)
+        LOG.info(
+            'Distro info: %s %s %s',
+            distro.name,
+            distro.release,
+            distro.codename
+        )
+
+        LOG.debug('activating host %s disk %s', hostname, disk)
+        LOG.debug('will use init type: %s', distro.init)
+
+        process.run(
+            distro.conn,
+            [
+                'ceph-disk-activate',
+                '--mark-init',
+                distro.init,
+                '--mount',
+                disk,
+            ],
+        )
+
+        distro.conn.exit()
+
+
+def disk_zap(args):
+    cfg = conf.load(args)
+
+    for hostname, disk, journal in args.disk:
+        if not disk or not hostname:
+            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
+        LOG.debug('zapping %s on %s', disk, hostname)
+        distro = hosts.get(hostname)
+        LOG.info(
+            'Distro info: %s %s %s',
+            distro.name,
+            distro.release,
+            distro.codename
+        )
+
+        # NOTE: this mirrors ceph-disk-prepare --zap-disk DEV
+        # zero the device
+        distro.conn.remote_module.zeroing(disk)
+
+        process.run(
+            distro.conn,
+            [
+                'sgdisk',
+                '--zap-all',
+                '--clear',
+                '--mbrtogpt',
+                '--',
+                disk,
+            ],
+        )
+        distro.conn.exit()
+
+
+def disk_list(args, cfg):
+    for hostname, disk, journal in args.disk:
+        distro = hosts.get(hostname)
+        LOG.info(
+            'Distro info: %s %s %s',
+            distro.name,
+            distro.release,
+            distro.codename
+        )
+
+        # TODO username
+        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
+        process.run(
+            distro.conn,
+            [
+                'ceph-disk',
+                'list',
+            ],
+        )
+        distro.conn.exit()
+
+
+def osd_list(args, cfg):
+    LOG.error('Not yet implemented; see http://tracker.ceph.com/issues/5071')
+    sys.exit(1)
+
+
+def osd(args):
+    cfg = conf.load(args)
+
+    if args.subcommand == 'list':
+        osd_list(args, cfg)
+    elif args.subcommand == 'prepare':
+        prepare(args, cfg, activate_prepared_disk=False)
+    elif args.subcommand == 'create':
+        prepare(args, cfg, activate_prepared_disk=True)
+    elif args.subcommand == 'activate':
+        activate(args, cfg)
+    else:
+        LOG.error('subcommand %s not implemented', args.subcommand)
+        sys.exit(1)
+
+
+def disk(args):
+    cfg = conf.load(args)
+
+    if args.subcommand == 'list':
+        disk_list(args, cfg)
+    elif args.subcommand == 'prepare':
+        prepare(args, cfg, activate_prepared_disk=False)
+    elif args.subcommand == 'activate':
+        activate(args, cfg)
+    elif args.subcommand == 'zap':
+        disk_zap(args)
+    else:
+        LOG.error('subcommand %s not implemented', args.subcommand)
+        sys.exit(1)
+
+
+def colon_separated(s):
+    journal = None
+    disk = None
+    host = None
+    if s.count(':') == 2:
+        (host, disk, journal) = s.split(':')
+    elif s.count(':') == 1:
+        (host, disk) = s.split(':')
+    elif s.count(':') == 0:
+        (host) = s
+    else:
+        raise argparse.ArgumentTypeError('must be in form HOST:DISK[:JOURNAL]')
+
+    if disk:
+        # allow just "sdb" to mean /dev/sdb
+        disk = os.path.join('/dev', disk)
+        if journal is not None:
+            journal = os.path.join('/dev', journal)
+
+    return (host, disk, journal)
+
+
+@priority(50)
+def make(parser):
+    """
+    Prepare a data disk on remote host.
+    """
+    sub_command_help = dedent("""
+    Manage OSDs by preparing a data disk on remote host.
+
+    For paths, first prepare and then activate:
+
+        ceph-deploy osd prepare {osd-node-name}:/path/to/osd
+        ceph-deploy osd activate {osd-node-name}:/path/to/osd
+
+    For disks or journals the `create` command will do prepare and activate
+    for you.
+    """
+    )
+    parser.formatter_class = argparse.RawDescriptionHelpFormatter
+    parser.description = sub_command_help
+
+    parser.add_argument(
+        'subcommand',
+        metavar='SUBCOMMAND',
+        choices=[
+            'list',
+            'create',
+            'prepare',
+            'activate',
+            'destroy',
+            ],
+        help='list, create (prepare+activate), prepare, activate, or destroy',
+        )
+    parser.add_argument(
+        'disk',
+        nargs='+',
+        metavar='HOST:DISK[:JOURNAL]',
+        type=colon_separated,
+        help='host and disk to prepare',
+        )
+    parser.add_argument(
+        '--zap-disk',
+        action='store_true', default=None,
+        help='destroy existing partition table and content for DISK',
+        )
+    parser.add_argument(
+        '--fs-type',
+        metavar='FS_TYPE',
+        default='xfs',
+        help='filesystem to use to format DISK (xfs, btrfs or ext4)',
+        )
+    parser.add_argument(
+        '--dmcrypt',
+        action='store_true', default=None,
+        help='use dm-crypt on DISK',
+        )
+    parser.add_argument(
+        '--dmcrypt-key-dir',
+        metavar='KEYDIR',
+        default='/etc/ceph/dmcrypt-keys',
+        help='directory where dm-crypt keys are stored',
+        )
+    parser.set_defaults(
+        func=osd,
+        )
+
+
+@priority(50)
+def make_disk(parser):
+    """
+    Manage disks on a remote host.
+    """
+    parser.add_argument(
+        'subcommand',
+        metavar='SUBCOMMAND',
+        choices=[
+            'list',
+            'prepare',
+            'activate',
+            'zap',
+            ],
+        help='list, prepare, activate, zap',
+        )
+    parser.add_argument(
+        'disk',
+        nargs='+',
+        metavar='HOST:DISK',
+        type=colon_separated,
+        help='host and disk (or path)',
+        )
+    parser.add_argument(
+        '--zap-disk',
+        action='store_true', default=None,
+        help='destroy existing partition table and content for DISK',
+        )
+    parser.add_argument(
+        '--fs-type',
+        metavar='FS_TYPE',
+        default='xfs',
+        help='filesystem to use to format DISK (xfs, btrfs or ext4)'
+        )
+    parser.add_argument(
+        '--dmcrypt',
+        action='store_true', default=None,
+        help='use dm-crypt on DISK',
+        )
+    parser.add_argument(
+        '--dmcrypt-key-dir',
+        metavar='KEYDIR',
+        default='/etc/ceph/dmcrypt-keys',
+        help='directory where dm-crypt keys are stored',
+        )
+    parser.set_defaults(
+        func=disk,
+        )
diff --git a/ceph_deploy/sudo_pushy.py b/ceph_deploy/sudo_pushy.py
new file mode 100644 (file)
index 0000000..ef57c98
--- /dev/null
@@ -0,0 +1,74 @@
+import getpass
+import logging
+import socket
+import pushy.transport.ssh
+import pushy.transport.local
+import subprocess
+
+from .misc import remote_shortname
+
+logger = logging.getLogger(__name__)
+
+
+class Local_Popen(pushy.transport.local.Popen):
+    def __init__(self, command, address, **kwargs):
+        pushy.transport.BaseTransport.__init__(self, address)
+
+        self.__proc = subprocess.Popen(command, stdin=subprocess.PIPE,
+                                       stdout=subprocess.PIPE,
+                                       stderr=subprocess.PIPE,
+                                       bufsize=65535)
+
+        self.stdout = self.__proc.stdout
+        self.stderr = self.__proc.stderr
+        self.stdin  = self.__proc.stdin
+
+    def close(self):
+        self.stdin.close()
+        self.__proc.wait()
+
+
+class SshSudoTransport(object):
+    @staticmethod
+    def Popen(command, *a, **kw):
+        command = ['sudo'] + command
+        return pushy.transport.ssh.Popen(command, *a, **kw)
+
+
+class LocalSudoTransport(object):
+    @staticmethod
+    def Popen(command, *a, **kw):
+        command = ['sudo'] + command
+        return Local_Popen(command, *a, **kw)
+
+
+def get_transport(hostname):
+    use_sudo = needs_sudo()
+    myhostname = remote_shortname(socket)
+    if hostname == myhostname:
+        if use_sudo:
+            logger.debug('will use a local connection with sudo')
+            return 'local+sudo:'
+        logger.debug('will use a local connection without sudo')
+        return 'local:'
+    else:
+        if use_sudo:
+            logger.debug('will use a remote connection with sudo')
+            return 'ssh+sudo:{hostname}'.format(hostname=hostname)
+        logger.debug('will use a remote connection without sudo')
+        return 'ssh:{hostname}'.format(hostname=hostname)
+
+
+def needs_sudo():
+    if getpass.getuser() == 'root':
+        return False
+    return True
+
+
+def patch():
+    """
+    Monkey patches pushy so it supports running via (passphraseless)
+    sudo on the remote host.
+    """
+    pushy.transports['ssh+sudo'] = SshSudoTransport
+    pushy.transports['local+sudo'] = LocalSudoTransport
diff --git a/ceph_deploy/tests/__init__.py b/ceph_deploy/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/ceph_deploy/tests/conftest.py b/ceph_deploy/tests/conftest.py
new file mode 100644 (file)
index 0000000..819fc34
--- /dev/null
@@ -0,0 +1,98 @@
+import logging
+import os
+import subprocess
+import sys
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _prepend_path(env):
+    """
+    Make sure the PATH contains the location where the Python binary
+    lives. This makes sure cli tools installed in a virtualenv work.
+    """
+    if env is None:
+        env = os.environ
+    env = dict(env)
+    new = os.path.dirname(sys.executable)
+    path = env.get('PATH')
+    if path is not None:
+        new = new + ':' + path
+    env['PATH'] = new
+    return env
+
+
+class CLIFailed(Exception):
+    """CLI tool failed"""
+
+    def __init__(self, args, status):
+        self.args = args
+        self.status = status
+
+    def __str__(self):
+        return '{doc}: {args}: exited with status {status}'.format(
+            doc=self.__doc__,
+            args=self.args,
+            status=self.status,
+            )
+
+
+class CLIProcess(object):
+    def __init__(self, **kw):
+        self.kw = kw
+
+    def __enter__(self):
+        try:
+            self.p = subprocess.Popen(**self.kw)
+        except OSError as e:
+            raise AssertionError(
+                'CLI tool {args!r} does not work: {err}'.format(
+                    args=self.kw['args'],
+                    err=e,
+                    ),
+                )
+        else:
+            return self.p
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.p.wait()
+        if self.p.returncode != 0:
+            err = CLIFailed(
+                args=self.kw['args'],
+                status=self.p.returncode,
+                )
+            if exc_type is None:
+                # nothing else raised, so we should complain; if
+                # something else failed, we'll just log
+                raise err
+            else:
+                LOG.error(str(err))
+
+
+class CLITester(object):
+    # provide easy way for caller to access the exception class
+    # without importing us
+    Failed = CLIFailed
+
+    def __init__(self, tmpdir):
+        self.tmpdir = tmpdir
+
+    def __call__(self, **kw):
+        kw.setdefault('cwd', str(self.tmpdir))
+        kw['env'] = _prepend_path(kw.get('env'))
+        kw['env']['COLUMNS'] = '80'
+        return CLIProcess(**kw)
+
+
+def pytest_funcarg__cli(request):
+    """
+    Test command line behavior.
+    """
+
+    # the tmpdir here will be the same value as the test function
+    # sees; we rely on that to let caller prepare and introspect
+    # any files the cli tool will read or create
+    tmpdir = request.getfuncargvalue('tmpdir')
+
+    return CLITester(tmpdir=tmpdir)
diff --git a/ceph_deploy/tests/directory.py b/ceph_deploy/tests/directory.py
new file mode 100644 (file)
index 0000000..81d3e19
--- /dev/null
@@ -0,0 +1,13 @@
+import contextlib
+import os
+
+
+@contextlib.contextmanager
+def directory(path):
+    prev = os.open('.', os.O_RDONLY | os.O_DIRECTORY)
+    try:
+        os.chdir(path)
+        yield
+    finally:
+        os.fchdir(prev)
+        os.close(prev)
diff --git a/ceph_deploy/tests/fakes.py b/ceph_deploy/tests/fakes.py
new file mode 100644 (file)
index 0000000..a96bcf7
--- /dev/null
@@ -0,0 +1,5 @@
+
+
+def fake_getaddrinfo(*a, **kw):
+    return_host = kw.get('return_host', 'host1')
+    return [[0,0,0,0, return_host]]
diff --git a/ceph_deploy/tests/test_cli.py b/ceph_deploy/tests/test_cli.py
new file mode 100644 (file)
index 0000000..9829ee5
--- /dev/null
@@ -0,0 +1,37 @@
+import pytest
+import subprocess
+
+
+def test_help(tmpdir, cli):
+    with cli(
+        args=['ceph-deploy', '--help'],
+        stdout=subprocess.PIPE,
+        ) as p:
+        result = p.stdout.read()
+        assert 'usage: ceph-deploy' in result
+        assert 'optional arguments:' in result
+        assert 'commands:' in result
+
+
+def test_bad_command(tmpdir, cli):
+    with pytest.raises(cli.Failed) as err:
+        with cli(
+            args=['ceph-deploy', 'bork'],
+            stderr=subprocess.PIPE,
+            ) as p:
+            result = p.stderr.read()
+    assert 'usage: ceph-deploy' in result
+    assert err.value.status == 2
+    assert [p.basename for p in tmpdir.listdir()] == []
+
+
+def test_bad_cluster(tmpdir, cli):
+    with pytest.raises(cli.Failed) as err:
+        with cli(
+            args=['ceph-deploy', '--cluster=/evil-this-should-not-be-created', 'new'],
+            stderr=subprocess.PIPE,
+            ) as p:
+            result = p.stderr.read()
+    assert 'usage: ceph-deploy' in result
+    assert err.value.status == 2
+    assert [p.basename for p in tmpdir.listdir()] == []
diff --git a/ceph_deploy/tests/test_cli_install.py b/ceph_deploy/tests/test_cli_install.py
new file mode 100644 (file)
index 0000000..ea24831
--- /dev/null
@@ -0,0 +1,65 @@
+import argparse
+import collections
+import mock
+import pytest
+import subprocess
+
+from ..cli import main
+from .. import install
+
+from .directory import directory
+
+
+def test_help(tmpdir, cli):
+    with cli(
+        args=['ceph-deploy', 'install', '--help'],
+        stdout=subprocess.PIPE,
+        ) as p:
+        result = p.stdout.read()
+    assert 'usage: ceph-deploy' in result
+    assert 'positional arguments:' in result
+    assert 'optional arguments:' in result
+
+
+def test_bad_no_host(tmpdir, cli):
+    with pytest.raises(cli.Failed) as err:
+        with cli(
+            args=['ceph-deploy', 'install'],
+            stderr=subprocess.PIPE,
+            ) as p:
+            result = p.stderr.read()
+    assert 'usage: ceph-deploy install' in result
+    assert 'too few arguments' in result
+    assert err.value.status == 2
+
+
+def test_simple(tmpdir):
+    ns = argparse.Namespace()
+    ns.pushy = mock.Mock()
+    conn = mock.NonCallableMock(name='PushyClient')
+    ns.pushy.return_value = conn
+
+    mock_compiled = collections.defaultdict(mock.Mock)
+    conn.compile.return_value = mock.Mock(return_value = ('Ubuntu', 'precise','cuttlefish'))
+    fake_get_release = mock.Mock(return_value = ('Ubuntu', 'precise','cuttlefish'))
+    fake_distro = mock.Mock(name='FakeDistro')
+    fake_distro.return_value = fake_distro
+
+    try:
+        with directory(str(tmpdir)):
+            with mock.patch('ceph_deploy.hosts.lsb.get_lsb_release', fake_get_release):
+                with mock.patch('ceph_deploy.hosts.pushy', ns.pushy):
+                    with mock.patch('ceph_deploy.hosts._get_distro', fake_distro):
+
+                        main(
+                            args=['-v', 'install', 'storehost1'],
+                            namespace=ns,
+                            )
+    except SystemExit as e:
+        raise AssertionError('Unexpected exit: %s', e)
+
+    connect_calls = ns.pushy.connect.call_args[0][0]
+    assert connect_calls == 'ssh+sudo:storehost1'
+    assert fake_distro.name == 'Ubuntu'
+    assert fake_distro.release == 'precise'
+    assert fake_distro.codename == 'cuttlefish'
diff --git a/ceph_deploy/tests/test_cli_mon.py b/ceph_deploy/tests/test_cli_mon.py
new file mode 100644 (file)
index 0000000..bcbc31c
--- /dev/null
@@ -0,0 +1,92 @@
+import argparse
+import collections
+import mock
+import pytest
+import subprocess
+
+from ..cli import main
+from .. import mon
+
+from .directory import directory
+from .fakes import fake_getaddrinfo
+
+def test_help(tmpdir, cli):
+    with cli(
+        args=['ceph-deploy', 'mon', '--help'],
+        stdout=subprocess.PIPE,
+        ) as p:
+        result = p.stdout.read()
+    assert 'usage: ceph-deploy' in result
+    assert 'positional arguments:' in result
+    assert 'optional arguments:' in result
+
+
+def test_bad_no_conf(tmpdir, cli):
+    with pytest.raises(cli.Failed) as err:
+        with cli(
+            args=['ceph-deploy', 'mon'],
+            stderr=subprocess.PIPE,
+            ) as p:
+            result = p.stderr.read()
+    assert 'usage: ceph-deploy' in result
+    assert 'too few arguments' in result
+    assert err.value.status == 2
+
+
+def test_bad_no_mon(tmpdir, cli):
+    with tmpdir.join('ceph.conf').open('w'):
+        pass
+    with pytest.raises(cli.Failed) as err:
+        with cli(
+            args=['ceph-deploy', 'mon'],
+            stderr=subprocess.PIPE,
+            ) as p:
+            result = p.stderr.read()
+    assert 'usage: ceph-deploy mon' in result
+    assert 'too few arguments' in result
+    assert err.value.status == 2
+
+
+def test_simple(tmpdir, capsys):
+    with tmpdir.join('ceph.conf').open('w') as f:
+        f.write("""\
+[global]
+fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0
+mon initial members = host1
+""")
+
+    ns = argparse.Namespace()
+    ns.pushy = mock.Mock()
+    conn = mock.NonCallableMock(name='PushyClient')
+    ns.pushy.return_value = conn
+
+    mock_compiled = collections.defaultdict(mock.Mock)
+    conn.compile.side_effect = mock_compiled.__getitem__
+
+    MON_SECRET = 'AQBWDj5QAP6LHhAAskVBnUkYHJ7eYREmKo5qKA=='
+
+    def _create_mon(cluster, get_monitor_secret):
+        secret = get_monitor_secret()
+        assert secret == MON_SECRET
+
+    try:
+        with mock.patch('ceph_deploy.new.socket.gethostbyname'):
+            with mock.patch('socket.getaddrinfo', fake_getaddrinfo):
+                with directory(str(tmpdir)):
+                    main(
+                        args=['-v', 'new', 'host1'],
+                        namespace=ns,
+                        )
+                    main(
+                        args=['-v', 'mon', 'create', 'host1'],
+                        namespace=ns,
+                        )
+    except SystemExit as e:
+        raise AssertionError('Unexpected exit: %s', e)
+    out, err = capsys.readouterr()
+    err = err.lower()
+    assert 'creating new cluster named ceph' in err
+    assert 'monitor host1 at h' in err
+    assert 'resolving host host1' in err
+    assert "monitor initial members are ['host1']" in err
+    assert "monitor addrs are ['h']" in err
diff --git a/ceph_deploy/tests/test_cli_new.py b/ceph_deploy/tests/test_cli_new.py
new file mode 100644 (file)
index 0000000..4e926ad
--- /dev/null
@@ -0,0 +1,77 @@
+import pytest
+from mock import patch
+import re
+import subprocess
+import uuid
+
+from .. import conf
+from ..cli import main
+from .directory import directory
+from .fakes import fake_getaddrinfo
+
+
+def test_help(tmpdir, cli):
+    with cli(
+        args=['ceph-deploy', 'new', '--help'],
+        stdout=subprocess.PIPE,
+        ) as p:
+        result = p.stdout.read()
+    assert 'usage: ceph-deploy new' in result
+    assert 'positional arguments' in result
+    assert 'optional arguments' in result
+
+
+def test_write_global_conf_section(tmpdir, cli):
+    with patch('ceph_deploy.new.socket.gethostbyname'):
+        with patch('ceph_deploy.new.socket.getaddrinfo', fake_getaddrinfo):
+            with directory(str(tmpdir)):
+                main(args=['new', 'host1'])
+    with tmpdir.join('ceph.conf').open() as f:
+        cfg = conf.parse(f)
+    assert cfg.sections() == ['global']
+
+
+def pytest_funcarg__newcfg(request):
+    tmpdir = request.getfuncargvalue('tmpdir')
+    cli = request.getfuncargvalue('cli')
+
+    def new(*args):
+        with patch('ceph_deploy.new.socket.gethostbyname'):
+            with patch('ceph_deploy.new.socket.getaddrinfo', fake_getaddrinfo):
+                with directory(str(tmpdir)):
+                    main( args=['new'] + list(args))
+                    with tmpdir.join('ceph.conf').open() as f:
+                        cfg = conf.parse(f)
+                    return cfg
+    return new
+
+
+def test_uuid(newcfg):
+    cfg = newcfg('host1')
+    fsid = cfg.get('global', 'fsid')
+    # make sure it's a valid uuid
+    uuid.UUID(hex=fsid)
+    # make sure it looks pretty, too
+    UUID_RE = re.compile(
+        r'^[0-9a-f]{8}-'
+        + r'[0-9a-f]{4}-'
+        # constant 4 here, we want to enforce randomness and not leak
+        # MACs or time
+        + r'4[0-9a-f]{3}-'
+        + r'[0-9a-f]{4}-'
+        + r'[0-9a-f]{12}$',
+        )
+    assert UUID_RE.match(fsid)
+
+
+def test_mons(newcfg):
+    cfg = newcfg('node01', 'node07', 'node34')
+    mon_initial_members = cfg.get('global', 'mon_initial_members')
+    assert mon_initial_members == 'node01, node07, node34'
+
+
+def test_defaults(newcfg):
+    cfg = newcfg('host1')
+    assert cfg.get('global', 'auth_supported') == 'cephx'
+    assert cfg.get('global', 'osd_journal_size') == '1024'
+    assert cfg.get('global', 'filestore_xattr_use_omap') == 'true'
diff --git a/ceph_deploy/tests/test_cli_osd.py b/ceph_deploy/tests/test_cli_osd.py
new file mode 100644 (file)
index 0000000..25045af
--- /dev/null
@@ -0,0 +1,104 @@
+import argparse
+import collections
+import mock
+import pytest
+import subprocess
+
+from ..cli import main
+from .. import osd
+
+from .directory import directory
+
+
+def test_help(tmpdir, cli):
+    with cli(
+        args=['ceph-deploy', 'osd', '--help'],
+        stdout=subprocess.PIPE,
+        ) as p:
+        result = p.stdout.read()
+    assert 'usage: ceph-deploy osd' in result
+    assert 'positional arguments' in result
+    assert 'optional arguments' in result
+
+
+def test_bad_no_conf(tmpdir, cli):
+    with pytest.raises(cli.Failed) as err:
+        with cli(
+            args=['ceph-deploy', 'osd', 'fakehost:/does-not-exist'],
+            stderr=subprocess.PIPE,
+            ) as p:
+            result = p.stderr.read()
+    assert 'ceph-deploy osd: error' in result
+    assert 'invalid choice' in result
+    assert err.value.status == 2
+
+
+def test_bad_no_disk(tmpdir, cli):
+    with tmpdir.join('ceph.conf').open('w'):
+        pass
+    with pytest.raises(cli.Failed) as err:
+        with cli(
+            args=['ceph-deploy', 'osd'],
+            stderr=subprocess.PIPE,
+            ) as p:
+            result = p.stderr.read()
+    assert 'usage: ceph-deploy osd' in result
+    assert err.value.status == 2
+
+
+def test_simple(tmpdir, capsys):
+    with tmpdir.join('ceph.conf').open('w') as f:
+        f.write("""\
+[global]
+fsid = 6ede5564-3cf1-44b5-aa96-1c77b0c3e1d0
+mon host = host1
+""")
+
+    ns = argparse.Namespace()
+
+    conn_osd = mock.NonCallableMock(name='PushyClient-osd')
+    mock_compiled_osd = collections.defaultdict(mock.Mock)
+    #conn_osd.compile.side_effect = mock_compiled_osd.__getitem__
+    conn_osd.compile.return_value = mock.Mock(return_value='fakekeyring')
+
+    conn_mon = mock.NonCallableMock(name='PushyClient-mon')
+    mock_compiled_mon = collections.defaultdict(mock.Mock)
+    conn_mon.compile.side_effect = mock_compiled_mon.__getitem__
+
+    ns.pushy = mock.Mock(name='pushy namespace')
+
+    def _conn(url):
+        if url == 'ssh+sudo:host1':
+            return conn_mon
+        elif url == 'ssh+sudo:storehost1:sdc':
+            return conn_osd
+        else:
+            raise AssertionError('Unexpected connection url: %r', url)
+    ns.pushy.side_effect = _conn
+
+    BOOTSTRAP_KEY = 'fakekeyring'
+
+    mock_compiled_mon[osd.get_bootstrap_osd_key].side_effect = BOOTSTRAP_KEY
+
+    def _create_osd(cluster, find_key):
+        key = find_key()
+        assert key == BOOTSTRAP_KEY
+
+    mock_compiled_osd[osd.create_osd].side_effect = _create_osd
+
+    with directory(str(tmpdir)):
+        main(
+            args=['-v', 'gatherkeys', 'storehost1:sdc'],
+            namespace=ns,
+        )
+        main(
+            args=['-v', 'osd', 'prepare', 'storehost1:sdc'],
+            namespace=ns,
+            )
+    out, err = capsys.readouterr()
+    err = err.lower()
+    assert 'have ceph.mon.keyring' in err
+    assert 'have ceph.client.admin.keyring' in err
+    assert 'have ceph.bootstrap-osd.keyring' in err
+    assert 'got ceph.bootstrap-mds.keyring key from storehost1:sdc' in err
+    assert 'got ceph.bootstrap-osd.keyring key from storehost1:sdc' in err
diff --git a/ceph_deploy/tests/test_conf.py b/ceph_deploy/tests/test_conf.py
new file mode 100644 (file)
index 0000000..79796c6
--- /dev/null
@@ -0,0 +1,68 @@
+from cStringIO import StringIO
+from .. import conf
+
+
+def test_simple():
+    f = StringIO("""\
+[foo]
+bar = baz
+""")
+    cfg = conf.parse(f)
+    assert cfg.get('foo', 'bar') == 'baz'
+
+
+def test_indent_space():
+    f = StringIO("""\
+[foo]
+        bar = baz
+""")
+    cfg = conf.parse(f)
+    assert cfg.get('foo', 'bar') == 'baz'
+
+
+def test_indent_tab():
+    f = StringIO("""\
+[foo]
+\tbar = baz
+""")
+    cfg = conf.parse(f)
+    assert cfg.get('foo', 'bar') == 'baz'
+
+
+def test_words_underscore():
+    f = StringIO("""\
+[foo]
+bar_thud = baz
+""")
+    cfg = conf.parse(f)
+    assert cfg.get('foo', 'bar_thud') == 'baz'
+    assert cfg.get('foo', 'bar thud') == 'baz'
+
+
+def test_words_space():
+    f = StringIO("""\
+[foo]
+bar thud = baz
+""")
+    cfg = conf.parse(f)
+    assert cfg.get('foo', 'bar_thud') == 'baz'
+    assert cfg.get('foo', 'bar thud') == 'baz'
+
+
+def test_words_many():
+    f = StringIO("""\
+[foo]
+bar__ thud   quux = baz
+""")
+    cfg = conf.parse(f)
+    assert cfg.get('foo', 'bar_thud_quux') == 'baz'
+    assert cfg.get('foo', 'bar thud quux') == 'baz'
+
+def test_write_words_underscore():
+    cfg = conf.CephConf()
+    cfg.add_section('foo')
+    cfg.set('foo', 'bar thud quux', 'baz')
+    f = StringIO()
+    cfg.write(f)
+    f.reset()
+    assert f.readlines() == ['[foo]\n', 'bar_thud_quux = baz\n','\n']
diff --git a/ceph_deploy/tests/test_mon.py b/ceph_deploy/tests/test_mon.py
new file mode 100644 (file)
index 0000000..35a1298
--- /dev/null
@@ -0,0 +1,64 @@
+from ceph_deploy import mon
+from ceph_deploy.conf import CephConf
+from mock import Mock
+
+
+def make_fake_conf():
+    return CephConf()
+
+# NOTE: If at some point we re-use this helper, move it out
+# and make it even more generic
+
+def make_fake_conn(receive_returns=None):
+    receive_returns = receive_returns or (['{}'], '', 0)
+    conn = Mock()
+    conn.return_value = conn
+    conn.execute = conn
+    conn.receive = Mock(return_value=receive_returns)
+    conn.result = Mock(return_value=conn)
+    return conn
+
+
+class TestCatchCommonErrors(object):
+
+    def setup(self):
+        self.logger = Mock()
+
+    def assert_logger_message(self, logger, msg):
+        calls = logger.call_args_list
+        for log_call in calls:
+            if msg in log_call[0][0]:
+                return True
+        raise AssertionError('"%s" was not found in any of %s' % (msg, calls))
+
+    def test_warn_if_no_intial_members(self):
+        fake_conn = make_fake_conn()
+        cfg = make_fake_conf()
+        mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg)
+        expected_msg = 'is not defined in `mon initial members`'
+        self.assert_logger_message(self.logger.warning, expected_msg)
+
+    def test_warn_if_host_not_in_intial_members(self):
+        fake_conn = make_fake_conn()
+        cfg = make_fake_conf()
+        cfg.add_section('global')
+        cfg.set('global', 'mon initial members', 'AAAA')
+        mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg)
+        expected_msg = 'is not defined in `mon initial members`'
+        self.assert_logger_message(self.logger.warning, expected_msg)
+
+
+    def test_warn_if_not_mon_in_monmap(self):
+        fake_conn = make_fake_conn()
+        cfg = make_fake_conf()
+        mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg)
+        expected_msg = 'does not exist in monmap'
+        self.assert_logger_message(self.logger.warning, expected_msg)
+
+    def test_warn_if_not_public_addr_and_not_public_netw(self):
+        fake_conn = make_fake_conn()
+        cfg = make_fake_conf()
+        cfg.add_section('global')
+        mon.catch_mon_errors(fake_conn, self.logger, 'host', cfg)
+        expected_msg = 'neither `public_addr` nor `public_network`'
+        self.assert_logger_message(self.logger.warning, expected_msg)
diff --git a/ceph_deploy/tests/unit/hosts/test_hosts.py b/ceph_deploy/tests/unit/hosts/test_hosts.py
new file mode 100644 (file)
index 0000000..ac087c0
--- /dev/null
@@ -0,0 +1,55 @@
+from pytest import raises
+
+from ceph_deploy import exc
+from ceph_deploy import hosts
+
+
+class TestNormalized(object):
+
+    def test_get_debian(self):
+        result = hosts._normalized_distro_name('Debian')
+        assert result == 'debian'
+
+    def test_get_ubuntu(self):
+        result = hosts._normalized_distro_name('Ubuntu')
+        assert result == 'ubuntu'
+
+    def test_get_suse(self):
+        result = hosts._normalized_distro_name('SUSE LINUX')
+        assert result == 'suse'
+
+    def test_get_redhat(self):
+        result = hosts._normalized_distro_name('RedHatEnterpriseLinux')
+        assert result == 'redhat'
+
+
+class TestGetDistro(object):
+
+    def test_get_debian(self):
+        result = hosts._get_distro('Debian')
+        assert result.__name__.endswith('debian')
+
+    def test_get_ubuntu(self):
+        # Ubuntu imports debian stuff
+        result = hosts._get_distro('Ubuntu')
+        assert result.__name__.endswith('debian')
+
+    def test_get_centos(self):
+        result = hosts._get_distro('CentOS')
+        assert result.__name__.endswith('centos')
+
+    def test_get_scientific(self):
+        result = hosts._get_distro('Scientific')
+        assert result.__name__.endswith('centos')
+
+    def test_get_redhat(self):
+        result = hosts._get_distro('RedHat')
+        assert result.__name__.endswith('centos')
+
+    def test_get_uknown(self):
+        with raises(exc.UnsupportedPlatform):
+            hosts._get_distro('Solaris')
+
+    def test_get_fallback(self):
+        result = hosts._get_distro('Solaris', 'Debian')
+        assert result.__name__.endswith('debian')
diff --git a/ceph_deploy/tests/unit/test_mon.py b/ceph_deploy/tests/unit/test_mon.py
new file mode 100644 (file)
index 0000000..7ffb5ba
--- /dev/null
@@ -0,0 +1,190 @@
+import sys
+from mock import Mock, MagicMock, patch, call
+from ceph_deploy import mon
+from ceph_deploy.hosts.common import mon_create
+from ceph_deploy.misc import mon_hosts, remote_shortname
+
+
+def path_exists(target_paths=None):
+    """
+    A quick helper that enforces a check for the existence of a path. Since we
+    are dealing with fakes, we allow to pass in a list of paths that are OK to
+    return True, otherwise return False.
+    """
+    target_paths = target_paths or []
+
+    def exists(path):
+        return path in target_paths
+    return exists
+
+
+def mock_open(mock=None, data=None):
+    """
+    Fake the behavior of `open` when used as a context manager
+    """
+    if mock is None:
+        mock = MagicMock(spec=file)
+
+    handle = MagicMock(spec=file)
+    handle.write.return_value = None
+    if data is None:
+        handle.__enter__.return_value = handle
+    else:
+        handle.__enter__.return_value = data
+    mock.return_value = handle
+    return mock
+
+
+class TestCreateMon(object):
+
+    def setup(self):
+        # this setup is way more verbose than normal
+        # but we are forced to because this function needs a lot
+        # passed in for remote execution. No other way around it.
+        self.socket = Mock()
+        self.socket.gethostname.return_value = 'hostname'
+        self.fake_write = Mock(name='fake_write')
+        self.fake_file = mock_open(data=self.fake_write)
+        self.fake_file.readline.return_value = self.fake_file
+        self.fake_file.readline.lstrip.return_value = ''
+        self.distro = Mock()
+        self.sprocess = Mock()
+        self.paths = Mock()
+        self.paths.mon.path = Mock(return_value='/cluster-hostname')
+        self.logger = Mock()
+        self.logger.info = self.logger.debug = lambda x: sys.stdout.write(str(x) + "\n")
+
+    def test_create_mon_tmp_path_if_nonexistent(self):
+        self.distro.sudo_conn.modules.os.path.exists = Mock(
+            side_effect=path_exists(['/cluster-hostname']))
+        self.paths.mon.constants.tmp_path = '/var/lib/ceph/tmp'
+        args = Mock(return_value=['cluster', '1234', 'initd'])
+        args.cluster = 'cluster'
+        with patch('ceph_deploy.hosts.common.conf.load'):
+            mon_create(self.distro, self.logger, args, Mock(), 'hostname')
+
+        result = self.distro.sudo_conn.modules.os.makedirs.call_args_list[-1]
+        assert result == call('/var/lib/ceph/tmp')
+
+    def test_create_mon_path_if_nonexistent(self):
+        self.distro.sudo_conn.modules.os.path.exists = Mock(
+            side_effect=path_exists(['/']))
+        args = Mock(return_value=['cluster', '1234', 'initd'])
+        args.cluster = 'cluster'
+        with patch('ceph_deploy.hosts.common.conf.load'):
+            mon_create(self.distro, self.logger, args, Mock(), 'hostname')
+
+        result = self.distro.sudo_conn.modules.os.makedirs.call_args_list[0]
+        assert result == call('/var/lib/ceph/mon/cluster-hostname')
+
+    def test_write_keyring(self):
+        self.distro.sudo_conn.modules.os.path.exists = Mock(
+            side_effect=path_exists(['/']))
+        args = Mock(return_value=['cluster', '1234', 'initd'])
+        args.cluster = 'cluster'
+        with patch('ceph_deploy.hosts.common.conf.load'):
+            with patch('ceph_deploy.hosts.common.remote') as fake_remote:
+                mon_create(self.distro, self.logger, args, Mock(), 'hostname')
+
+        # the second argument to `remote()` should be the write func
+        result = fake_remote.call_args_list[1][0][-1].__name__
+        assert result == 'write_monitor_keyring'
+
+    def test_write_done_path(self):
+        self.distro.sudo_conn.modules.os.path.exists = Mock(
+            side_effect=path_exists(['/']))
+        args = Mock(return_value=['cluster', '1234', 'initd'])
+        args.cluster = 'cluster'
+
+        with patch('ceph_deploy.hosts.common.conf.load'):
+            with patch('ceph_deploy.hosts.common.remote') as fake_remote:
+                mon_create(self.distro, self.logger, args, Mock(), 'hostname')
+
+        # the second to last argument to `remote()` should be the done path
+        # write
+        result = fake_remote.call_args_list[-2][0][-1].__name__
+        assert result == 'create_done_path'
+
+    def test_write_init_path(self):
+        self.distro.sudo_conn.modules.os.path.exists = Mock(
+            side_effect=path_exists(['/']))
+        args = Mock(return_value=['cluster', '1234', 'initd'])
+        args.cluster = 'cluster'
+
+        with patch('ceph_deploy.hosts.common.conf.load'):
+            with patch('ceph_deploy.hosts.common.remote') as fake_remote:
+                mon_create(self.distro, self.logger, args, Mock(), 'hostname')
+
+        result = fake_remote.call_args_list[-1][0][-1].__name__
+        assert result == 'create_init_path'
+
+    def test_mon_hosts(self):
+        hosts = Mock()
+        for (name, host) in mon_hosts(('name1', 'name2.localdomain',
+                    'name3:1.2.3.6', 'name4:localhost.localdomain')):
+            hosts.get(name, host)
+
+        expected = [call.get('name1', 'name1'),
+                    call.get('name2', 'name2.localdomain'),
+                    call.get('name3', '1.2.3.6'),
+                    call.get('name4', 'localhost.localdomain')]
+        result = hosts.mock_calls
+        assert result == expected
+
+    def test_remote_shortname_fqdn(self):
+        socket = Mock()
+        socket.gethostname.return_value = 'host.f.q.d.n'
+        assert remote_shortname(socket) == 'host'
+
+    def test_remote_shortname_host(self):
+        socket = Mock()
+        socket.gethostname.return_value = 'host'
+        assert remote_shortname(socket) == 'host'
+
+class TestIsRunning(object):
+
+    def setup(self):
+        self.fake_popen = Mock()
+        self.fake_popen.return_value = self.fake_popen
+
+    def test_is_running_centos(self):
+        centos_out = ['', "mon.mire094: running {'version': '0.6.15'}"]
+        self.fake_popen.communicate = Mock(return_value=centos_out)
+        with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen):
+            result = mon.is_running(['ceph', 'status'])
+        assert result is True
+
+    def test_is_not_running_centos(self):
+        centos_out = ['', "mon.mire094: not running {'version': '0.6.15'}"]
+        self.fake_popen.communicate = Mock(return_value=centos_out)
+        with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen):
+            result = mon.is_running(['ceph', 'status'])
+        assert result is False
+
+    def test_is_dead_centos(self):
+        centos_out = ['', "mon.mire094: dead {'version': '0.6.15'}"]
+        self.fake_popen.communicate = Mock(return_value=centos_out)
+        with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen):
+            result = mon.is_running(['ceph', 'status'])
+        assert result is False
+
+    def test_is_running_ubuntu(self):
+        ubuntu_out = ['', "ceph-mon (ceph/mira103) start/running, process 5866"]
+        self.fake_popen.communicate = Mock(return_value=ubuntu_out)
+        with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen):
+            result = mon.is_running(['ceph', 'status'])
+        assert result is True
+
+    def test_is_not_running_ubuntu(self):
+        ubuntu_out = ['', "ceph-mon (ceph/mira103) start/dead, process 5866"]
+        self.fake_popen.communicate = Mock(return_value=ubuntu_out)
+        with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen):
+            result = mon.is_running(['ceph', 'status'])
+        assert result is False
+
+    def test_is_dead_ubuntu(self):
+        ubuntu_out = ['', "ceph-mon (ceph/mira103) stop/not running, process 5866"]
+        self.fake_popen.communicate = Mock(return_value=ubuntu_out)
+        with patch('ceph_deploy.mon.subprocess.Popen', self.fake_popen):
+            result = mon.is_running(['ceph', 'status'])
+        assert result is False
diff --git a/ceph_deploy/tests/unit/util/test_arg_validators.py b/ceph_deploy/tests/unit/util/test_arg_validators.py
new file mode 100644 (file)
index 0000000..d9ef2a9
--- /dev/null
@@ -0,0 +1,83 @@
+import socket
+from mock import Mock
+from argparse import ArgumentError
+from pytest import raises
+
+from ceph_deploy.util import arg_validators
+
+
+class TestRegexMatch(object):
+
+    def test_match_raises(self):
+        validator = arg_validators.RegexMatch(r'\d+')
+        with raises(ArgumentError):
+            validator('1')
+
+    def test_match_passes(self):
+        validator = arg_validators.RegexMatch(r'\d+')
+        assert validator('foo') == 'foo'
+
+    def test_default_error_message(self):
+        validator = arg_validators.RegexMatch(r'\d+')
+        with raises(ArgumentError) as error:
+            validator('1')
+        message = error.value.message
+        assert message == 'must match pattern \d+'
+
+    def test_custom_error_message(self):
+        validator = arg_validators.RegexMatch(r'\d+', 'wat')
+        with raises(ArgumentError) as error:
+            validator('1')
+        message = error.value.message
+        assert message == 'wat'
+
+
+class TestHostName(object):
+
+    def setup(self):
+        self.fake_sock = Mock()
+        self.fake_sock.gaierror = socket.gaierror
+        self.fake_sock.error = socket.error
+        self.fake_sock.gethostbyname.side_effect = socket.gaierror
+        self.fake_sock.inet_aton.side_effect = socket.error
+
+    def test_hostname_is_not_resolvable(self):
+        hostname = arg_validators.Hostname(self.fake_sock)
+        with raises(ArgumentError) as error:
+            hostname('unresolvable')
+        message = error.value.message
+        assert 'is not resolvable' in message
+
+    def test_hostname_with_name_is_not_resolvable(self):
+        hostname = arg_validators.Hostname(self.fake_sock)
+        with raises(ArgumentError) as error:
+            hostname('name:foo')
+        message = error.value.message
+        assert 'foo is not resolvable' in message
+
+    def test_ip_is_allowed_when_paired_with_host(self):
+        self.fake_sock.gethostbyname = Mock(return_value='192.168.1.111')
+        hostname = arg_validators.Hostname(self.fake_sock)
+        result = hostname('name:192.168.1.111')
+        assert result == 'name:192.168.1.111'
+
+    def test_ipv6_is_allowed_when_paired_with_host(self):
+        self.fake_sock.gethostbyname = Mock(return_value='2001:0db8:85a3:0000:0000:8a2e:0370:7334')
+        hostname = arg_validators.Hostname(self.fake_sock)
+        result = hostname('name:2001:0db8:85a3:0000:0000:8a2e:0370:7334')
+        assert result == 'name:2001:0db8:85a3:0000:0000:8a2e:0370:7334'
+
+    def test_host_is_resolvable(self):
+        self.fake_sock.gethostbyname = Mock()
+        hostname = arg_validators.Hostname(self.fake_sock)
+        result = hostname('name:example.com')
+        assert result == 'name:example.com'
+
+    def test_hostname_must_be_an_ip(self):
+        self.fake_sock.gethostbyname = Mock()
+        self.fake_sock.inet_aton = Mock()
+        hostname = arg_validators.Hostname(self.fake_sock)
+        with raises(ArgumentError) as error:
+            hostname('0')
+        message = error.value.message
+        assert '0 must be a hostname' in message
diff --git a/ceph_deploy/tests/unit/util/test_constants.py b/ceph_deploy/tests/unit/util/test_constants.py
new file mode 100644 (file)
index 0000000..ce32a57
--- /dev/null
@@ -0,0 +1,16 @@
+from ceph_deploy.util import constants
+
+
+class TestPaths(object):
+
+    def test_mon_path(self):
+        assert constants.mon_path.startswith('/')
+        assert constants.mon_path.endswith('/mon')
+
+    def test_mds_path(self):
+        assert constants.mds_path.startswith('/')
+        assert constants.mds_path.endswith('/mds')
+
+    def test_tmp_path(self):
+        assert constants.tmp_path.startswith('/')
+        assert constants.tmp_path.endswith('/tmp')
diff --git a/ceph_deploy/tests/unit/util/test_paths.py b/ceph_deploy/tests/unit/util/test_paths.py
new file mode 100644 (file)
index 0000000..71bcc62
--- /dev/null
@@ -0,0 +1,29 @@
+from ceph_deploy.util import paths
+
+
+class TestMonPaths(object):
+
+    def test_base_path(self):
+        result = paths.mon.base('mycluster')
+        assert result.endswith('/mycluster-')
+
+    def test_path(self):
+        result = paths.mon.path('mycluster', 'myhostname')
+        assert result.startswith('/')
+        assert result.endswith('/mycluster-myhostname')
+
+    def test_done(self):
+        result = paths.mon.done('mycluster', 'myhostname')
+        assert result.startswith('/')
+        assert result.endswith('mycluster-myhostname/done')
+
+    def test_init(self):
+        result = paths.mon.init('mycluster', 'myhostname', 'init')
+        assert result.startswith('/')
+        assert result.endswith('mycluster-myhostname/init')
+
+    def test_keyring(self):
+        result = paths.mon.keyring('mycluster', 'myhostname')
+        assert result.startswith('/')
+        assert result.endswith('tmp/mycluster-myhostname.mon.keyring')
+
diff --git a/ceph_deploy/tests/unit/util/test_pkg_managers.py b/ceph_deploy/tests/unit/util/test_pkg_managers.py
new file mode 100644 (file)
index 0000000..518e549
--- /dev/null
@@ -0,0 +1,20 @@
+from mock import patch, Mock
+from ceph_deploy.util import pkg_managers
+
+
+class TestRPM(object):
+
+    def setup(self):
+        self.to_patch = 'ceph_deploy.util.pkg_managers.wrappers'
+
+    def test_extend_flags(self):
+        fake_check_call = Mock()
+        with patch(self.to_patch, fake_check_call):
+            pkg_managers.rpm(
+                Mock(),
+                Mock(),
+                ['-f', 'vim'])
+            result = fake_check_call.check_call.call_args_list[-1]
+        assert result[0][-1] == ['rpm', '-Uvh', '-f', 'vim']
+
+
diff --git a/ceph_deploy/util/__init__.py b/ceph_deploy/util/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/ceph_deploy/util/arg_validators.py b/ceph_deploy/util/arg_validators.py
new file mode 100644 (file)
index 0000000..422bba3
--- /dev/null
@@ -0,0 +1,54 @@
+import socket
+import argparse
+import re
+
+
+class RegexMatch(object):
+    """
+    Performs regular expression match on value.
+    If the regular expression pattern matches it will it will return an error
+    message that will work with argparse.
+    """
+
+    def __init__(self, pattern, statement=None):
+        self.string_pattern = pattern
+        self.pattern = re.compile(pattern)
+        self.statement = statement
+        if not self.statement:
+            self.statement = "must match pattern %s" % self.string_pattern
+
+    def __call__(self, string):
+        match = self.pattern.search(string)
+        if match:
+            raise argparse.ArgumentError(None, self.statement)
+        return string
+
+
+class Hostname(object):
+    """
+    Checks wether a given hostname is resolvable in DNS, otherwise raising and
+    argparse error.
+    """
+
+    def __init__(self, _socket=None):
+        self.socket = _socket or socket  # just used for testing
+
+    def __call__(self, string):
+        parts = string.split(':')
+        name = parts[0]
+        host = parts[-1]
+        try:
+            self.socket.gethostbyname(host)
+        except self.socket.gaierror:
+            msg = "hostname: %s is not resolvable" % host
+            raise argparse.ArgumentError(None, msg)
+
+        try:
+            self.socket.inet_aton(name)
+        except self.socket.error:
+            return string  # not an IP
+        else:
+            msg = '%s must be a hostname not an IP' % name
+            raise argparse.ArgumentError(None, msg)
+
+        return string
diff --git a/ceph_deploy/util/constants.py b/ceph_deploy/util/constants.py
new file mode 100644 (file)
index 0000000..7fbadc1
--- /dev/null
@@ -0,0 +1,10 @@
+from os.path import join
+
+# Base Path for ceph
+base_path = '/var/lib/ceph'
+
+tmp_path = join(base_path, 'tmp')
+
+mon_path = join(base_path, 'mon')
+
+mds_path = join(base_path, 'mds')
diff --git a/ceph_deploy/util/decorators.py b/ceph_deploy/util/decorators.py
new file mode 100644 (file)
index 0000000..b6d5d15
--- /dev/null
@@ -0,0 +1,89 @@
+import logging
+import sys
+from functools import wraps
+
+
+def catches(catch=None, handler=None, exit=True):
+    """
+    Very simple decorator that tries any of the exception(s) passed in as
+    a single exception class or tuple (containing multiple ones) returning the
+    exception message and optionally handling the problem if it raises with the
+    handler if it is provided.
+
+    So instead of doing something like this::
+
+        def bar():
+            try:
+                some_call()
+                print "Success!"
+            except TypeError, exc:
+                print "Error while handling some call: %s" % exc
+                sys.exit(1)
+
+    You would need to decorate it like this to have the same effect::
+
+        @catches(TypeError)
+        def bar():
+            some_call()
+            print "Success!"
+
+    If multiple exceptions need to be caught they need to be provided as a
+    tuple::
+
+        @catches((TypeError, AttributeError))
+        def bar():
+            some_call()
+            print "Success!"
+
+    If adding a handler, it should accept a single argument, which would be the
+    exception that was raised, it would look like::
+
+        def my_handler(exc):
+            print 'Handling exception %s' % str(exc)
+            raise SystemExit
+
+        @catches(KeyboardInterrupt, handler=my_handler)
+        def bar():
+            some_call()
+
+    Note that the handler needs to raise its SystemExit if it wants to halt
+    execution, otherwise the decorator would continue as a normal try/except
+    block.
+
+    """
+    catch = catch or Exception
+    logger = logging.getLogger('ceph_deploy')
+
+    def decorate(f):
+
+        @wraps(f)
+        def newfunc(*a, **kw):
+            try:
+                return f(*a, **kw)
+            except catch as e:
+                if handler:
+                    return handler(e)
+                else:
+                    logger.error(make_exception_message(e))
+                    if exit:
+                        sys.exit(1)
+        return newfunc
+
+    return decorate
+
+#
+# Decorator helpers
+#
+
+
+def make_exception_message(exc):
+    """
+    An exception is passed in and this function
+    returns the proper string depending on the result
+    so it is readable enough.
+    """
+    if str(exc):
+        return '%s: %s\n' % (exc.__class__.__name__, exc)
+    else:
+        return '%s\n' % (exc.__class__.__name__)
+
diff --git a/ceph_deploy/util/log.py b/ceph_deploy/util/log.py
new file mode 100644 (file)
index 0000000..3b4e8ad
--- /dev/null
@@ -0,0 +1,51 @@
+import logging
+
+BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
+
+COLORS = {
+    'WARNING': YELLOW,
+    'INFO': WHITE,
+    'DEBUG': BLUE,
+    'CRITICAL': RED,
+    'ERROR': RED
+}
+
+RESET_SEQ = "\033[0m"
+COLOR_SEQ = "\033[1;%dm"
+BOLD_SEQ = "\033[1m"
+
+BASE_COLOR_FORMAT = "[$BOLD%(name)s$RESET][%(color_levelname)-17s] %(message)s"
+BASE_FORMAT = "%(asctime)s [%(name)s][%(levelname)-6s] %(message)s"
+
+
+def color_message(message):
+    message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
+    return message
+
+
+class ColoredFormatter(logging.Formatter):
+    """
+    A very basic logging formatter that not only applies color to the levels of
+    the ouput but will also truncate the level names so that they do not alter
+    the visuals of logging when presented on the terminal.
+    """
+
+    def __init__(self, msg):
+        logging.Formatter.__init__(self, msg)
+
+    def format(self, record):
+        levelname = record.levelname
+        truncated_level = record.levelname[:6]
+        if levelname in COLORS:
+            levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + truncated_level + RESET_SEQ
+            record.color_levelname = levelname_color
+        return logging.Formatter.format(self, record)
+
+
+def color_format():
+    """
+    Main entry point to get a colored formatter, it will use the
+    BASE_FORMAT by default.
+    """
+    color_format = color_message(BASE_COLOR_FORMAT)
+    return ColoredFormatter(color_format)
diff --git a/ceph_deploy/util/paths/__init__.py b/ceph_deploy/util/paths/__init__.py
new file mode 100644 (file)
index 0000000..129ef45
--- /dev/null
@@ -0,0 +1 @@
+import mon
diff --git a/ceph_deploy/util/paths/mon.py b/ceph_deploy/util/paths/mon.py
new file mode 100644 (file)
index 0000000..46a728a
--- /dev/null
@@ -0,0 +1,55 @@
+"""
+Common paths for mon, based on the constant file paths defined in
+``ceph_deploy.util.constants``.
+All functions return a string representation of the absolute path
+construction.
+"""
+from os.path import join
+
+from ceph_deploy.util import constants
+
+
+def base(cluster):
+    cluster = "%s-" % cluster
+    return join(constants.mon_path, cluster)
+
+
+def path(cluster, hostname):
+    """
+    Example usage::
+
+        >>> mon.path('mycluster', 'hostname')
+        /var/lib/ceph/mon/mycluster-myhostname
+    """
+    return "%s%s" % (base(cluster), hostname)
+
+
+def done(cluster, hostname):
+    """
+    Example usage::
+
+        >>> mon.done('mycluster', 'hostname')
+        /var/lib/ceph/mon/mycluster-myhostname/done
+    """
+    return join(path(cluster, hostname), 'done')
+
+
+def init(cluster, hostname, init):
+    """
+    Example usage::
+
+        >>> mon.init('mycluster', 'hostname', 'init')
+        /var/lib/ceph/mon/mycluster-myhostname/init
+    """
+    return join(path(cluster, hostname), init)
+
+
+def keyring(cluster, hostname):
+    """
+    Example usage::
+
+        >>> mon.keyring('mycluster', 'myhostname')
+        /var/lib/ceph/tmp/mycluster-myhostname.mon.keyring
+    """
+    keyring_file = '%s-%s.mon.keyring' % (cluster, hostname)
+    return join(constants.tmp_path, keyring_file)
diff --git a/ceph_deploy/util/pkg_managers.py b/ceph_deploy/util/pkg_managers.py
new file mode 100644 (file)
index 0000000..f1c8635
--- /dev/null
@@ -0,0 +1,109 @@
+from ceph_deploy.lib.remoto import process
+
+
+def apt(conn, package, *a, **kw):
+    cmd = [
+        'env',
+        'DEBIAN_FRONTEND=noninteractive',
+        'apt-get',
+        '-q',
+        'install',
+        '--assume-yes',
+        package,
+    ]
+    return process.run(
+        conn,
+        cmd,
+        *a,
+        **kw
+    )
+
+
+def apt_remove(conn, packages, *a, **kw):
+    purge = kw.pop('purge', False)
+    cmd = [
+        'apt-get',
+        '-q',
+        'remove',
+        '-f',
+        '-y',
+        '--force-yes',
+    ]
+    if purge:
+        cmd.append('--purge')
+    cmd.append('--')
+    cmd.extend(packages)
+
+    return process.run(
+        conn,
+        cmd,
+        *a,
+        **kw
+    )
+
+
+def apt_update(conn):
+    cmd = [
+        'apt-get',
+        '-q',
+        'update',
+    ]
+    return process.run(
+        conn,
+        logger,
+        cmd,
+    )
+
+
+def yum(conn, package, *a, **kw):
+    cmd = [
+        'yum',
+        '-y',
+        '-q',
+        'install',
+        package,
+    ]
+    return process.run(
+        conn,
+        cmd,
+        *a,
+        **kw
+    )
+
+
+def yum_remove(conn, packages, *a, **kw):
+    cmd = [
+        'yum',
+        '-y',
+        '-q',
+        'remove',
+    ]
+    if isinstance(packages, str):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    return process.run(
+        conn,
+        cmd,
+        *a,
+        **kw
+    )
+
+
+def rpm(conn, rpm_args=None, *a, **kw):
+    """
+    A minimal front end for ``rpm`. Extra flags can be passed in via
+    ``rpm_args`` as an iterable.
+    """
+    rpm_args = rpm_args or []
+    cmd = [
+        'rpm',
+        '-Uvh',
+    ]
+    cmd.extend(rpm_args)
+    return process.run(
+        conn,
+        cmd,
+        *a,
+        **kw
+    )
diff --git a/ceph_deploy/util/wrappers.py b/ceph_deploy/util/wrappers.py
new file mode 100644 (file)
index 0000000..04c38a1
--- /dev/null
@@ -0,0 +1,117 @@
+"""
+In a lot of places we need to make system calls, mainly through subprocess.
+Here we define them and reuse them with the added functionality of getting
+logging and remote execution.
+
+This allows us to only remote-execute the actual calls, not whole functions.
+"""
+from ceph_deploy.util import context
+
+
+def check_call(conn, logger, args, *a, **kw):
+    """
+    Wraps ``subprocess.check_call`` for a remote call via ``pushy``
+    doing all the capturing and logging nicely upon failure/success
+
+    The mangling of the traceback when an exception ocurrs, is because the
+    caller gets eating up by not being executed in the actual function of
+    a given module (e.g. ``centos/install.py``) but rather here, where the
+    stack trace is no longer relevant.
+
+    :param args: The args to be passed onto ``check_call``
+    """
+    command = ' '.join(args)
+    patch = kw.pop('patch', True)  # Always patch unless explicitly told to
+    mangle = kw.pop('mangle_exc', False)  # Default to not mangle exceptions
+    stop_on_error = kw.pop('stop_on_error', True)  # Halt on remote exceptions
+    logger.info('Running command: %s' % command)
+    kw.setdefault(
+        'env',
+        {
+            'PATH':
+            '/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin'
+        }
+    )
+
+    def remote_call(args, *a, **kw):
+        import subprocess
+        return subprocess.check_call(
+            args,
+            *a,
+            **kw
+        )
+
+    with context.remote(conn, logger, remote_call, mangle_exc=mangle, patch=patch) as call:
+        try:
+            return call(args, *a, **kw)
+        except Exception as err:
+            import inspect
+            stack = inspect.getframeinfo(inspect.currentframe().f_back)
+            if hasattr(err, 'remote_traceback'):
+                logger.error('Traceback (most recent call last):')
+                logger.error('  File "%s", line %s, in %s' % (
+                    stack[0],
+                    stack[1],
+                    stack[2])
+                )
+                err.remote_traceback.pop(0)
+                for line in err.remote_traceback:
+                    if line:
+                        logger.error(line)
+                if stop_on_error:
+                    raise RuntimeError(
+                        'Failed to execute command: %s' % ' '.join(args)
+                    )
+            else:
+                if stop_on_error:
+                    raise err
+
+
+def Popen(conn, logger, args, *a, **kw):
+    """
+    Wraps ``subprocess.Popen`` for a remote call via ``pushy``
+    doing all the capturing and logging nicely upon failure/success
+
+    The mangling of the traceback when an exception ocurrs, is because the
+    caller gets eating up by not being executed in the actual function of
+    a given module (e.g. ``centos/install.py``) but rather here, where the
+    stack trace is no longer relevant.
+
+    :param args: The args to be passed onto ``Popen``
+    """
+    command = ' '.join(args)
+    patch = kw.pop('patch', True)  # Always patch unless explicitly told to
+    logger.info('Running command: %s' % command)
+
+    def remote_call(args, *a, **kw):
+        import subprocess
+        process = subprocess.Popen(
+            args,
+            *a,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            **kw
+        )
+        stdout, stderr = process.communicate()
+        return stdout, stderr, process.wait()
+
+    with context.remote(conn, logger, remote_call, mangle_exc=False, patch=patch) as call:
+        try:
+            return call(args, *a, **kw)
+        except Exception as err:
+            import inspect
+            stack = inspect.getframeinfo(inspect.currentframe().f_back)
+            if hasattr(err, 'remote_traceback'):
+                logger.error('Traceback (most recent call last):')
+                logger.error('  File "%s", line %s, in %s' % (
+                    stack[0],
+                    stack[1],
+                    stack[2])
+                )
+                err.remote_traceback.pop(0)
+                for line in err.remote_traceback:
+                    if line:
+                        logger.error(line)
+                raise RuntimeError('Failed to execute command: %s' % ' '.join(args))
+            else:
+                raise err
diff --git a/ceph_deploy/validate.py b/ceph_deploy/validate.py
new file mode 100644 (file)
index 0000000..8ef5e73
--- /dev/null
@@ -0,0 +1,16 @@
+import argparse
+import re
+
+
+ALPHANUMERIC_RE = re.compile(r'^[a-zA-Z][a-zA-Z0-9]*$')
+
+
+def alphanumeric(s):
+    """
+    Enforces string to be alphanumeric with leading alpha.
+    """
+    if not ALPHANUMERIC_RE.match(s):
+        raise argparse.ArgumentTypeError(
+            'argument must start with a letter and contain only letters and numbers',
+            )
+    return s
diff --git a/debian/ceph-deploy.install b/debian/ceph-deploy.install
new file mode 100644 (file)
index 0000000..cec4ab6
--- /dev/null
@@ -0,0 +1 @@
+./scripts/ceph-deploy  /usr/bin
diff --git a/debian/changelog b/debian/changelog
new file mode 100644 (file)
index 0000000..760d9fd
--- /dev/null
@@ -0,0 +1,65 @@
+ceph-deploy (1.2.7) stable; urgency=low
+
+  * New upstream release 
+
+ -- Gary Lowell <gary.lowell@inktank.com>  Mon, 07 Oct 2013 18:33:45 +0000
+
+ceph-deploy (1.2.6-1) precise; urgency=low
+
+  * New upstream release 
+
+ -- Gary Lowell <glowell@pudgy.ops.newdream.net>  Wed, 18 Sep 2013 09:26:57 -0700
+
+ceph-deploy (1.2.5-1) precise; urgency=low
+
+  * New upstream release 
+
+ -- Gary Lowell <gary.lowell@inktank.com>  Tue, 17 Sep 2013 19:25:43 -0700
+
+ceph-deploy (1.2.4-1) precise; urgency=low
+
+  * New upstream release 
+
+ -- Gary Lowell <glowell@pudgy.ops.newdream.net>  Tue, 17 Sep 2013 11:19:59 -0700
+
+ceph-deploy (1.2.3) precise; urgency=low
+
+  * New upstream release 
+
+ -- Gary Lowell <gary.lowell@inktank.com>  Thu, 29 Aug 2013 15:20:22 -0700
+
+ceph-deploy (1.2.2) precise; urgency=low
+
+  * New upstream release 
+
+ -- Gary Lowell <gary.lowell@inktank.com>  Thu, 22 Aug 2013 12:26:56 -0700
+
+ceph-deploy (1.2.1-1) precise; urgency=low
+
+  * New upstream release 
+
+ -- Gary Lowell <gary.lowell@inktank.com>  Thu, 15 Aug 2013 15:19:33 -0700
+
+ceph-deploy (1.2-1) precise; urgency=low
+
+  * New upstream release 
+
+ -- Gary Lowell <gary.lowell@inktank.com>  Mon, 12 Aug 2013 16:59:09 -0700
+
+ceph-deploy (1.1-1) precise; urgency=low
+
+  * New upstream release 
+
+ -- Gary Lowell <gary.lowell@inktank.com>  Tue, 18 Jun 2013 11:07:00 -0700
+
+ceph-deploy (1.0-1) stable; urgency=low
+
+  * New upstream release
+
+ -- Gary Lowell <gary.lowell@inktank.com>  Fri, 24 May 2013 11:57:40 +0800
+
+ceph-deploy (0.0.1-1) unstable; urgency=low
+
+  * Initial release.
+
+ -- Gary Lowell <gary.lowell@inktank.com>  Mon, 10 Mar 2013 18:38:40 +0800
diff --git a/debian/compat b/debian/compat
new file mode 100644 (file)
index 0000000..7f8f011
--- /dev/null
@@ -0,0 +1 @@
+7
diff --git a/debian/control b/debian/control
new file mode 100644 (file)
index 0000000..84dacb4
--- /dev/null
@@ -0,0 +1,26 @@
+Source: ceph-deploy
+Maintainer: Sage Weil <sage@newdream.net>
+Uploaders: Sage Weil <sage@newdream.net>
+Section: admin
+Priority: optional
+Build-Depends: debhelper (>= 7), python-setuptools, git
+X-Python-Version: >= 2.4
+Standards-Version: 3.9.2
+Homepage: http://ceph.com/
+Vcs-Git: git://github.com/ceph/ceph-deploy.git
+Vcs-Browser: https://github.com/ceph/ceph-deploy
+
+Package: ceph-deploy
+Architecture: all
+Depends: python,
+         python-argparse,
+         python-pushy (>= 0.5.3),
+         python-setuptools,
+         ${misc:Depends},
+         ${python:Depends}
+Description:  Ceph-deploy is an easy to use configuration tool
+ for the Ceph distributed storage system.
+ .
+ This package includes the programs and libraries to support
+ simple ceph cluster deployment.
+
diff --git a/debian/copyright b/debian/copyright
new file mode 100644 (file)
index 0000000..93bc530
--- /dev/null
@@ -0,0 +1,3 @@
+Files: *
+Copyright: (c) 2004-2012 by Sage Weil <sage@newdream.net>
+License: LGPL2.1 (see /usr/share/common-licenses/LGPL-2.1)
diff --git a/debian/rules b/debian/rules
new file mode 100755 (executable)
index 0000000..b46b956
--- /dev/null
@@ -0,0 +1,12 @@
+#!/usr/bin/make -f
+
+# Uncomment this to turn on verbose mode.
+export DH_VERBOSE=1
+@export DEB_PYTHON_INSTALL_ARGS_ALL += --install-lib=/usr/share/ceph-deploy
+
+%:
+       dh $@ --buildsystem python_distutils --with python2
+
+override_dh_clean:
+       rm -rf ceph_deploy/lib/remoto
+       dh_clean
diff --git a/debian/source/format b/debian/source/format
new file mode 100644 (file)
index 0000000..d3827e7
--- /dev/null
@@ -0,0 +1 @@
+1.0
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644 (file)
index 0000000..dbc0d19
--- /dev/null
@@ -0,0 +1,3 @@
+pytest >=2.1.3
+tox >=1.2
+mock >=1.0b1
diff --git a/requirements.txt b/requirements.txt
new file mode 100644 (file)
index 0000000..41b5dc8
--- /dev/null
@@ -0,0 +1 @@
+pushy >=0.5.1
diff --git a/scripts/build-debian.sh b/scripts/build-debian.sh
new file mode 100755 (executable)
index 0000000..ce205ca
--- /dev/null
@@ -0,0 +1,84 @@
+#! /bin/sh
+
+# Tag tree and update version number in change log and
+# in setup.py before building.
+
+REPO=debian-repo
+COMPONENT=main
+KEYID=${KEYID:-03C3951A}  # default is autobuild keyid
+DEB_DIST="sid wheezy squeeze quantal precise oneiric natty raring"
+DEB_BUILD=$(lsb_release -s -c)
+RELEASE=0
+
+if [ X"$1" = X"--release" ] ; then
+    echo "Release Build"
+    RELEASE=1
+fi
+
+if [ ! -d debian ] ; then
+    echo "Are we in the right directory"
+    exit 1
+fi
+
+if gpg --list-keys 2>/dev/null | grep -q ${KEYID} ; then
+    echo "Signing packages and repo with ${KEYID}"
+else
+    echo "Package signing key (${KEYID}) not found"
+    echo "Have you set \$GNUPGHOME ? "
+    exit 3
+fi
+
+# Clean up any leftover builds
+rm -f ../ceph-deploy*.dsc ../ceph-deploy*.changes ../ceph-deploy*.deb ../ceph-deploy.tgz
+rm -rf ./debian-repo
+
+# Apply backport tag if release build
+if [ $RELEASE -eq 1 ] ; then 
+    DEB_VERSION=$(dpkg-parsechangelog | sed -rne 's,^Version: (.*),\1, p')
+    BP_VERSION=${DEB_VERSION}${BPTAG}
+    DEBEMAIL="gary.lowell@inktank.com" dch -D $DIST --force-distribution -b -v "$BP_VERSION" "$comment"
+    dpkg-source -b .
+fi
+
+# Build Package
+echo "Building for dist: $DEB_BUILD"
+dpkg-buildpackage -k$KEYID
+if [ $? -ne 0 ] ; then
+    echo "Build failed"
+    exit 2
+fi
+
+# Build Repo
+PKG=../ceph-deploy*.changes
+mkdir -p $REPO/conf
+if [ -e $REPO/conf/distributions ] ; then
+    rm -f $REPO/conf/distributions
+fi
+
+for DIST in  $DEB_DIST ; do
+    cat <<EOF >> $REPO/conf/distributions
+Codename: $DIST
+Suite: stable
+Components: $COMPONENT
+Architectures: amd64 armhf i386 source
+Origin: Inktank
+Description: Ceph distributed file system
+DebIndices: Packages Release . .gz .bz2
+DscIndices: Sources Release .gz .bz2
+Contents: .gz .bz2
+SignWith: $KEYID
+
+EOF
+done
+
+echo "Adding package to repo, dist: $DEB_BUILD ($PKG)"
+reprepro --ask-passphrase -b $REPO -C $COMPONENT --ignore=undefinedtarget --ignore=wrongdistribution include $DEB_BUILD $PKG
+
+#for DIST in $DEB_DIST
+#do
+#    [ "$DIST" = "$DEB_BUILD" ] && continue
+#    echo "Copying package to dist: $DIST"
+#    reprepro -b $REPO --ignore=undefinedtarget --ignore=wrongdistribution copy $DIST $DEB_BUILD ceph-deploy
+#done
+
+echo "Done"
diff --git a/scripts/build-rpm.sh b/scripts/build-rpm.sh
new file mode 100755 (executable)
index 0000000..9b330e4
--- /dev/null
@@ -0,0 +1,59 @@
+#! /bin/sh
+
+# Tag tree and update version number in change log and
+# in setup.py before building.
+
+REPO=rpm-repo
+KEYID=${KEYID:-03C3951A}  # Default is autobuild-key
+BUILDAREA=./rpmbuild
+DIST=el6
+RPM_BUILD=$(lsb_release -s -c)
+
+if [ ! -e setup.py ] ; then
+    echo "Are we in the right directory"
+    exit 1
+fi
+
+if gpg --list-keys 2>/dev/null | grep -q ${KEYID} ; then
+    echo "Signing packages and repo with ${KEYID}"
+else
+    echo "Package signing key (${KEYID}) not found"
+    echo "Have you set \$GNUPGHOME ? "
+    exit 3
+fi
+
+if ! CREATEREPO=`which createrepo` ; then
+    echo "Please install the createrepo package"
+    exit 4
+fi
+
+# Create Tarball
+python setup.py sdist --formats=bztar
+
+# Build RPM
+mkdir -p rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
+BUILDAREA=`readlink -fn ${BUILDAREA}`   ### rpm wants absolute path
+cp ceph-deploy.spec ${BUILDAREA}/SPECS
+cp dist/*.tar.bz2 ${BUILDAREA}/SOURCES
+echo "buildarea is: ${BUILDAREA}"
+rpmbuild -ba --define "_topdir ${BUILDAREA}" --define "_unpackaged_files_terminate_build 0" ${BUILDAREA}/SPECS/ceph-deploy.spec
+
+# create repo
+DEST=${REPO}/${DIST}
+mkdir -p ${REPO}/${DIST}
+cp -r ${BUILDAREA}/*RPMS ${DEST}
+
+# Sign all the RPMs for this release
+rpm_list=`find ${REPO} -name "*.rpm" -print`
+rpm --addsign --define "_gpg_name ${KEYID}" $rpm_list
+
+# Construct repodata
+for dir in ${DEST}/SRPMS ${DEST}/RPMS/*
+do
+    if [ -d $dir ] ; then
+        createrepo $dir
+        gpg --detach-sign --armor -u ${KEYID} $dir/repodata/repomd.xml
+    fi
+done
+
+exit 0
diff --git a/scripts/ceph-deploy b/scripts/ceph-deploy
new file mode 100755 (executable)
index 0000000..cc8dd62
--- /dev/null
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+import os
+import platform
+import sys
+"""
+ceph-deploy - admin tool for ceph
+"""
+
+if os.path.exists('/usr/share/pyshared/ceph_deploy'):
+    sys.path.insert(0,'/usr/share/pyshared/ceph_deploy')
+elif os.path.exists('/usr/share/ceph-deploy'):
+    sys.path.insert(0,'/usr/share/ceph-deploy')
+elif os.path.exists('/usr/share/pyshared/ceph-deploy'):
+    sys.path.insert(0,'/usr/share/pyshared/ceph-deploy')
+elif os.path.exists('/usr/lib/python2.6/site-packages/ceph_deploy'):
+    sys.path.insert(0,'/usr/lib/python2.6/site-packages/ceph_deploy')
+
+from ceph_deploy.cli import main
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/setup.cfg b/setup.cfg
new file mode 100644 (file)
index 0000000..d9ec107
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,2 @@
+[pytest]
+norecursedirs = .* _* virtualenv
diff --git a/setup.py b/setup.py
new file mode 100644 (file)
index 0000000..3e4479c
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,72 @@
+from setuptools import setup, find_packages
+import os
+import sys
+import ceph_deploy
+from vendor import vendorize
+
+
+def read(fname):
+    path = os.path.join(os.path.dirname(__file__), fname)
+    f = open(path)
+    return f.read()
+
+install_requires = []
+pyversion = sys.version_info[:2]
+if pyversion < (2, 7) or (3, 0) <= pyversion <= (3, 1):
+    install_requires.append('argparse')
+
+#
+# Add libraries that are not part of install_requires
+#
+vendorize([
+    ('remoto', '0.0.5'),
+])
+
+
+setup(
+    name='ceph-deploy',
+    version=ceph_deploy.__version__,
+    packages=find_packages(),
+
+    author='Inktank',
+    author_email='ceph-devel@vger.kernel.org',
+    description='Deploy Ceph with minimal infrastructure',
+    long_description=read('README.rst'),
+    license='MIT',
+    keywords='ceph deploy',
+    url="https://github.com/ceph/ceph-deploy",
+
+    install_requires=[
+        'setuptools',
+        'pushy >=0.5.2',
+        ] + install_requires,
+
+    tests_require=[
+        'pytest >=2.1.3',
+        'mock >=1.0b1',
+        ],
+
+    entry_points={
+
+        'console_scripts': [
+            'ceph-deploy = ceph_deploy.cli:main',
+            ],
+
+        'ceph_deploy.cli': [
+            'new = ceph_deploy.new:make',
+            'install = ceph_deploy.install:make',
+            'uninstall = ceph_deploy.install:make_uninstall',
+            'purge = ceph_deploy.install:make_purge',
+            'purgedata = ceph_deploy.install:make_purge_data',
+            'mon = ceph_deploy.mon:make',
+            'gatherkeys = ceph_deploy.gatherkeys:make',
+            'osd = ceph_deploy.osd:make',
+            'disk = ceph_deploy.osd:make_disk',
+            'mds = ceph_deploy.mds:make',
+            'forgetkeys = ceph_deploy.forgetkeys:make',
+            'config = ceph_deploy.config:make',
+            'admin = ceph_deploy.admin:make',
+            ],
+
+        },
+    )
diff --git a/tox.ini b/tox.ini
new file mode 100644 (file)
index 0000000..5a078cd
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,8 @@
+[tox]
+envlist = py26, py27
+
+[testenv]
+deps=
+  pytest
+  mock
+commands=py.test -v {posargs:ceph_deploy/tests}
diff --git a/vendor.py b/vendor.py
new file mode 100644 (file)
index 0000000..7ce7f05
--- /dev/null
+++ b/vendor.py
@@ -0,0 +1,79 @@
+import subprocess
+import os
+from os import path
+import traceback
+
+
+error_msg = """
+This library depends on sources fetched when packaging that failed to be
+retrieved.
+
+This means that it will *not* work as expected. Errors encountered:
+"""
+
+
+def run(cmd):
+    print '[vendoring] Running command: %s' % ' '.join(cmd)
+    try:
+        result = subprocess.Popen(
+            cmd,
+            stderr=subprocess.PIPE,
+            stdout=subprocess.PIPE
+        )
+    except Exception as error:
+        print_error([], traceback.format_exc(error).split('\n'))
+        raise SystemExit(1)
+
+    if result.wait():
+        print_error(result.stdout.readlines(), result.stderr.readlines())
+
+
+def print_error(stdout, stderr):
+    print '*'*80
+    print error_msg
+    for line in stdout:
+        print line
+    for line in stderr:
+        print line
+    print '*'*80
+
+
+def vendor_library(name, version):
+    this_dir = path.dirname(path.abspath(__file__))
+    vendor_dest = path.join(this_dir, 'ceph_deploy/lib/%s' % name)
+    vendor_src = path.join(this_dir, name)
+    vendor_module = path.join(vendor_src, name)
+    current_dir = os.getcwd()
+
+    if path.exists(vendor_src):
+        run(['rm', '-rf', vendor_src])
+
+    if path.exists(vendor_dest):
+        module = __import__('ceph_deploy.lib.remoto', globals(), locals(), ['__version__'])
+        if module.__version__ != version:
+            run(['rm', '-rf', vendor_dest])
+
+    if not path.exists(vendor_dest):
+        run(['git', 'clone', 'git://ceph.com/%s' % name])
+        os.chdir(vendor_src)
+        run(['git', 'checkout', version])
+        run(['mv', vendor_module, vendor_dest])
+    os.chdir(current_dir)
+
+
+def vendorize(vendor_requirements):
+    """
+    This is the main entry point for vendorizing requirements. It expects
+    a list of tuples that should contain the name of the library and the
+    version.
+
+    For example, a library ``foo`` with version ``0.0.1`` would look like::
+
+        vendor_requirements = [
+            ('foo', '0.0.1'),
+        ]
+    """
+
+    for library in vendor_requirements:
+        name, version = library
+        vendor_library(name, version)